diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index 2fd6abc9a2..856a64b814 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -16,7 +16,7 @@ jobs: - name: Set up Go 1.x uses: actions/setup-go@v5 with: - go-version: ^1.18 + go-version: ^1.19 id: go - name: Check out code into the Go module directory diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 15f3a980ec..8b22906eb9 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -28,11 +28,11 @@ jobs: name: Set up Go uses: actions/setup-go@v5 with: - go-version: 1.18 + go-version: 1.19 - name: Import GPG key id: import_gpg - uses: crazy-max/ghaction-import-gpg@v6.0.0 + uses: crazy-max/ghaction-import-gpg@v6.1.0 with: # These secrets will need to be configured for the repository: gpg_private_key: ${{ secrets.GPG_PRIVATE_KEY }} diff --git a/.gitignore b/.gitignore index ae7bfbaa25..f4ae5814f9 100644 --- a/.gitignore +++ b/.gitignore @@ -37,3 +37,4 @@ vendor/ !command/test-fixtures/**/*.tfstate !command/test-fixtures/**/.terraform/ +*.sh diff --git a/.secrets.baseline b/.secrets.baseline index dfa3b8db5b..0cd34d7245 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -3,7 +3,7 @@ "files": "go.mod|go.sum|.*.map|^.secrets.baseline$", "lines": null }, - "generated_at": "2023-12-11T16:26:19Z", + "generated_at": "2024-01-29T11:06:53Z", "plugins_used": [ { "name": "AWSKeyDetector" @@ -680,7 +680,7 @@ "hashed_secret": "34f3e72c9e8f331c6c12a39b4ca27d46880ab2cd", "is_secret": false, "is_verified": false, - "line_number": 85, + "line_number": 61, "type": "Secret Keyword", "verified_result": null }, @@ -688,7 +688,7 @@ "hashed_secret": "f4aa5360c26e2a4e2d45e095bd597e84c497fbcd", "is_secret": false, "is_verified": false, - "line_number": 130, + "line_number": 106, "type": "Secret Keyword", "verified_result": null }, @@ -696,7 +696,7 @@ "hashed_secret": "912accc17209bb36cb22d76d430ef9e9ec99dd4c", "is_secret": false, "is_verified": false, - "line_number": 188, + "line_number": 164, "type": "Secret Keyword", "verified_result": null }, @@ -704,7 +704,7 @@ "hashed_secret": "514edd121688f936809a62aecd24419c7eaa772b", "is_secret": false, "is_verified": false, - "line_number": 275, + "line_number": 251, "type": "Secret Keyword", "verified_result": null }, @@ -712,7 +712,7 @@ "hashed_secret": "fa33d07da58b52eee9f13b88e9cda8b98f1c19b6", "is_secret": false, "is_verified": false, - "line_number": 286, + "line_number": 262, "type": "Secret Keyword", "verified_result": null }, @@ -720,7 +720,7 @@ "hashed_secret": "5926151b9a84e25fbc262e88ef6c1d58f0c95548", "is_secret": false, "is_verified": false, - "line_number": 298, + "line_number": 274, "type": "Secret Keyword", "verified_result": null } @@ -760,7 +760,7 @@ "hashed_secret": "731438016c5ab94431f61820f35e3ae5f8ad6004", "is_secret": false, "is_verified": false, - "line_number": 417, + "line_number": 438, "type": "Secret Keyword", "verified_result": null }, @@ -768,7 +768,7 @@ "hashed_secret": "12da2e35d6b50c902c014f1ab9e3032650368df7", "is_secret": false, "is_verified": false, - "line_number": 423, + "line_number": 444, "type": "Secret Keyword", "verified_result": null }, @@ -776,7 +776,7 @@ "hashed_secret": "813274ccae5b6b509379ab56982d862f7b5969b6", "is_secret": false, "is_verified": false, - "line_number": 1128, + "line_number": 1179, "type": "Base64 High Entropy String", "verified_result": null } @@ -786,7 +786,7 @@ "hashed_secret": "9184b0c38101bf24d78b2bb0d044deb1d33696fc", "is_secret": false, "is_verified": 
false, - "line_number": 131, + "line_number": 132, "type": "Secret Keyword", "verified_result": null }, @@ -794,7 +794,7 @@ "hashed_secret": "c427f185ddcb2440be9b77c8e45f1cd487a2e790", "is_secret": false, "is_verified": false, - "line_number": 1438, + "line_number": 1454, "type": "Base64 High Entropy String", "verified_result": null }, @@ -802,7 +802,7 @@ "hashed_secret": "1f7e33de15e22de9d2eaf502df284ed25ca40018", "is_secret": false, "is_verified": false, - "line_number": 1505, + "line_number": 1521, "type": "Secret Keyword", "verified_result": null }, @@ -810,7 +810,7 @@ "hashed_secret": "1f614c2eb6b3da22d89bd1b9fd47d7cb7c8fc670", "is_secret": false, "is_verified": false, - "line_number": 3298, + "line_number": 3342, "type": "Secret Keyword", "verified_result": null }, @@ -818,7 +818,7 @@ "hashed_secret": "7abfce65b8504403afc25c9790f358d513dfbcc6", "is_secret": false, "is_verified": false, - "line_number": 3311, + "line_number": 3355, "type": "Secret Keyword", "verified_result": null }, @@ -826,7 +826,7 @@ "hashed_secret": "0c2d85bf9a9b1579b16f220a4ea8c3d62b2e24b1", "is_secret": false, "is_verified": false, - "line_number": 3352, + "line_number": 3396, "type": "Secret Keyword", "verified_result": null } @@ -846,7 +846,7 @@ "hashed_secret": "da8cae6284528565678de15e03d461e23fe22538", "is_secret": false, "is_verified": false, - "line_number": 1858, + "line_number": 1895, "type": "Secret Keyword", "verified_result": null }, @@ -854,7 +854,7 @@ "hashed_secret": "1a0334cfa65f4be58b9d914b8e96e9d9478bfbac", "is_secret": false, "is_verified": false, - "line_number": 3239, + "line_number": 3276, "type": "Secret Keyword", "verified_result": null } @@ -864,7 +864,7 @@ "hashed_secret": "c8b6f5ef11b9223ac35a5663975a466ebe7ebba9", "is_secret": false, "is_verified": false, - "line_number": 1806, + "line_number": 1842, "type": "Secret Keyword", "verified_result": null }, @@ -872,7 +872,7 @@ "hashed_secret": "8abf4899c01104241510ba87685ad4de76b0c437", "is_secret": false, "is_verified": false, - "line_number": 1812, + "line_number": 1848, "type": "Secret Keyword", "verified_result": null } @@ -2056,7 +2056,7 @@ "hashed_secret": "deab23f996709b4e3d14e5499d1cc2de677bfaa8", "is_secret": false, "is_verified": false, - "line_number": 1334, + "line_number": 1421, "type": "Secret Keyword", "verified_result": null }, @@ -2064,7 +2064,7 @@ "hashed_secret": "20a25bac21219ffff1904bde871ded4027eca2f8", "is_secret": false, "is_verified": false, - "line_number": 1923, + "line_number": 2017, "type": "Secret Keyword", "verified_result": null }, @@ -2072,7 +2072,7 @@ "hashed_secret": "b732fb611fd46a38e8667f9972e0cde777fbe37f", "is_secret": false, "is_verified": false, - "line_number": 1942, + "line_number": 2036, "type": "Secret Keyword", "verified_result": null }, @@ -2080,7 +2080,7 @@ "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", "is_secret": false, "is_verified": false, - "line_number": 2155, + "line_number": 2249, "type": "Secret Keyword", "verified_result": null } @@ -2100,7 +2100,7 @@ "hashed_secret": "2317aa72dafa0a07f05af47baa2e388f95dcf6f3", "is_secret": false, "is_verified": false, - "line_number": 205, + "line_number": 202, "type": "Secret Keyword", "verified_result": null } @@ -2110,7 +2110,7 @@ "hashed_secret": "2317aa72dafa0a07f05af47baa2e388f95dcf6f3", "is_secret": false, "is_verified": false, - "line_number": 823, + "line_number": 784, "type": "Secret Keyword", "verified_result": null } @@ -2120,7 +2120,7 @@ "hashed_secret": "2317aa72dafa0a07f05af47baa2e388f95dcf6f3", "is_secret": false, 
"is_verified": false, - "line_number": 778, + "line_number": 832, "type": "Secret Keyword", "verified_result": null } @@ -2130,7 +2130,7 @@ "hashed_secret": "2317aa72dafa0a07f05af47baa2e388f95dcf6f3", "is_secret": false, "is_verified": false, - "line_number": 213, + "line_number": 216, "type": "Secret Keyword", "verified_result": null } @@ -2140,7 +2140,7 @@ "hashed_secret": "8cbbbfad0206e5953901f679b0d26d583c4f5ffe", "is_secret": false, "is_verified": false, - "line_number": 271, + "line_number": 268, "type": "Secret Keyword", "verified_result": null }, @@ -2148,7 +2148,7 @@ "hashed_secret": "2317aa72dafa0a07f05af47baa2e388f95dcf6f3", "is_secret": false, "is_verified": false, - "line_number": 336, + "line_number": 333, "type": "Secret Keyword", "verified_result": null } @@ -2158,7 +2158,7 @@ "hashed_secret": "5667b8489a17faa9ef54941db31ed762be280bec", "is_secret": false, "is_verified": false, - "line_number": 157, + "line_number": 154, "type": "Secret Keyword", "verified_result": null }, @@ -2166,7 +2166,7 @@ "hashed_secret": "2317aa72dafa0a07f05af47baa2e388f95dcf6f3", "is_secret": false, "is_verified": false, - "line_number": 189, + "line_number": 186, "type": "Secret Keyword", "verified_result": null } @@ -2176,7 +2176,7 @@ "hashed_secret": "2317aa72dafa0a07f05af47baa2e388f95dcf6f3", "is_secret": false, "is_verified": false, - "line_number": 214, + "line_number": 220, "type": "Secret Keyword", "verified_result": null } @@ -2186,7 +2186,7 @@ "hashed_secret": "2317aa72dafa0a07f05af47baa2e388f95dcf6f3", "is_secret": false, "is_verified": false, - "line_number": 150, + "line_number": 156, "type": "Secret Keyword", "verified_result": null } @@ -2196,7 +2196,7 @@ "hashed_secret": "728e83f156932be9b1dc48a5c3f7a3bfbeeb08ce", "is_secret": false, "is_verified": false, - "line_number": 490, + "line_number": 496, "type": "Secret Keyword", "verified_result": null }, @@ -2204,7 +2204,7 @@ "hashed_secret": "2317aa72dafa0a07f05af47baa2e388f95dcf6f3", "is_secret": false, "is_verified": false, - "line_number": 658, + "line_number": 673, "type": "Secret Keyword", "verified_result": null } @@ -2214,7 +2214,7 @@ "hashed_secret": "2317aa72dafa0a07f05af47baa2e388f95dcf6f3", "is_secret": false, "is_verified": false, - "line_number": 223, + "line_number": 229, "type": "Secret Keyword", "verified_result": null } @@ -2224,7 +2224,15 @@ "hashed_secret": "2317aa72dafa0a07f05af47baa2e388f95dcf6f3", "is_secret": false, "is_verified": false, - "line_number": 272, + "line_number": 282, + "type": "Secret Keyword", + "verified_result": null + }, + { + "hashed_secret": "44cdfc3615970ada14420caaaa5c5745fca06002", + "is_secret": false, + "is_verified": false, + "line_number": 300, "type": "Secret Keyword", "verified_result": null } @@ -2234,7 +2242,7 @@ "hashed_secret": "c237978e1983e0caf1c3a84f1c2e72a7fb2981f2", "is_secret": false, "is_verified": false, - "line_number": 19, + "line_number": 20, "type": "Secret Keyword", "verified_result": null }, @@ -2242,7 +2250,7 @@ "hashed_secret": "d67007844d8f7fbc45ea3b27c4bea0bffafb53a0", "is_secret": false, "is_verified": false, - "line_number": 27, + "line_number": 28, "type": "Secret Keyword", "verified_result": null }, @@ -2250,7 +2258,7 @@ "hashed_secret": "279fb854eb9fa001b4629518a45c921cfad6d697", "is_secret": false, "is_verified": false, - "line_number": 35, + "line_number": 36, "type": "Secret Keyword", "verified_result": null }, @@ -2258,7 +2266,7 @@ "hashed_secret": "dad6fac3e5b6be7bb6f274970b4c50739a7e26ee", "is_secret": false, "is_verified": false, - "line_number": 59, + 
"line_number": 60, "type": "Secret Keyword", "verified_result": null }, @@ -2266,7 +2274,7 @@ "hashed_secret": "8cbbbfad0206e5953901f679b0d26d583c4f5ffe", "is_secret": false, "is_verified": false, - "line_number": 67, + "line_number": 68, "type": "Secret Keyword", "verified_result": null }, @@ -2274,7 +2282,7 @@ "hashed_secret": "f5ecb30890399c7b1d1781583478aaa9d0b0c89d", "is_secret": false, "is_verified": false, - "line_number": 91, + "line_number": 92, "type": "Secret Keyword", "verified_result": null }, @@ -2282,7 +2290,23 @@ "hashed_secret": "6da9eab371358a331c59a76d80a0ffcd589fe3c9", "is_secret": false, "is_verified": false, - "line_number": 101, + "line_number": 102, + "type": "Secret Keyword", + "verified_result": null + }, + { + "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", + "is_secret": false, + "is_verified": false, + "line_number": 163, + "type": "Secret Keyword", + "verified_result": null + }, + { + "hashed_secret": "e03932ac8a17ed1819fe161fd253bf323e0e3ec9", + "is_secret": false, + "is_verified": false, + "line_number": 172, "type": "Secret Keyword", "verified_result": null } @@ -2816,7 +2840,7 @@ "hashed_secret": "b732fb611fd46a38e8667f9972e0cde777fbe37f", "is_secret": false, "is_verified": false, - "line_number": 1326, + "line_number": 1154, "type": "Secret Keyword", "verified_result": null } @@ -2876,7 +2900,7 @@ "hashed_secret": "3046d9f6cfaaeea6eed9bb7a4ab010fe49b0cfd4", "is_secret": false, "is_verified": false, - "line_number": 335, + "line_number": 380, "type": "Secret Keyword", "verified_result": null }, @@ -2884,7 +2908,7 @@ "hashed_secret": "92f08f2d9a0dc3f0d4cb3796435a48508cf59ecd", "is_secret": false, "is_verified": false, - "line_number": 663, + "line_number": 707, "type": "Secret Keyword", "verified_result": null } @@ -2894,7 +2918,7 @@ "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3", "is_secret": false, "is_verified": false, - "line_number": 58, + "line_number": 59, "type": "Secret Keyword", "verified_result": null } @@ -2904,7 +2928,7 @@ "hashed_secret": "3046d9f6cfaaeea6eed9bb7a4ab010fe49b0cfd4", "is_secret": false, "is_verified": false, - "line_number": 120, + "line_number": 124, "type": "Secret Keyword", "verified_result": null }, @@ -2912,7 +2936,7 @@ "hashed_secret": "92f08f2d9a0dc3f0d4cb3796435a48508cf59ecd", "is_secret": false, "is_verified": false, - "line_number": 298, + "line_number": 302, "type": "Secret Keyword", "verified_result": null } @@ -2922,7 +2946,7 @@ "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3", "is_secret": false, "is_verified": false, - "line_number": 55, + "line_number": 56, "type": "Secret Keyword", "verified_result": null } @@ -2932,7 +2956,7 @@ "hashed_secret": "3046d9f6cfaaeea6eed9bb7a4ab010fe49b0cfd4", "is_secret": false, "is_verified": false, - "line_number": 237, + "line_number": 277, "type": "Secret Keyword", "verified_result": null }, @@ -2940,7 +2964,7 @@ "hashed_secret": "92f08f2d9a0dc3f0d4cb3796435a48508cf59ecd", "is_secret": false, "is_verified": false, - "line_number": 875, + "line_number": 1059, "type": "Secret Keyword", "verified_result": null } @@ -2968,7 +2992,7 @@ "hashed_secret": "92f08f2d9a0dc3f0d4cb3796435a48508cf59ecd", "is_secret": false, "is_verified": false, - "line_number": 499, + "line_number": 497, "type": "Secret Keyword", "verified_result": null } @@ -3038,7 +3062,7 @@ "hashed_secret": "3c2ecad9b250fd6d99893e4d05ec02ca19aa95d0", "is_secret": false, "is_verified": false, - "line_number": 389, + "line_number": 396, "type": "Secret Keyword", "verified_result": null } 
@@ -3081,36 +3105,6 @@ "verified_result": null } ], - "ibm/service/scc/data_source_ibm_scc_provider_type_instance_test.go": [ - { - "hashed_secret": "83747cea2b26d7652ed39218ddcdb1461c570535", - "is_secret": false, - "is_verified": false, - "line_number": 79, - "type": "Hex High Entropy String", - "verified_result": null - } - ], - "ibm/service/scc/data_source_ibm_scc_provider_type_test.go": [ - { - "hashed_secret": "83747cea2b26d7652ed39218ddcdb1461c570535", - "is_secret": false, - "is_verified": false, - "line_number": 43, - "type": "Hex High Entropy String", - "verified_result": null - } - ], - "ibm/service/scc/resource_ibm_scc_provider_type_instance_test.go": [ - { - "hashed_secret": "83747cea2b26d7652ed39218ddcdb1461c570535", - "is_secret": false, - "is_verified": false, - "line_number": 94, - "type": "Hex High Entropy String", - "verified_result": null - } - ], "ibm/service/schematics/data_source_ibm_schematics_action.go": [ { "hashed_secret": "49f3bb8f759241df51c899d3725d877bad58f66e", @@ -3144,7 +3138,7 @@ "hashed_secret": "09c0dfbba1f2b2576cfbac116e13b0258bc26bfa", "is_secret": false, "is_verified": false, - "line_number": 468, + "line_number": 470, "type": "Secret Keyword", "verified_result": null }, @@ -3152,7 +3146,7 @@ "hashed_secret": "d282ab8a33d987146dda0381b4effdf2d91c0d65", "is_secret": false, "is_verified": false, - "line_number": 474, + "line_number": 476, "type": "Secret Keyword", "verified_result": null } @@ -3200,7 +3194,7 @@ "hashed_secret": "3046d9f6cfaaeea6eed9bb7a4ab010fe49b0cfd4", "is_secret": false, "is_verified": false, - "line_number": 185, + "line_number": 180, "type": "Secret Keyword", "verified_result": null }, @@ -3208,7 +3202,7 @@ "hashed_secret": "b732fb611fd46a38e8667f9972e0cde777fbe37f", "is_secret": false, "is_verified": false, - "line_number": 312, + "line_number": 307, "type": "Secret Keyword", "verified_result": null } @@ -3226,7 +3220,7 @@ "hashed_secret": "b732fb611fd46a38e8667f9972e0cde777fbe37f", "is_secret": false, "is_verified": false, - "line_number": 291, + "line_number": 286, "type": "Secret Keyword", "verified_result": null } @@ -3274,7 +3268,7 @@ "hashed_secret": "3046d9f6cfaaeea6eed9bb7a4ab010fe49b0cfd4", "is_secret": false, "is_verified": false, - "line_number": 235, + "line_number": 230, "type": "Secret Keyword", "verified_result": null }, @@ -3282,7 +3276,7 @@ "hashed_secret": "b732fb611fd46a38e8667f9972e0cde777fbe37f", "is_secret": false, "is_verified": false, - "line_number": 414, + "line_number": 409, "type": "Secret Keyword", "verified_result": null } @@ -3486,7 +3480,7 @@ "hashed_secret": "3046d9f6cfaaeea6eed9bb7a4ab010fe49b0cfd4", "is_secret": false, "is_verified": false, - "line_number": 162, + "line_number": 157, "type": "Secret Keyword", "verified_result": null }, @@ -3494,7 +3488,7 @@ "hashed_secret": "b732fb611fd46a38e8667f9972e0cde777fbe37f", "is_secret": false, "is_verified": false, - "line_number": 278, + "line_number": 273, "type": "Secret Keyword", "verified_result": null } @@ -3532,7 +3526,7 @@ "hashed_secret": "3046d9f6cfaaeea6eed9bb7a4ab010fe49b0cfd4", "is_secret": false, "is_verified": false, - "line_number": 204, + "line_number": 197, "type": "Secret Keyword", "verified_result": null }, @@ -3540,7 +3534,7 @@ "hashed_secret": "108b310facc1a193833fc2971fd83081f775ea0c", "is_secret": false, "is_verified": false, - "line_number": 395, + "line_number": 388, "type": "Secret Keyword", "verified_result": null }, @@ -3548,7 +3542,7 @@ "hashed_secret": "b732fb611fd46a38e8667f9972e0cde777fbe37f", "is_secret": false, 
"is_verified": false, - "line_number": 398, + "line_number": 391, "type": "Secret Keyword", "verified_result": null } @@ -3568,7 +3562,7 @@ "hashed_secret": "3046d9f6cfaaeea6eed9bb7a4ab010fe49b0cfd4", "is_secret": false, "is_verified": false, - "line_number": 109, + "line_number": 108, "type": "Secret Keyword", "verified_result": null }, @@ -3576,7 +3570,7 @@ "hashed_secret": "b732fb611fd46a38e8667f9972e0cde777fbe37f", "is_secret": false, "is_verified": false, - "line_number": 430, + "line_number": 429, "type": "Secret Keyword", "verified_result": null }, @@ -3584,7 +3578,7 @@ "hashed_secret": "9beb31de125498074813c6f31c0e4df3e54a5489", "is_secret": false, "is_verified": false, - "line_number": 646, + "line_number": 645, "type": "Secret Keyword", "verified_result": null } @@ -3612,7 +3606,7 @@ "hashed_secret": "3046d9f6cfaaeea6eed9bb7a4ab010fe49b0cfd4", "is_secret": false, "is_verified": false, - "line_number": 298, + "line_number": 296, "type": "Secret Keyword", "verified_result": null }, @@ -3620,7 +3614,7 @@ "hashed_secret": "b732fb611fd46a38e8667f9972e0cde777fbe37f", "is_secret": false, "is_verified": false, - "line_number": 539, + "line_number": 537, "type": "Secret Keyword", "verified_result": null } @@ -3666,7 +3660,7 @@ "hashed_secret": "3046d9f6cfaaeea6eed9bb7a4ab010fe49b0cfd4", "is_secret": false, "is_verified": false, - "line_number": 397, + "line_number": 396, "type": "Secret Keyword", "verified_result": null }, @@ -3674,7 +3668,7 @@ "hashed_secret": "b732fb611fd46a38e8667f9972e0cde777fbe37f", "is_secret": false, "is_verified": false, - "line_number": 637, + "line_number": 636, "type": "Secret Keyword", "verified_result": null } @@ -3694,7 +3688,7 @@ "hashed_secret": "3046d9f6cfaaeea6eed9bb7a4ab010fe49b0cfd4", "is_secret": false, "is_verified": false, - "line_number": 39, + "line_number": 44, "type": "Secret Keyword", "verified_result": null }, @@ -3702,7 +3696,7 @@ "hashed_secret": "b732fb611fd46a38e8667f9972e0cde777fbe37f", "is_secret": false, "is_verified": false, - "line_number": 144, + "line_number": 152, "type": "Secret Keyword", "verified_result": null } @@ -3789,12 +3783,12 @@ "verified_result": null } ], - "ibm/service/secretsmanager/resource_ibm_sm_service_credentilas_secret.go": [ + "ibm/service/secretsmanager/resource_ibm_sm_service_credentials_secret.go": [ { "hashed_secret": "3046d9f6cfaaeea6eed9bb7a4ab010fe49b0cfd4", "is_secret": false, "is_verified": false, - "line_number": 190, + "line_number": 189, "type": "Secret Keyword", "verified_result": null }, @@ -3802,7 +3796,7 @@ "hashed_secret": "b732fb611fd46a38e8667f9972e0cde777fbe37f", "is_secret": false, "is_verified": false, - "line_number": 443, + "line_number": 442, "type": "Secret Keyword", "verified_result": null } @@ -3812,7 +3806,7 @@ "hashed_secret": "3046d9f6cfaaeea6eed9bb7a4ab010fe49b0cfd4", "is_secret": false, "is_verified": false, - "line_number": 124, + "line_number": 123, "type": "Secret Keyword", "verified_result": null }, @@ -3820,7 +3814,7 @@ "hashed_secret": "b732fb611fd46a38e8667f9972e0cde777fbe37f", "is_secret": false, "is_verified": false, - "line_number": 356, + "line_number": 354, "type": "Secret Keyword", "verified_result": null } @@ -4370,11 +4364,19 @@ } ], "website/docs/r/database.html.markdown": [ + { + "hashed_secret": "10c28f9cf0668595d45c1090a7b4a2ae98edfa58", + "is_secret": false, + "is_verified": false, + "line_number": 153, + "type": "Secret Keyword", + "verified_result": null + }, { "hashed_secret": "2317aa72dafa0a07f05af47baa2e388f95dcf6f3", "is_secret": false, "is_verified": 
false, - "line_number": 494, + "line_number": 541, "type": "Secret Keyword", "verified_result": null }, @@ -4382,7 +4384,7 @@ "hashed_secret": "ddf75a48487b387b1dc328ac0a942377b377c556", "is_secret": false, "is_verified": false, - "line_number": 559, + "line_number": 606, "type": "Secret Keyword", "verified_result": null }, @@ -4390,7 +4392,7 @@ "hashed_secret": "91199272d5d6a574a51722ca6f3d1148edb1a0e7", "is_secret": false, "is_verified": false, - "line_number": 583, + "line_number": 630, "type": "Secret Keyword", "verified_result": null } @@ -4712,7 +4714,7 @@ "hashed_secret": "d47dcacc720a39e236679ac3e311a0d58bb6519e", "is_secret": false, "is_verified": false, - "line_number": 128, + "line_number": 127, "type": "Secret Keyword", "verified_result": null }, @@ -4720,7 +4722,7 @@ "hashed_secret": "e66e7d67fdf3c596c435fc7828b13205e4950a0f", "is_secret": false, "is_verified": false, - "line_number": 130, + "line_number": 129, "type": "Secret Keyword", "verified_result": null } @@ -4766,7 +4768,7 @@ "hashed_secret": "d47dcacc720a39e236679ac3e311a0d58bb6519e", "is_secret": false, "is_verified": false, - "line_number": 148, + "line_number": 147, "type": "Secret Keyword", "verified_result": null }, @@ -4774,7 +4776,7 @@ "hashed_secret": "e66e7d67fdf3c596c435fc7828b13205e4950a0f", "is_secret": false, "is_verified": false, - "line_number": 150, + "line_number": 149, "type": "Secret Keyword", "verified_result": null } @@ -4820,7 +4822,7 @@ "hashed_secret": "d47dcacc720a39e236679ac3e311a0d58bb6519e", "is_secret": false, "is_verified": false, - "line_number": 137, + "line_number": 139, "type": "Secret Keyword", "verified_result": null }, @@ -4828,7 +4830,7 @@ "hashed_secret": "e66e7d67fdf3c596c435fc7828b13205e4950a0f", "is_secret": false, "is_verified": false, - "line_number": 139, + "line_number": 141, "type": "Secret Keyword", "verified_result": null } @@ -4960,7 +4962,7 @@ "hashed_secret": "d47dcacc720a39e236679ac3e311a0d58bb6519e", "is_secret": false, "is_verified": false, - "line_number": 191, + "line_number": 192, "type": "Secret Keyword", "verified_result": null }, @@ -4968,7 +4970,7 @@ "hashed_secret": "e66e7d67fdf3c596c435fc7828b13205e4950a0f", "is_secret": false, "is_verified": false, - "line_number": 193, + "line_number": 194, "type": "Secret Keyword", "verified_result": null } @@ -4986,7 +4988,7 @@ "hashed_secret": "d47dcacc720a39e236679ac3e311a0d58bb6519e", "is_secret": false, "is_verified": false, - "line_number": 122, + "line_number": 121, "type": "Secret Keyword", "verified_result": null }, @@ -4994,7 +4996,7 @@ "hashed_secret": "e66e7d67fdf3c596c435fc7828b13205e4950a0f", "is_secret": false, "is_verified": false, - "line_number": 124, + "line_number": 123, "type": "Secret Keyword", "verified_result": null } diff --git a/.travis.yml b/.travis.yml index 06452293b7..8b5a0e900f 100644 --- a/.travis.yml +++ b/.travis.yml @@ -2,7 +2,7 @@ dist: bionic sudo: false language: go go: - - 1.18.x + - 1.19.x addons: apt: diff --git a/CHANGELOG.md b/CHANGELOG.md index be65d39357..d6842cdc6e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,172 @@ +# 1.62.0 (Jan 30, 2024) +Features +* Support for Power Virtual Instance + - **Datasources** + - ibm_pi_volume_clone + - **Resources** + - ibm_pi_volume_clone +Enhancements +* Support security groups for Kuberentes workers ([4953](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/4953)) +* Support service_subnet, pod_subnet for Satellite location ([4953](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/4953)) +* Add resource instance 
sample config code part in the website doc ([5023](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5023)) +* fix(vpc-routing-table): support removing of advertise routes and accept routes from array ([5039](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5039)) +* support parameters for resource instance datasource ([5065](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5065)) +* remove forcenew from workerpool fields in cluster resource and added ApplyOnce ([4955](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/4955)) +* SM fixes ([5045](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5045)) +* Terraform support for ICD isolated compute and multitenant cores ([4628](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/4628)) +* Refactor Cloud connection refactor data source and documentation ([5053](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5053)) +* support empty lists for CBR rule contexts and zone addresses ([5058](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5058)) +* fix(IAM Policy Management): Add operator support to subject_attributes in Authorization Policy ([5076](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5076)) +* fix(routing-table-route) - fix routing table route advertise patch and action ([5069](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5069)) +* encode test ids as constants for easy replacement ([5059](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5059)) +* Adding the fix for cos deletion access denied issue ([5083](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5083)) +* fix: fixed name update issue on is_instance boot_volume ([5084](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5084)) + +BugFixes +* CD scc doc updates ([4984](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/4984)) +* Fix mtu requirement bug ([5027](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5027)) +* listing all the connections for transit gateway over the pagination set ([5033](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5033)) +* Projects issue #2672 - "Terraform sees inputs as changed ([5042](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5033)) +* added a nil check on data source of bm servers ([5062](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5062)) +* Soft remove the datasources for secretManager v1 ([5063](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5063)) +* Schematics agent related fixes for GA ([5041](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5041)) +* SM docs fix ([5080](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5080)) + +# 1.62.0-beta0 (Jan 21, 2024) Features * Support for Power Virtual Instance + - **Datasources** + - ibm_pi_volume_clone + - **Resources** + - ibm_pi_volume_clone Enhancements +* Support security groups for Kubernetes workers ([4953](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/4953)) +* Support service_subnet, pod_subnet for Satellite location ([4953](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/4953)) +* Add resource instance sample config code part in the website doc ([5023](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5023)) +* fix(vpc-routing-table): support removing of advertise routes and accept routes from array ([5039](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5039)) +* support parameters for resource instance datasource
([5065](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5065)) +* remove forcenew from workerpool fields in cluster resource and added ApplyOnce ([4955](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/4955)) +* SM fixes ([5045](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5045)) + +BugFixes +* CD scc doc updates ([4984](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/4984)) +* Fix mtu requirement bug ([5027](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5027)) +* listing all the connection for transit gateway over the pagination set ([5033](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5033)) +* Projects issue #2672 - "Terraform sees inputs as changed ([5042](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5033)) +* added a nil check on data source of bm servers ([5062](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5062)) +* Soft remove the datasources for secretManager v1 ([5063](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5063)) +* Schematics agent related fixes for GA ([5041](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5041)) + + +# 1.61.0 (Jan 05, 2024) +Features +* Support for MQ on Cloud + - **Datasources** + - ibm_mqcloud_queue_manager + - ibm_mqcloud_queue_manager_status + - ibm_mqcloud_application + - ibm_mqcloud_user + - ibm_mqcloud_keystore_certificate + - ibm_mqcloud_truststore_certificate + - **Resources** + - ibm_mqcloud_queue_manager + - ibm_mqcloud_application + - ibm_mqcloud_user + - ibm_mqcloud_keystore_certificate + - ibm_mqcloud_truststore_certificate +* Support for Secret Manager + - **Datasources** + - ibm_sm_service_credentials_secret_metadata + - ibm_sm_service_credentials_secret + - **Resources** + - ibm_sm_service_credentials_secret +* Support for VPC + - **Datasources** + - ibm_is_snapshot_consistency_group + - ibm_is_snapshot_consistency_groups + - **Resources** + - ibm_is_snapshot_consistency_group + +Enhancements +* feat(Cloud Databases): Database user password complexity validation ([4931](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/4931)) +* Update pi_user_data to accept string input ([4974](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/4974)) +* support host_link_agent_endpoint for Satellite host ([4970](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/4970)) +* Add mtu and accessConfig flags to subnet create commands for terraform ([4690](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/4690)) +* feat(Cloud Databases): Redis Database User RBAC support ([4982](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/4982)) +* fix(Cloud Databases): fix Unwrap return value for go 1.18 compat ([4991](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/4991)) +* update issue fixed ibm_is_subnet_reserved_ip ([4988](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/4988)) +* Adding Flexible IOPS ([4992](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/4992)) +* Removing Support For Power VPN Create ([4993](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/4993)) +* Feature(share-crr): Share cross region replication ([4995](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/4995)) +* Enhancement: Added operating system attributes to is images datasources ([4998](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/4998)) +* added enhancement to one step delegate resolver in is_vpc ([5000](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5000)) +* resolved delete issue for the 
floated nics on bm server ([5001](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5001)) +* Regenerate projects provider based off the latest go sdk ([5003](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5003)) +* Support route advertising in vpc ([5005](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5005)) +* Add a nil check for boottarget of bms ([5014](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5014)) +* Delete wait logic changes ([5017](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5017)) + +BugFixes +* Fix IBM pi documentation bug ([4969](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/4969)) +* Incorrect key_algorithm handling forces delete & replace of ibm_sm_private_certificate on every apply ([4978](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/4978)) +* ibm_sm_private_certificate_configuration_template arguments ttl and max_ttl are not documented ([4977](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/4977)) +* ibm_sm_private_certificate unsupported argument: rotation.rotate_keys ([4976](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/4976)) +* data ibm_schematics_workspace bug ([4990](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/4990)) +* Secret Manager docs bug fix ([5018](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5018)) + +# 1.61.0-beta0 (Dec 20, 2023) +Features +* Support for MQ on Cloud + - **Datasources** + - ibm_mqcloud_queue_manager + - ibm_mqcloud_queue_manager_status + - ibm_mqcloud_application + - ibm_mqcloud_user + - ibm_mqcloud_keystore_certificate + - ibm_mqcloud_truststore_certificate + - **Resources** + - ibm_mqcloud_queue_manager + - ibm_mqcloud_application + - ibm_mqcloud_user + - ibm_mqcloud_keystore_certificate + - ibm_mqcloud_truststore_certificate +* Support for Secret Manager + - **Datasources** + - ibm_sm_service_credentials_secret_metadata + - ibm_sm_service_credentials_secret + - **Resources** + - ibm_sm_service_credentials_secret +* Support for VPC + - **Datasources** + - ibm_is_snapshot_consistency_group + - ibm_is_snapshot_consistency_groups + - **Resources** + - ibm_is_snapshot_consistency_group + +Enhancements +* feat(Cloud Databases): Database user password complexity validation ([4931](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/4931)) +* Update pi_user_data to accept string input ([4974](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/4974)) +* support host_link_agent_endpoint for Satellite host ([4970](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/4970)) +* Add mtu and accessConfig flags to subnet create commands for terraform ([4690](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/4690)) +* feat(Cloud Databases): Redis Database User RBAC support ([4982](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/4982)) +* fix(Cloud Databases): fix Unwrap return value for go 1.18 compat ([4991](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/4991)) +* update issue fixed ibm_is_subnet_reserved_ip ([4988](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/4988)) +* Adding Flexible IOPS ([4992](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/4992)) +* Removing Support For Power VPN Create ([4993](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/4993)) +* Feature(share-crr): Share cross region replication ([4995](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/4995)) +* Enhancement: Added operating system attributes to is images datasources 
([4998](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/4998)) +* added enhancement to one step delegate resolver in is_vpc ([5000](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5000)) +* resolved delete issue for the floated nics on bm server ([5001](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/5001)) + +BugFixes +* Fix IBM pi documentation bug ([4969](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/4969)) + +# 1.60.1 (Nov 09, 2023) + +BugFixes +* Regenerate Projects TF to fix generated doc and samples + ([4961](https://github.com/IBM-Cloud/terraform-provider-ibm/pull/4961)) + # 1.60.0 (Nov 29, 2023) Features * Support for Projects diff --git a/examples/ibm-is-ng/main.tf b/examples/ibm-is-ng/main.tf index 896216cc78..c344f98a6c 100644 --- a/examples/ibm-is-ng/main.tf +++ b/examples/ibm-is-ng/main.tf @@ -1408,7 +1408,14 @@ resource "ibm_is_image_obsolete" "example" { image = ibm_is_image.image1.id } - +resource "ibm_is_share" "share" { + zone = "us-east-1" + source_share_crn = "crn:v1:staging:public:is:us-south-1:a/efe5afc483594adaa8325e2b4d1290df::share:r134-d8c8821c-a227-451d-a9ed-0c0cd2358829" + encryption_key = "crn:v1:staging:public:kms:us-south:a/efe5afc483594adaa8325e2b4d1290df:1be45161-6dae-44ca-b248-837f98004057:key:3dd21cc5-cc20-4f7c-bc62-8ec9a8a3d1bd" + replication_cron_spec = "5 * * * *" + name = "tfp-temp-crr" + profile = "dp2" +} //snapshot consistency group resource "ibm_is_snapshot_consistency_group" "is_snapshot_consistency_group_instance" { diff --git a/examples/ibm-mqcloud/README.md b/examples/ibm-mqcloud/README.md index ae8ef2a8dd..05d15097c6 100644 --- a/examples/ibm-mqcloud/README.md +++ b/examples/ibm-mqcloud/README.md @@ -1,14 +1,21 @@ -# Example for MqcloudV1 +# Examples for MQ on Cloud -This example illustrates how to use the MqcloudV1 +These examples illustrate how to use the resources and data sources associated with MQ on Cloud. -The following types of resources are supported: +The following resources are supported: +* ibm_mqcloud_queue_manager +* ibm_mqcloud_application +* ibm_mqcloud_user +* ibm_mqcloud_keystore_certificate +* ibm_mqcloud_truststore_certificate -* mqcloud_queue_manager -* mqcloud_application -* mqcloud_user -* mqcloud_keystore_certificate -* mqcloud_truststore_certificate +The following data sources are supported: +* ibm_mqcloud_queue_manager +* ibm_mqcloud_queue_manager_status +* ibm_mqcloud_application +* ibm_mqcloud_user +* ibm_mqcloud_truststore_certificate +* ibm_mqcloud_keystore_certificate ## Usage @@ -22,13 +29,12 @@ $ terraform apply Run `terraform destroy` when you don't need these resources. +## MQ on Cloud resources -## MqcloudV1 resources - -mqcloud_queue_manager resource: +### Resource: ibm_mqcloud_queue_manager ```hcl -resource "mqcloud_queue_manager" "mqcloud_queue_manager_instance" { +resource "ibm_mqcloud_queue_manager" "mqcloud_queue_manager_instance" { service_instance_guid = var.mqcloud_queue_manager_service_instance_guid name = var.mqcloud_queue_manager_name display_name = var.mqcloud_queue_manager_display_name @@ -37,95 +43,301 @@ resource "mqcloud_queue_manager" "mqcloud_queue_manager_instance" { version = var.mqcloud_queue_manager_version } ``` -mqcloud_application resource: + +#### Inputs + +| Name | Description | Type | Required | +|------|-------------|------|---------| +| ibmcloud\_api\_key | IBM Cloud API key | `string` | true | +| service_instance_guid | The GUID that uniquely identifies the MQ on Cloud service instance. 
| `string` | true | +| name | A queue manager name conforming to MQ restrictions. | `string` | true | +| display_name | A displayable name for the queue manager - limited only in length. | `string` | false | +| location | The locations in which the queue manager could be deployed. | `string` | true | +| size | The queue manager sizes of deployment available. Deployment of lite queue managers for aws_us_east_1 and aws_eu_west_1 locations is not available. | `string` | true | +| version | The MQ version of the queue manager. | `string` | false | + +#### Outputs + +| Name | Description | +|------|-------------| +| status_uri | A reference uri to get deployment status of the queue manager. | +| web_console_url | The url through which to access the web console for this queue manager. | +| rest_api_endpoint_url | The url through which to access REST APIs for this queue manager. | +| administrator_api_endpoint_url | The url through which to access the Admin REST APIs for this queue manager. | +| connection_info_uri | The uri through which the CDDT for this queue manager can be obtained. | +| date_created | RFC3339 formatted UTC date for when the queue manager was created. | +| upgrade_available | Describes whether an upgrade is available for this queue manager. | +| available_upgrade_versions_uri | The uri through which the available versions to upgrade to can be found for this queue manager. | +| href | The URL for this queue manager. | +| queue_manager_id | The ID of the queue manager which was allocated on creation, and can be used for delete calls. | + +### Resource: ibm_mqcloud_application ```hcl -resource "mqcloud_application" "mqcloud_application_instance" { +resource "ibm_mqcloud_application" "mqcloud_application_instance" { service_instance_guid = var.mqcloud_application_service_instance_guid name = var.mqcloud_application_name } ``` -mqcloud_user resource: + +#### Inputs + +| Name | Description | Type | Required | +|------|-------------|------|---------| +| ibmcloud\_api\_key | IBM Cloud API key | `string` | true | +| service_instance_guid | The GUID that uniquely identifies the MQ on Cloud service instance. | `string` | true | +| name | The name of the application - conforming to MQ rules. | `string` | true | + +#### Outputs + +| Name | Description | +|------|-------------| +| create_api_key_uri | The URI to create a new apikey for the application. | +| href | The URL for this application. | +| application_id | The ID of the application which was allocated on creation, and can be used for delete calls. | + +### Resource: ibm_mqcloud_user ```hcl -resource "mqcloud_user" "mqcloud_user_instance" { +resource "ibm_mqcloud_user" "mqcloud_user_instance" { service_instance_guid = var.mqcloud_user_service_instance_guid name = var.mqcloud_user_name email = var.mqcloud_user_email } ``` -mqcloud_keystore_certificate resource: + +#### Inputs + +| Name | Description | Type | Required | +|------|-------------|------|---------| +| ibmcloud\_api\_key | IBM Cloud API key | `string` | true | +| service_instance_guid | The GUID that uniquely identifies the MQ on Cloud service instance. | `string` | true | +| name | The shortname of the user that will be used as the IBM MQ administrator in interactions with a queue manager for this service instance. | `string` | true | +| email | The email of the user. | `string` | true | + +#### Outputs + +| Name | Description | +|------|-------------| +| href | The URL for the user details. 
| +| user_id | The ID of the user which was allocated on creation, and can be used for delete calls. | + +### Resource: ibm_mqcloud_keystore_certificate ```hcl -resource "mqcloud_keystore_certificate" "mqcloud_keystore_certificate_instance" { +resource "ibm_mqcloud_keystore_certificate" "mqcloud_keystore_certificate_instance" { service_instance_guid = var.mqcloud_keystore_certificate_service_instance_guid queue_manager_id = var.mqcloud_keystore_certificate_queue_manager_id label = var.mqcloud_keystore_certificate_label + certificate_file = var.mqcloud_keystore_certificate_certificate_file } ``` -mqcloud_truststore_certificate resource: + +#### Inputs + +| Name | Description | Type | Required | +|------|-------------|------|---------| +| ibmcloud\_api\_key | IBM Cloud API key | `string` | true | +| service_instance_guid | The GUID that uniquely identifies the MQ on Cloud service instance. | `string` | true | +| queue_manager_id | The id of the queue manager to retrieve its full details. | `string` | true | +| label | The label to use for the certificate to be uploaded. | `string` | true | +| certificate_file | The filename and path of the certificate to be uploaded. | `base64-encoded string` | true | + +#### Outputs + +| Name | Description | +|------|-------------| +| certificate_type | The type of certificate. | +| fingerprint_sha256 | Fingerprint SHA256. | +| subject_dn | Subject's Distinguished Name. | +| subject_cn | Subject's Common Name. | +| issuer_dn | Issuer's Distinguished Name. | +| issuer_cn | Issuer's Common Name. | +| issued | Date certificate was issued. | +| expiry | Expiry date for the certificate. | +| is_default | Indicates whether it is the queue manager's default certificate. | +| dns_names_total_count | The total count of dns names. | +| dns_names | The list of DNS names. | +| href | The URL for this key store certificate. | +| certificate_id | ID of the certificate. | + +### Resource: ibm_mqcloud_truststore_certificate ```hcl -resource "mqcloud_truststore_certificate" "mqcloud_truststore_certificate_instance" { +resource "ibm_mqcloud_truststore_certificate" "mqcloud_truststore_certificate_instance" { service_instance_guid = var.mqcloud_truststore_certificate_service_instance_guid queue_manager_id = var.mqcloud_truststore_certificate_queue_manager_id label = var.mqcloud_truststore_certificate_label + certificate_file = var.mqcloud_truststore_certificate_certificate_file } ``` -## MqcloudV1 data sources +#### Inputs + +| Name | Description | Type | Required | +|------|-------------|------|---------| +| ibmcloud\_api\_key | IBM Cloud API key | `string` | true | +| service_instance_guid | The GUID that uniquely identifies the MQ on Cloud service instance. | `string` | true | +| queue_manager_id | The id of the queue manager to retrieve its full details. | `string` | true | +| label | The label to use for the certificate to be uploaded. | `string` | true | +| certificate_file | The filename and path of the certificate to be uploaded. | `base64-encoded string` | true | + +#### Outputs -mqcloud_queue_manager data source: +| Name | Description | +|------|-------------| +| certificate_type | The type of certificate. | +| fingerprint_sha256 | Fingerprint SHA256. | +| subject_dn | Subject's Distinguished Name. | +| subject_cn | Subject's Common Name. | +| issuer_dn | Issuer's Distinguished Name. | +| issuer_cn | Issuer's Common Name. | +| issued | The Date the certificate was issued. | +| expiry | Expiry date for the certificate. 
| +| trusted | Indicates whether a certificate is trusted. | +| href | The URL for this trust store certificate. | +| certificate_id | Id of the certificate. | + +## MQ on Cloud data sources + +### Data source: ibm_mqcloud_queue_manager ```hcl -data "mqcloud_queue_manager" "mqcloud_queue_manager_instance" { - service_instance_guid = var.mqcloud_queue_manager_service_instance_guid - name = var.mqcloud_queue_manager_name +data "ibm_mqcloud_queue_manager" "mqcloud_queue_manager_instance" { + service_instance_guid = var.data_mqcloud_queue_manager_service_instance_guid + name = var.data_mqcloud_queue_manager_name } ``` -mqcloud_queue_manager_status data source: + +#### Inputs + +| Name | Description | Type | Required | +|------|-------------|------|---------| +| service_instance_guid | The GUID that uniquely identifies the MQ on Cloud service instance. | `string` | true | +| name | A queue manager name conforming to MQ restrictions. | `string` | false | + +#### Outputs + +| Name | Description | +|------|-------------| +| queue_managers | List of queue managers. | + +### Data source: ibm_mqcloud_queue_manager_status ```hcl -data "mqcloud_queue_manager_status" "mqcloud_queue_manager_status_instance" { +data "ibm_mqcloud_queue_manager_status" "mqcloud_queue_manager_status_instance" { service_instance_guid = var.mqcloud_queue_manager_status_service_instance_guid queue_manager_id = var.mqcloud_queue_manager_status_queue_manager_id } ``` -mqcloud_application data source: + +#### Inputs + +| Name | Description | Type | Required | +|------|-------------|------|---------| +| service_instance_guid | The GUID that uniquely identifies the MQ on Cloud service instance. | `string` | true | +| queue_manager_id | The id of the queue manager to retrieve its full details. | `string` | true | + +#### Outputs + +| Name | Description | +|------|-------------| +| status | The deploying and failed states are not queue manager states; they are states which can occur when the request to deploy has been fired, or when that request has failed without producing a queue manager to have any state. The other states map to the queue manager states. State "ending" is either quiescing or ending immediately. State "ended" is either ended normally or ended immediately. The others map one to one with queue manager states. | + +### Data source: ibm_mqcloud_application ```hcl -data "mqcloud_application" "mqcloud_application_instance" { - service_instance_guid = var.mqcloud_application_service_instance_guid - name = var.mqcloud_application_name +data "ibm_mqcloud_application" "mqcloud_application_instance" { + service_instance_guid = var.data_mqcloud_application_service_instance_guid + name = var.data_mqcloud_application_name } ``` -mqcloud_user data source: + +#### Inputs + +| Name | Description | Type | Required | +|------|-------------|------|---------| +| service_instance_guid | The GUID that uniquely identifies the MQ on Cloud service instance. | `string` | true | +| name | The name of the application - conforming to MQ rules. | `string` | false | + +#### Outputs + +| Name | Description | +|------|-------------| +| applications | List of applications.
| + +### Data source: ibm_mqcloud_user ```hcl -data "mqcloud_user" "mqcloud_user_instance" { - service_instance_guid = var.mqcloud_user_service_instance_guid - name = var.mqcloud_user_name +data "ibm_mqcloud_user" "mqcloud_user_instance" { + service_instance_guid = var.data_mqcloud_user_service_instance_guid + name = var.data_mqcloud_user_name } ``` -mqcloud_truststore_certificate data source: + +#### Inputs + +| Name | Description | Type | Required | +|------|-------------|------|---------| +| service_instance_guid | The GUID that uniquely identifies the MQ on Cloud service instance. | `string` | true | +| name | The shortname of the user that will be used as the IBM MQ administrator in interactions with a queue manager for this service instance. | `string` | false | + +#### Outputs + +| Name | Description | +|------|-------------| +| users | List of users. | + +### Data source: ibm_mqcloud_truststore_certificate ```hcl -data "mqcloud_truststore_certificate" "mqcloud_truststore_certificate_instance" { - service_instance_guid = var.mqcloud_truststore_certificate_service_instance_guid - queue_manager_id = var.mqcloud_truststore_certificate_queue_manager_id - label = var.mqcloud_truststore_certificate_label +data "ibm_mqcloud_truststore_certificate" "mqcloud_truststore_certificate_instance" { + service_instance_guid = var.data_mqcloud_truststore_certificate_service_instance_guid + queue_manager_id = var.data_mqcloud_truststore_certificate_queue_manager_id + label = var.data_mqcloud_truststore_certificate_label } ``` -mqcloud_keystore_certificate data source: + +#### Inputs + +| Name | Description | Type | Required | +|------|-------------|------|---------| +| service_instance_guid | The GUID that uniquely identifies the MQ on Cloud service instance. | `string` | true | +| queue_manager_id | The id of the queue manager to retrieve its full details. | `string` | true | +| label | Certificate label in queue manager store. | `string` | false | + +#### Outputs + +| Name | Description | +|------|-------------| +| total_count | The total count of trust store certificates. | +| trust_store | The list of trust store certificates. | + +### Data source: ibm_mqcloud_keystore_certificate ```hcl -data "mqcloud_keystore_certificate" "mqcloud_keystore_certificate_instance" { - service_instance_guid = var.mqcloud_keystore_certificate_service_instance_guid - queue_manager_id = var.mqcloud_keystore_certificate_queue_manager_id - label = var.mqcloud_keystore_certificate_label +data "ibm_mqcloud_keystore_certificate" "mqcloud_keystore_certificate_instance" { + service_instance_guid = var.data_mqcloud_keystore_certificate_service_instance_guid + queue_manager_id = var.data_mqcloud_keystore_certificate_queue_manager_id + label = var.data_mqcloud_keystore_certificate_label } ``` +#### Inputs + +| Name | Description | Type | Required | +|------|-------------|------|---------| +| service_instance_guid | The GUID that uniquely identifies the MQ on Cloud service instance. | `string` | true | +| queue_manager_id | The id of the queue manager to retrieve its full details. | `string` | true | +| label | Certificate label in queue manager store. | `string` | false | + +#### Outputs + +| Name | Description | +|------|-------------| +| total_count | The total count of key store certificates. | +| key_store | The list of key store certificates. | + ## Assumptions 1. 
TODO @@ -145,31 +357,3 @@ data "mqcloud_keystore_certificate" "mqcloud_keystore_certificate_instance" { | Name | Version | |------|---------| | ibm | 1.13.1 | - -## Inputs - -| Name | Description | Type | Required | -|------|-------------|------|---------| -| ibmcloud\_api\_key | IBM Cloud API key | `string` | true | -| service_instance_guid | The GUID that uniquely identifies the MQ on Cloud service instance. | `string` | true | -| name | A queue manager name conforming to MQ restrictions. | `string` | true | -| display_name | A displayable name for the queue manager - limited only in length. | `string` | false | -| location | The locations in which the queue manager could be deployed. | `string` | true | -| size | The queue manager sizes of deployment available. Deployment of lite queue managers for aws_us_east_1 and aws_eu_west_1 locations is not available. | `string` | true | -| version | The MQ version of the queue manager. | `string` | false | -| name | The name of the application - conforming to MQ rules. | `string` | true | -| name | The shortname of the user that will be used as the IBM MQ administrator in interactions with a queue manager for this service instance. | `string` | true | -| email | The email of the user. | `string` | true | -| queue_manager_id | The id of the queue manager to retrieve its full details. | `string` | true | -| label | Certificate label in queue manager store. | `string` | true | - -## Outputs - -| Name | Description | -|------|-------------| -| mqcloud_queue_manager | mqcloud_queue_manager object | -| mqcloud_queue_manager_status | mqcloud_queue_manager_status object | -| mqcloud_application | mqcloud_application object | -| mqcloud_user | mqcloud_user object | -| mqcloud_truststore_certificate | mqcloud_truststore_certificate object | -| mqcloud_keystore_certificate | mqcloud_keystore_certificate object | diff --git a/examples/ibm-mqcloud/main.tf b/examples/ibm-mqcloud/main.tf index b89e08c72a..16dc2df7dc 100644 --- a/examples/ibm-mqcloud/main.tf +++ b/examples/ibm-mqcloud/main.tf @@ -30,6 +30,7 @@ resource "ibm_mqcloud_keystore_certificate" "mqcloud_keystore_certificate_instan service_instance_guid = var.mqcloud_keystore_certificate_service_instance_guid queue_manager_id = var.mqcloud_keystore_certificate_queue_manager_id label = var.mqcloud_keystore_certificate_label + certificate_file = var.mqcloud_keystore_certificate_certificate_file } // Provision mqcloud_truststore_certificate resource instance @@ -37,6 +38,7 @@ resource "ibm_mqcloud_truststore_certificate" "mqcloud_truststore_certificate_in service_instance_guid = var.mqcloud_truststore_certificate_service_instance_guid queue_manager_id = var.mqcloud_truststore_certificate_queue_manager_id label = var.mqcloud_truststore_certificate_label + certificate_file = var.mqcloud_truststore_certificate_certificate_file } // Data source is not linked to a resource instance @@ -44,9 +46,8 @@ resource "ibm_mqcloud_truststore_certificate" "mqcloud_truststore_certificate_in /* // Create mqcloud_queue_manager data source data "ibm_mqcloud_queue_manager" "mqcloud_queue_manager_instance" { - service_instance_guid = var.mqcloud_queue_manager_service_instance_guid - - name = var.mqcloud_queue_manager_name + service_instance_guid = var.data_mqcloud_queue_manager_service_instance_guid + name = var.data_mqcloud_queue_manager_name } */ @@ -65,8 +66,8 @@ data "ibm_mqcloud_queue_manager_status" "mqcloud_queue_manager_status_instance" /* // Create mqcloud_application data source data "ibm_mqcloud_application" 
"mqcloud_application_instance" { - service_instance_guid = var.mqcloud_application_service_instance_guid - name = var.mqcloud_application_name + service_instance_guid = var.data_mqcloud_application_service_instance_guid + name = var.data_mqcloud_application_name } */ @@ -75,8 +76,8 @@ data "ibm_mqcloud_application" "mqcloud_application_instance" { /* // Create mqcloud_user data source data "ibm_mqcloud_user" "mqcloud_user_instance" { - service_instance_guid = var.mqcloud_user_service_instance_guid - name = var.mqcloud_user_name + service_instance_guid = var.data_mqcloud_user_service_instance_guid + name = var.data_mqcloud_user_name } */ @@ -85,9 +86,9 @@ data "ibm_mqcloud_user" "mqcloud_user_instance" { /* // Create mqcloud_truststore_certificate data source data "ibm_mqcloud_truststore_certificate" "mqcloud_truststore_certificate_instance" { - service_instance_guid = var.mqcloud_truststore_certificate_service_instance_guid - queue_manager_id = var.mqcloud_truststore_certificate_queue_manager_id - label = var.mqcloud_truststore_certificate_label + service_instance_guid = var.data_mqcloud_truststore_certificate_service_instance_guid + queue_manager_id = var.data_mqcloud_truststore_certificate_queue_manager_id + label = var.data_mqcloud_truststore_certificate_label } */ @@ -96,8 +97,8 @@ data "ibm_mqcloud_truststore_certificate" "mqcloud_truststore_certificate_instan /* // Create mqcloud_keystore_certificate data source data "ibm_mqcloud_keystore_certificate" "mqcloud_keystore_certificate_instance" { - service_instance_guid = var.mqcloud_keystore_certificate_service_instance_guid - queue_manager_id = var.mqcloud_keystore_certificate_queue_manager_id - label = var.mqcloud_keystore_certificate_label + service_instance_guid = var.data_mqcloud_keystore_certificate_service_instance_guid + queue_manager_id = var.data_mqcloud_keystore_certificate_queue_manager_id + label = var.data_mqcloud_keystore_certificate_label } */ diff --git a/examples/ibm-mqcloud/variables.tf b/examples/ibm-mqcloud/variables.tf index dba4d0fff0..66a9dda4ae 100644 --- a/examples/ibm-mqcloud/variables.tf +++ b/examples/ibm-mqcloud/variables.tf @@ -22,7 +22,7 @@ variable "mqcloud_queue_manager_display_name" { variable "mqcloud_queue_manager_location" { description = "The locations in which the queue manager could be deployed." type = string - default = "reserved-eu-fr-cluster-f884" + default = "reserved-eu-de-cluster-f884" } variable "mqcloud_queue_manager_size" { description = "The queue manager sizes of deployment available. Deployment of lite queue managers for aws_us_east_1 and aws_eu_west_1 locations is not available." @@ -53,24 +53,6 @@ variable "mqcloud_user_service_instance_guid" { type = string default = "Service Instance ID" } - -// Data source arguments for mqcloud_application -variable "mqcloud_application_service_instance_guid" { - description = "The GUID that uniquely identifies the MQ on Cloud service instance." - type = string - default = "Service Instance ID" -} -variable "mqcloud_user_name" { - description = "The shortname of the user that will be used as the IBM MQ administrator in interactions with a queue manager for this service instance." - type = string - default = "name" -} - -variable "mqcloud_application_name" { - description = "The name of the application - conforming to MQ rules." - type = string - default = "name" -} variable "mqcloud_user_name" { description = "The shortname of the user that will be used as the IBM MQ administrator in interactions with a queue manager for this service instance." 
type = string @@ -94,10 +76,15 @@ variable "mqcloud_keystore_certificate_queue_manager_id" { default = "Queue Manager ID" } variable "mqcloud_keystore_certificate_label" { - description = "Certificate label in queue manager store." + description = "The label to use for the certificate to be uploaded." type = string default = "label" } +variable "mqcloud_keystore_certificate_certificate_file" { + description = "The filename and path of the certificate to be uploaded." + type = string + default = "SGVsbG8gd29ybGQ=" +} // Resource arguments for mqcloud_truststore_certificate variable "mqcloud_truststore_certificate_service_instance_guid" { @@ -110,21 +97,24 @@ variable "mqcloud_truststore_certificate_queue_manager_id" { type = string default = "Queue Manager ID" } - variable "mqcloud_truststore_certificate_label" { - description = "Certificate label in queue manager store." + description = "The label to use for the certificate to be uploaded." type = string default = "label" } +variable "mqcloud_truststore_certificate_certificate_file" { + description = "The filename and path of the certificate to be uploaded." + type = string + default = "SGVsbG8gd29ybGQ=" +} // Data source arguments for mqcloud_queue_manager -variable "mqcloud_queue_manager_service_instance_guid" { +variable "data_mqcloud_queue_manager_service_instance_guid" { description = "The GUID that uniquely identifies the MQ on Cloud service instance." type = string default = "Service Instance ID" } - -variable "mqcloud_queue_manager_name" { +variable "data_mqcloud_queue_manager_name" { description = "A queue manager name conforming to MQ restrictions." type = string default = "name" @@ -143,60 +133,58 @@ variable "mqcloud_queue_manager_status_queue_manager_id" { } // Data source arguments for mqcloud_application -variable "mqcloud_application_service_instance_guid" { +variable "data_mqcloud_application_service_instance_guid" { description = "The GUID that uniquely identifies the MQ on Cloud service instance." type = string default = "Service Instance ID" } - -variable "mqcloud_application_name" { +variable "data_mqcloud_application_name" { description = "The name of the application - conforming to MQ rules." type = string default = "name" } // Data source arguments for mqcloud_user -variable "mqcloud_user_service_instance_guid" { +variable "data_mqcloud_user_service_instance_guid" { description = "The GUID that uniquely identifies the MQ on Cloud service instance." type = string default = "Service Instance ID" } - -variable "mqcloud_user_name" { +variable "data_mqcloud_user_name" { description = "The shortname of the user that will be used as the IBM MQ administrator in interactions with a queue manager for this service instance." type = string default = "name" } // Data source arguments for mqcloud_truststore_certificate -variable "mqcloud_truststore_certificate_service_instance_guid" { +variable "data_mqcloud_truststore_certificate_service_instance_guid" { description = "The GUID that uniquely identifies the MQ on Cloud service instance." type = string default = "Service Instance ID" } -variable "mqcloud_truststore_certificate_queue_manager_id" { +variable "data_mqcloud_truststore_certificate_queue_manager_id" { description = "The id of the queue manager to retrieve its full details." type = string default = "Queue Manager ID" } -variable "mqcloud_truststore_certificate_label" { +variable "data_mqcloud_truststore_certificate_label" { description = "Certificate label in queue manager store." 
type = string default = "label" } // Data source arguments for mqcloud_keystore_certificate -variable "mqcloud_keystore_certificate_service_instance_guid" { +variable "data_mqcloud_keystore_certificate_service_instance_guid" { description = "The GUID that uniquely identifies the MQ on Cloud service instance." type = string default = "Service Instance ID" } -variable "mqcloud_keystore_certificate_queue_manager_id" { +variable "data_mqcloud_keystore_certificate_queue_manager_id" { description = "The id of the queue manager to retrieve its full details." type = string default = "Queue Manager ID" } -variable "mqcloud_keystore_certificate_label" { +variable "data_mqcloud_keystore_certificate_label" { description = "Certificate label in queue manager store." type = string default = "label" diff --git a/examples/ibm-power/main.tf b/examples/ibm-power/main.tf index bf6c35f70e..bc34f2c8f6 100644 --- a/examples/ibm-power/main.tf +++ b/examples/ibm-power/main.tf @@ -1,49 +1,64 @@ -data "ibm_pi_image" "data_source_image" { - pi_cloud_instance_id = var.cloud_instance_id +# Create a workspace +resource "ibm_resource_instance" "location" { + name = var.workspace_name + resource_group_id = var.resource_group_id + location = var.datacenter + service = "power-iaas" + plan = "power-virtual-server-group" +} + +# Create an image +resource "ibm_pi_image" "image" { + pi_cloud_instance_id = ibm_resource_instance.location.guid pi_image_name = var.image_name + pi_image_id = var.image_id } -resource "ibm_pi_key" "key" { - pi_cloud_instance_id = var.cloud_instance_id - pi_key_name = var.ssh_key_name - pi_ssh_key = var.ssh_key_rsa +data "ibm_pi_image" "data_source_image" { + pi_cloud_instance_id = ibm_resource_instance.location.guid + pi_image_name = resource.ibm_pi_image.image.pi_image_name } -data "ibm_pi_key" "data_source_key" { - depends_on = [ibm_pi_key.key] - pi_cloud_instance_id = var.cloud_instance_id - pi_key_name = var.ssh_key_name -} -resource "ibm_pi_network" "network" { - pi_cloud_instance_id = var.cloud_instance_id +# Create a network +resource "ibm_pi_network" "private_network" { + pi_cloud_instance_id = ibm_resource_instance.location.guid pi_network_name = var.network_name pi_network_type = var.network_type - count = var.network_count + pi_cidr = var.network_cidr + pi_dns = [var.network_dns] + pi_network_mtu = 2000 } -data "ibm_pi_public_network" "data_source_network" { - depends_on = [ibm_pi_network.network] - - pi_cloud_instance_id = var.cloud_instance_id +data "ibm_pi_network" "data_source_private_network" { + pi_cloud_instance_id = ibm_resource_instance.location.guid + pi_network_name = resource.ibm_pi_network.private_network.pi_network_name } + +# Create a volume resource "ibm_pi_volume" "volume" { - pi_cloud_instance_id = var.cloud_instance_id + pi_cloud_instance_id = ibm_resource_instance.location.guid pi_volume_name = var.volume_name pi_volume_type = var.volume_type pi_volume_size = var.volume_size pi_volume_shareable = var.volume_shareable } data "ibm_pi_volume" "data_source_volume" { - depends_on = [ibm_pi_volume.volume] + pi_cloud_instance_id = ibm_resource_instance.location.guid + pi_volume_name = resource.ibm_pi_volume.volume.pi_volume_name +} - pi_cloud_instance_id = var.cloud_instance_id - pi_volume_name = var.volume_name +# Create an ssh key +resource "ibm_pi_key" "key" { + pi_cloud_instance_id = ibm_resource_instance.location.guid + pi_key_name = var.ssh_key_name + pi_ssh_key = var.ssh_key_rsa +} +data "ibm_pi_key" "data_source_key" { + pi_cloud_instance_id = 
ibm_resource_instance.location.guid + pi_key_name = resource.ibm_pi_key.key.pi_key_name } -resource "ibm_pi_instance" "instance" { - depends_on = [data.ibm_pi_image.data_source_image, - data.ibm_pi_key.data_source_key, - data.ibm_pi_volume.data_source_volume, - data.ibm_pi_public_network.data_source_network] - pi_cloud_instance_id = var.cloud_instance_id +# Create an instance +resource "ibm_pi_instance" "instance" { + pi_cloud_instance_id = ibm_resource_instance.location.guid pi_instance_name = var.instance_name pi_memory = var.memory pi_processors = var.processors @@ -52,13 +67,12 @@ resource "ibm_pi_instance" "instance" { pi_sys_type = var.sys_type pi_image_id = data.ibm_pi_image.data_source_image.id pi_key_pair_name = data.ibm_pi_key.data_source_key.id - pi_network { network_id = data.ibm_pi_public_network.data_source_network.id } + pi_network { + network_id = data.ibm_pi_network.data_source_private_network.id + } pi_volume_ids = [data.ibm_pi_volume.data_source_volume.id] } - data "ibm_pi_instance" "data_source_instance" { - depends_on = [ibm_pi_instance.instance] - - pi_cloud_instance_id = var.cloud_instance_id - pi_instance_name = var.instance_name -} \ No newline at end of file + pi_cloud_instance_id = ibm_resource_instance.location.guid + pi_instance_name = resource.ibm_pi_instance.instance.pi_instance_name +} diff --git a/examples/ibm-power/variables.tf b/examples/ibm-power/variables.tf index b3d9adc292..a087dea11d 100644 --- a/examples/ibm-power/variables.tf +++ b/examples/ibm-power/variables.tf @@ -1,65 +1,95 @@ -// Service / Account +## Service // Account variable "ibm_cloud_api_key" { description = "API Key" type = string default = "" } variable "region" { - description = "Reigon of Service" + description = "Region of Service" type = string default = "" } variable "zone" { description = "Zone of Service" type = string - default = "" + default = "" } -variable "cloud_instance_id" { - description = "Cloud Instance ID of Service" - type = string - default = "" + +## Workspace +variable "workspace_name" { + description = "Workspace Name" + type = string + default = "" +} +# See available datacenter regions at: https://cloud.ibm.com/apidocs/power-cloud#endpoint +variable "datacenter" { + description = "Datacenter Region" + type = string + default = "" +} +variable "resource_group_id" { + description = "Resource Group ID" + type = string + default = "" } -// Image +## Image variable "image_name" { - description = "Name of the image to be used" + description = "Name of the image in the image catalog" type = string default = "" } +variable "image_id" { + description = "ID of the image in the image catalog" + type = string + default = "" +} -// Instance -variable "instance_name" { - description = "Name of the instance" +## Private Network +variable "network_name" { + description = "Name of the network" type = string default = "" } -variable "memory" { - description = "Instance memory" - type = number - default = 1 +variable "network_type" { + description = "Type of a network" + type = string + default = "vlan" } -variable "processors" { - description = "Instance processors" - type = number - default = 1 +variable "network_cidr" { + description = "Network in CIDR notation" + type = string + default = "" } -variable "proc_type" { - description = "Instance ProcType" +variable "network_dns" { + description = "Comma separated list of DNS servers to use for this network" type = string - default = "" + default = "" } -variable "storage_type" { - description = "The storage type to be used"
+## Volume +variable "volume_name" { + description = "Name of the volume" type = string - default = "" + default = "" } -variable "sys_type" { - description = "Instance System Type" +variable "volume_size" { + description = "Size of a volume" + type = number + default = 1 +} +variable "volume_shareable" { + description = "Is a volume shareable" + type = bool + default = true +} +variable "volume_type" { + description = "Type of a volume" type = string - default = "" + default = "" } -// SSH Key +## SSH Key variable "ssh_key_name" { description = "Name of the ssh key to be used" type = string @@ -71,41 +101,34 @@ variable "ssh_key_rsa" { default = "" } -// Network -variable "network_name" { - description = "Name of the network" +## Instance +variable "instance_name" { + description = "Name of the instance" type = string default = "" } -variable "network_type" { - description = "Type of a network" - type = string - default = "" +variable "memory" { + description = "Instance memory" + type = number + default = 1 } -variable "network_count" { - description = "Number of networks to provision" +variable "processors" { + description = "Instance processors" type = number default = 1 } - -// Volume -variable "volume_name" { - description = "Name of the volume" +variable "proc_type" { + description = "Instance ProcType" type = string - default = "" -} -variable "volume_size" { - description = "Size of a volume" - type = number - default = 0.25 + default = "" } -variable "volume_shareable" { - description = "Is a volume shareable" - type = bool - default = true +variable "storage_type" { + description = "The storage type to be used" + type = string + default = "" } -variable "volume_type" { - description = "Type of a volume" +variable "sys_type" { + description = "Instance System Type" type = string - default = "" -} \ No newline at end of file + default = "" +} diff --git a/examples/ibm-power/versions.tf b/examples/ibm-power/versions.tf index 0b29488aaf..fa0e2e114c 100644 --- a/examples/ibm-power/versions.tf +++ b/examples/ibm-power/versions.tf @@ -1,7 +1,12 @@ +terraform { + required_version = ">= 0.13" +} + terraform { required_providers { ibm = { source = "IBM-Cloud/ibm" + version = "" } } -} \ No newline at end of file +} diff --git a/examples/ibm-project/README.md b/examples/ibm-project/README.md index 479c92419c..51bef0de0e 100644 --- a/examples/ibm-project/README.md +++ b/examples/ibm-project/README.md @@ -1,12 +1,16 @@ -# Example for ProjectV1 +# Examples for Projects API -This example illustrates how to use the ProjectV1 +These examples illustrate how to use the resources and data sources associated with Projects API. -The following types of resources are supported: +The following resources are supported: +* ibm_project_config +* ibm_project +* ibm_project_environment -* project_config -* project -* project_environment +The following data sources are supported: +* ibm_project_config +* ibm_project +* ibm_project_environment ## Usage @@ -20,61 +24,200 @@ $ terraform apply Run `terraform destroy` when you don't need these resources. 
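The usage steps above assume the example's input variables have been given values. A minimal `terraform.tfvars` sketch is shown here; the variable names (`ibmcloud_api_key`, `project_location`, `project_resource_group`) come from this example's `variables.tf`, while the values are placeholders to replace with your own:

```hcl
# Placeholder values; substitute your own API key, target location, and resource group.
ibmcloud_api_key       = "<ibm-cloud-api-key>"
project_location       = "us-south"
project_resource_group = "Default"
```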
+## Projects API resources -## ProjectV1 resources - -project_config resource: +### Resource: ibm_project_config ```hcl -resource "project_config" "project_config_instance" { - project_id = ibm_project.project_instance.project_id +resource "ibm_project_config" "project_config_instance" { + project_id = ibm_project.project_instance.id definition = var.project_config_definition } ``` -project resource: + +#### Inputs + +| Name | Description | Type | Required | +|------|-------------|------|---------| +| ibmcloud\_api\_key | IBM Cloud API key | `string` | true | +| project_id | The unique project ID. | `string` | true | +| schematics | A schematics workspace associated to a project configuration, with scripts. | `` | false | +| definition | | `` | true | + +#### Outputs + +| Name | Description | +|------|-------------| +| version | The version of the configuration. | +| is_draft | The flag that indicates whether the version of the configuration is draft, or active. | +| needs_attention_state | The needs attention state of a configuration. | +| created_at | A date and time value in the format YYYY-MM-DDTHH:mm:ssZ or YYYY-MM-DDTHH:mm:ss.sssZ, matching the date and time format as specified by RFC 3339. | +| modified_at | A date and time value in the format YYYY-MM-DDTHH:mm:ssZ or YYYY-MM-DDTHH:mm:ss.sssZ, matching the date and time format as specified by RFC 3339. | +| last_saved_at | A date and time value in the format YYYY-MM-DDTHH:mm:ssZ or YYYY-MM-DDTHH:mm:ss.sssZ, matching the date and time format as specified by RFC 3339. | +| outputs | The outputs of a Schematics template property. | +| project | The project referenced by this resource. | +| state | The state of the configuration. | +| update_available | The flag that indicates whether a configuration update is available. | +| href | A URL. | +| project_config_id | The ID of the configuration. If this parameter is empty, an ID is automatically created for the configuration. | + +### Resource: ibm_project ```hcl -resource "project" "project_instance" { +resource "ibm_project" "project_instance" { location = var.project_location resource_group = var.project_resource_group definition = var.project_definition } ``` -project_environment resource: + +#### Inputs + +| Name | Description | Type | Required | +|------|-------------|------|---------| +| ibmcloud\_api\_key | IBM Cloud API key | `string` | true | +| location | The IBM Cloud location where a resource is deployed. | `string` | true | +| resource_group | The resource group name where the project's data and tools are created. | `string` | true | +| definition | The definition of the project. | `` | true | + +#### Outputs + +| Name | Description | +|------|-------------| +| crn | An IBM Cloud resource name, which uniquely identifies a resource. | +| created_at | A date and time value in the format YYYY-MM-DDTHH:mm:ssZ or YYYY-MM-DDTHH:mm:ss.sssZ, matching the date and time format as specified by RFC 3339. | +| cumulative_needs_attention_view | The cumulative list of needs attention items for a project. If the view is successfully retrieved, an array which could be empty is returned. | +| cumulative_needs_attention_view_error | True indicates that the fetch of the needs attention items failed. It only exists if there was an error while retrieving the cumulative needs attention view. | +| resource_group_id | The resource group id where the project's data and tools are created. | +| state | The project status value. | +| href | A URL. 
| +| event_notifications_crn | The CRN of the event notifications instance if one is connected to this project. | +| configs | The project configurations. These configurations are only included in the response of creating a project if a configs array is specified in the request payload. | +| environments | The project environments. These environments are only included in the response if project environments were created on the project. | + +### Resource: ibm_project_environment ```hcl -resource "project_environment" "project_environment_instance" { - project_id = ibm_project.project_instance.project_id +resource "ibm_project_environment" "project_environment_instance" { + project_id = ibm_project.project_instance.id definition = var.project_environment_definition } ``` -## ProjectV1 data sources +#### Inputs + +| Name | Description | Type | Required | +|------|-------------|------|---------| +| ibmcloud\_api\_key | IBM Cloud API key | `string` | true | +| project_id | The unique project ID. | `string` | true | +| definition | The environment definition. | `` | true | -project_config data source: +#### Outputs + +| Name | Description | +|------|-------------| +| project | The project referenced by this resource. | +| created_at | A date and time value in the format YYYY-MM-DDTHH:mm:ssZ or YYYY-MM-DDTHH:mm:ss.sssZ, matching the date and time format as specified by RFC 3339. | +| target_account | The target account ID derived from the authentication block values. | +| modified_at | A date and time value in the format YYYY-MM-DDTHH:mm:ssZ or YYYY-MM-DDTHH:mm:ss.sssZ, matching the date and time format as specified by RFC 3339. | +| href | A URL. | +| project_environment_id | The environment id as a friendly name. | + +## Projects API data sources + +### Data source: ibm_project_config ```hcl -data "project_config" "project_config_instance" { +data "ibm_project_config" "project_config_instance" { project_id = ibm_project.project_instance.id project_config_id = ibm_project_config.project_config_instance.project_config_id } ``` -project data source: + +#### Inputs + +| Name | Description | Type | Required | +|------|-------------|------|---------| +| project_id | The unique project ID. | `string` | true | +| project_config_id | The unique config ID. | `string` | true | + +#### Outputs + +| Name | Description | +|------|-------------| +| version | The version of the configuration. | +| is_draft | The flag that indicates whether the version of the configuration is draft, or active. | +| needs_attention_state | The needs attention state of a configuration. | +| created_at | A date and time value in the format YYYY-MM-DDTHH:mm:ssZ or YYYY-MM-DDTHH:mm:ss.sssZ, matching the date and time format as specified by RFC 3339. | +| modified_at | A date and time value in the format YYYY-MM-DDTHH:mm:ssZ or YYYY-MM-DDTHH:mm:ss.sssZ, matching the date and time format as specified by RFC 3339. | +| last_saved_at | A date and time value in the format YYYY-MM-DDTHH:mm:ssZ or YYYY-MM-DDTHH:mm:ss.sssZ, matching the date and time format as specified by RFC 3339. | +| outputs | The outputs of a Schematics template property. | +| project | The project referenced by this resource. | +| schematics | A schematics workspace associated to a project configuration, with scripts. | +| state | The state of the configuration. | +| update_available | The flag that indicates whether a configuration update is available. | +| href | A URL. 
| +| definition | | + +### Data source: ibm_project ```hcl -data "project" "project_instance" { +data "ibm_project" "project_instance" { project_id = ibm_project.project_instance.id } ``` -project_environment data source: + +#### Inputs + +| Name | Description | Type | Required | +|------|-------------|------|---------| +| project_id | The unique project ID. | `string` | true | + +#### Outputs + +| Name | Description | +|------|-------------| +| crn | An IBM Cloud resource name, which uniquely identifies a resource. | +| created_at | A date and time value in the format YYYY-MM-DDTHH:mm:ssZ or YYYY-MM-DDTHH:mm:ss.sssZ, matching the date and time format as specified by RFC 3339. | +| cumulative_needs_attention_view | The cumulative list of needs attention items for a project. If the view is successfully retrieved, an array which could be empty is returned. | +| cumulative_needs_attention_view_error | True indicates that the fetch of the needs attention items failed. It only exists if there was an error while retrieving the cumulative needs attention view. | +| location | The IBM Cloud location where a resource is deployed. | +| resource_group_id | The resource group id where the project's data and tools are created. | +| state | The project status value. | +| href | A URL. | +| resource_group | The resource group name where the project's data and tools are created. | +| event_notifications_crn | The CRN of the event notifications instance if one is connected to this project. | +| configs | The project configurations. These configurations are only included in the response of creating a project if a configs array is specified in the request payload. | +| environments | The project environments. These environments are only included in the response if project environments were created on the project. | +| definition | The definition of the project. | + +### Data source: ibm_project_environment ```hcl -data "project_environment" "project_environment_instance" { +data "ibm_project_environment" "project_environment_instance" { project_id = ibm_project.project_instance.id project_environment_id = ibm_project_environment.project_environment_instance.project_environment_id } ``` +#### Inputs + +| Name | Description | Type | Required | +|------|-------------|------|---------| +| project_id | The unique project ID. | `string` | true | +| project_environment_id | The environment ID. | `string` | true | + +#### Outputs + +| Name | Description | +|------|-------------| +| project | The project referenced by this resource. | +| created_at | A date and time value in the format YYYY-MM-DDTHH:mm:ssZ or YYYY-MM-DDTHH:mm:ss.sssZ, matching the date and time format as specified by RFC 3339. | +| target_account | The target account ID derived from the authentication block values. | +| modified_at | A date and time value in the format YYYY-MM-DDTHH:mm:ssZ or YYYY-MM-DDTHH:mm:ss.sssZ, matching the date and time format as specified by RFC 3339. | +| href | A URL. | +| definition | The environment definition. | + ## Assumptions 1. TODO @@ -94,27 +237,3 @@ data "project_environment" "project_environment_instance" { | Name | Version | |------|---------| | ibm | 1.13.1 | - -## Inputs - -| Name | Description | Type | Required | -|------|-------------|------|---------| -| ibmcloud\_api\_key | IBM Cloud API key | `string` | true | -| project_id | The unique project ID. | `string` | true | -| schematics | A schematics workspace associated to a project configuration, with scripts. 
| `` | false | -| definition | The name and description of a project configuration. | `` | true | -| location | The IBM Cloud location where a resource is deployed. | `string` | true | -| resource_group | The resource group name where the project's data and tools are created. | `string` | true | -| definition | The definition of the project. | `` | true | -| definition | The environment definition. | `` | true | -| project_id | The unique project ID. | `string` | true | -| project_config_id | The unique config ID. | `string` | true | -| project_environment_id | The environment ID. | `string` | true | - -## Outputs - -| Name | Description | -|------|-------------| -| project_config | project_config object | -| project | project object | -| project_environment | project_environment object | diff --git a/examples/ibm-project/main.tf b/examples/ibm-project/main.tf index f07c03de9e..b31c75f863 100644 --- a/examples/ibm-project/main.tf +++ b/examples/ibm-project/main.tf @@ -4,7 +4,7 @@ provider "ibm" { // Provision project_config resource instance resource "ibm_project_config" "project_config_instance" { - project_id = ibm_project.project_instance.project_id + project_id = ibm_project.project_instance.id definition { name = "static-website-dev" description = "Website - development" @@ -14,7 +14,7 @@ resource "ibm_project_config" "project_config_instance" { } locator_id = "1082e7d2-5e2f-0a11-a3bc-f88a8e1931fc.145be7c1-9ec4-4719-b586-584ee52fbed0-global" inputs = { - name = "app_repo_name" + app_repo_name = "static-website-repo" } } } @@ -32,7 +32,7 @@ resource "ibm_project" "project_instance" { // Provision project_environment resource instance resource "ibm_project_environment" "project_environment_instance" { - project_id = ibm_project.project_instance.project_id + project_id = ibm_project.project_instance.id definition { name = "environment-stage" description = "environment for stage project" @@ -45,7 +45,7 @@ resource "ibm_project_environment" "project_environment_instance" { // Create project_config data source data "ibm_project_config" "project_config_instance" { - project_id = ibm_project_config.project_config_instance.project_id + project_id = ibm_project.project_instance.id project_config_id = ibm_project_config.project_config_instance.project_config_id } diff --git a/examples/ibm-project/variables.tf b/examples/ibm-project/variables.tf index 82c28185f4..6ddd1b127b 100644 --- a/examples/ibm-project/variables.tf +++ b/examples/ibm-project/variables.tf @@ -30,31 +30,31 @@ variable "project_environment_project_id" { } // Data source arguments for project_config -variable "project_config_project_id" { +variable "data_project_config_project_id" { description = "The unique project ID." type = string default = "project_id" } -variable "project_config_id" { +variable "data_project_config_project_config_id" { description = "The unique config ID." type = string default = "project_config_id" } // Data source arguments for project -variable "project_id" { +variable "data_project_project_id" { description = "The unique project ID." type = string default = "project_id" } // Data source arguments for project_environment -variable "project_environment_project_id" { +variable "data_project_environment_project_id" { description = "The unique project ID." type = string default = "project_id" } -variable "project_environment_id" { +variable "data_project_environment_project_environment_id" { description = "The environment ID." 
type = string default = "project_environment_id" diff --git a/examples/ibm-secrets-manager/README.md b/examples/ibm-secrets-manager/README.md index f68677dec5..b970c84196 100644 --- a/examples/ibm-secrets-manager/README.md +++ b/examples/ibm-secrets-manager/README.md @@ -32,30 +32,6 @@ $ terraform apply Run `terraform destroy` when you don't need these resources. - -## SecretsManagerV1 resources - - -## SecretsManagerV1 Data sources - -secrets_manager_secrets data source: - -```hcl -data "ibm_secrets_manager_secrets" "secrets_manager_secrets_instance" { - instance_id = var.secrets_manager_instance_id - secret_type = var.secrets_manager_secrets_secret_type -} -``` -secrets_manager_secret data source: - -```hcl -data "ibm_secrets_manager_secret" "secrets_manager_secret_instance" { - instance_id = var.secrets_manager_instance_id - secret_type = var.secrets_manager_secret_secret_type - secret_id = var.secrets_manager_secret_id -} -``` - ## SecretsManagerV2 resources sm_secret_group resource: diff --git a/examples/ibm-secrets-manager/outputs.tf b/examples/ibm-secrets-manager/outputs.tf index 9684c8c1eb..f068f78b08 100644 --- a/examples/ibm-secrets-manager/outputs.tf +++ b/examples/ibm-secrets-manager/outputs.tf @@ -1,11 +1,3 @@ -output "secrets_manager_secrets" { - value = data.ibm_secrets_manager_secrets.secrets_manager_secrets_instance -} - -output "secrets_manager_secret" { - value = data.ibm_secrets_manager_secret.secrets_manager_secret_instance -} - // This allows sm_secret_group data to be referenced by other resources and the terraform CLI // Modify this if only certain data should be exposed output "ibm_sm_secret_group" { diff --git a/go.mod b/go.mod index 638e7af953..dcba6684a4 100644 --- a/go.mod +++ b/go.mod @@ -1,15 +1,15 @@ module github.com/IBM-Cloud/terraform-provider-ibm -go 1.18 +go 1.19 require ( - github.com/IBM-Cloud/bluemix-go v0.0.0-20231204080125-462fa9e436bc - github.com/IBM-Cloud/container-services-go-sdk v0.0.0-20231116055201-2a84da7b9bd6 - github.com/IBM-Cloud/power-go-client v1.5.4 + github.com/IBM-Cloud/bluemix-go v0.0.0-20240110132033-6ead1f81a985 + github.com/IBM-Cloud/container-services-go-sdk v0.0.0-20231207111718-a3b74cc935fa + github.com/IBM-Cloud/power-go-client v1.5.8 github.com/IBM/apigateway-go-sdk v0.0.0-20210714141226-a5d5d49caaca github.com/IBM/appconfiguration-go-admin-sdk v0.3.0 github.com/IBM/appid-management-go-sdk v0.0.0-20210908164609-dd0e0eaf732f - github.com/IBM/cloud-databases-go-sdk v0.3.2 + github.com/IBM/cloud-databases-go-sdk v0.5.0 github.com/IBM/cloudant-go-sdk v0.0.43 github.com/IBM/code-engine-go-sdk v0.0.0-20231106200405-99e81b3ee752 github.com/IBM/container-registry-go-sdk v1.1.0 @@ -23,25 +23,25 @@ require ( github.com/IBM/ibm-hpcs-tke-sdk v0.0.0-20211109141421-a4b61b05f7d1 github.com/IBM/ibm-hpcs-uko-sdk v0.0.20-beta github.com/IBM/keyprotect-go-client v0.12.2 - github.com/IBM/networking-go-sdk v0.42.2 - github.com/IBM/platform-services-go-sdk v0.54.0 - github.com/IBM/project-go-sdk v0.1.4 + github.com/IBM/networking-go-sdk v0.44.0 + github.com/IBM/platform-services-go-sdk v0.56.3 + github.com/IBM/project-go-sdk v0.2.0 github.com/IBM/push-notifications-go-sdk v0.0.0-20210310100607-5790b96c47f5 - github.com/IBM/scc-go-sdk/v5 v5.1.3 - github.com/IBM/schematics-go-sdk v0.2.2 + github.com/IBM/scc-go-sdk/v5 v5.1.4 + github.com/IBM/schematics-go-sdk v0.2.3 github.com/IBM/secrets-manager-go-sdk/v2 v2.0.2 github.com/IBM/vpc-beta-go-sdk v0.6.0 - github.com/IBM/vpc-go-sdk v0.45.0 + github.com/IBM/vpc-go-sdk v0.47.0 github.com/ScaleFT/sshkeys 
v0.0.0-20200327173127-6142f742bca5 github.com/akamai/AkamaiOPEN-edgegrid-golang v1.2.2 github.com/akamai/AkamaiOPEN-edgegrid-golang/v5 v5.0.0 github.com/apache/openwhisk-client-go v0.0.0-20200201143223-a804fb82d105 github.com/apparentlymart/go-cidr v1.1.0 github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 - github.com/go-openapi/strfmt v0.21.7 + github.com/go-openapi/strfmt v0.21.10 github.com/golang-jwt/jwt v3.2.2+incompatible github.com/google/go-cmp v0.6.0 - github.com/google/uuid v1.3.0 + github.com/google/uuid v1.4.0 github.com/hashicorp/go-uuid v1.0.3 github.com/hashicorp/go-version v1.6.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.29.0 @@ -53,7 +53,7 @@ require ( github.com/pkg/errors v0.9.1 github.com/rook/rook v1.11.4 github.com/softlayer/softlayer-go v1.0.3 - golang.org/x/crypto v0.16.0 + golang.org/x/crypto v0.17.0 gopkg.in/yaml.v3 v3.0.1 gotest.tools v2.2.0+incompatible k8s.io/api v0.26.3 @@ -62,7 +62,7 @@ require ( ) require ( - github.com/IBM/mqcloud-go-sdk v0.0.0-20231207105140-14d858932788 + github.com/IBM/mqcloud-go-sdk v0.0.4 github.com/IBM/sarama v1.41.2 k8s.io/utils v0.0.0-20230313181309-38a27ef9d749 sigs.k8s.io/controller-runtime v0.14.1 @@ -83,7 +83,7 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v3 v3.2.2 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/cloudflare/circl v1.3.3 // indirect + github.com/cloudflare/circl v1.3.7 // indirect github.com/cloudfoundry/jibber_jabber v0.0.0-20151120183258-bcc4c8345a21 // indirect github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect github.com/coreos/pkg v0.0.0-20220810130054-c7d1c02cb6cf // indirect @@ -98,19 +98,19 @@ require ( github.com/fatih/color v1.15.0 // indirect github.com/frankban/quicktest v1.14.3 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/gabriel-vasile/mimetype v1.4.2 // indirect + github.com/gabriel-vasile/mimetype v1.4.3 // indirect github.com/go-jose/go-jose/v3 v3.0.1 // indirect - github.com/go-logr/logr v1.2.4 // indirect + github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-openapi/analysis v0.21.4 // indirect - github.com/go-openapi/errors v0.20.4 // indirect - github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.20.0 // indirect - github.com/go-openapi/loads v0.21.2 // indirect + github.com/go-openapi/analysis v0.21.5 // indirect + github.com/go-openapi/errors v0.21.0 // indirect + github.com/go-openapi/jsonpointer v0.20.1 // indirect + github.com/go-openapi/jsonreference v0.20.3 // indirect + github.com/go-openapi/loads v0.21.3 // indirect github.com/go-openapi/runtime v0.26.0 // indirect - github.com/go-openapi/spec v0.20.8 // indirect - github.com/go-openapi/swag v0.22.4 // indirect - github.com/go-openapi/validate v0.22.1 // indirect + github.com/go-openapi/spec v0.20.12 // indirect + github.com/go-openapi/swag v0.22.5 // indirect + github.com/go-openapi/validate v0.22.4 // indirect github.com/go-ozzo/ozzo-validation/v4 v4.3.0 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect @@ -202,7 +202,7 @@ require ( github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect github.com/zclconf/go-cty v1.14.1 // indirect - go.mongodb.org/mongo-driver v1.12.1 // indirect + go.mongodb.org/mongo-driver v1.13.1 // indirect go.opentelemetry.io/otel v1.14.0 // indirect 
go.opentelemetry.io/otel/trace v1.14.0 // indirect go.uber.org/ratelimit v0.2.0 // indirect @@ -210,7 +210,7 @@ require ( golang.org/x/net v0.19.0 // indirect golang.org/x/oauth2 v0.7.0 // indirect golang.org/x/sync v0.5.0 // indirect - golang.org/x/sys v0.15.0 // indirect + golang.org/x/sys v0.16.0 // indirect golang.org/x/term v0.15.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.3.0 // indirect @@ -235,10 +235,11 @@ replace github.com/softlayer/softlayer-go v1.0.3 => github.com/IBM-Cloud/softlay replace github.com/dgrijalva/jwt-go v3.2.0+incompatible => github.com/golang-jwt/jwt v3.2.1+incompatible +// add sdk changes. replace github.com/portworx/sched-ops v0.0.0-20200831185134-3e8010dc7056 => github.com/portworx/sched-ops v0.20.4-openstorage-rc3 // required by rook v1.7 exclude ( github.com/kubernetes-incubator/external-storage v0.20.4-openstorage-rc2 k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible k8s.io/client-go v12.0.0+incompatible -) \ No newline at end of file +) diff --git a/go.sum b/go.sum index dd2b86dde3..6e087e9ce1 100644 --- a/go.sum +++ b/go.sum @@ -101,15 +101,13 @@ github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3 github.com/DataDog/datadog-go v3.2.0+incompatible h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/zstd v1.4.4/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= -github.com/IBM-Cloud/bluemix-go v0.0.0-20231204080125-462fa9e436bc h1:AeooCa6UMWycgKJ9n0do9PEZaNlYZZHqspfwUzPvopc= -github.com/IBM-Cloud/bluemix-go v0.0.0-20231204080125-462fa9e436bc/go.mod h1:jIGLnIfj+uBv2ALz3rVHzNbNwt0V/bEWNeJKECa8Q+k= -github.com/IBM-Cloud/container-services-go-sdk v0.0.0-20231106114255-c50117860a3c h1:tRS4VuOG3lHNG+yrsh3vZZQDVNLuFJB0oZbTJp9YXds= -github.com/IBM-Cloud/container-services-go-sdk v0.0.0-20231106114255-c50117860a3c/go.mod h1:xUQL9SGAjoZFd4GNjrjjtEpjpkgU7RFXRyHesbKTjiY= -github.com/IBM-Cloud/container-services-go-sdk v0.0.0-20231116055201-2a84da7b9bd6 h1:QXU1Atl/JSI3ZtYB9tHbWLhrFYE1E+5Iww1sjQ7mqdo= -github.com/IBM-Cloud/container-services-go-sdk v0.0.0-20231116055201-2a84da7b9bd6/go.mod h1:xUQL9SGAjoZFd4GNjrjjtEpjpkgU7RFXRyHesbKTjiY= +github.com/IBM-Cloud/bluemix-go v0.0.0-20240110132033-6ead1f81a985 h1:Rsi0y9dJZNkF9zIa0Yjf9rdYHb5UqMMGbZvOcsESq90= +github.com/IBM-Cloud/bluemix-go v0.0.0-20240110132033-6ead1f81a985/go.mod h1:jIGLnIfj+uBv2ALz3rVHzNbNwt0V/bEWNeJKECa8Q+k= +github.com/IBM-Cloud/container-services-go-sdk v0.0.0-20231207111718-a3b74cc935fa h1:tsgTFGt4j1V3PQmzZbA4wJAeT5rz24OgY4AvY2QGek0= +github.com/IBM-Cloud/container-services-go-sdk v0.0.0-20231207111718-a3b74cc935fa/go.mod h1:xUQL9SGAjoZFd4GNjrjjtEpjpkgU7RFXRyHesbKTjiY= github.com/IBM-Cloud/ibm-cloud-cli-sdk v0.5.3/go.mod h1:RiUvKuHKTBmBApDMUQzBL14pQUGKcx/IioKQPIcRQjs= -github.com/IBM-Cloud/power-go-client v1.5.4 h1:fk+QgOdZvwq696UynehfGrMGMHXDYOJfRCE3Pec9o6c= -github.com/IBM-Cloud/power-go-client v1.5.4/go.mod h1:ZsKqKC4d4MAWujkttW1w9tG7xjlIbkIpVENX476ghVY= +github.com/IBM-Cloud/power-go-client v1.5.8 h1:4l9PmnYRXV/KfVNBRuc9hya6kW5cQZhN4UMUMdpn1JU= +github.com/IBM-Cloud/power-go-client v1.5.8/go.mod h1:y4WDw/l9+29CKX98ngCCvGoHdzX49LL00B1euoAbWzQ= github.com/IBM-Cloud/softlayer-go v1.0.5-tf h1:koUAyF9b6X78lLLruGYPSOmrfY2YcGYKOj/Ug9nbKNw= github.com/IBM-Cloud/softlayer-go v1.0.5-tf/go.mod h1:6HepcfAXROz0Rf63krk5hPZyHT6qyx2MNvYyHof7ik4= github.com/IBM/apigateway-go-sdk v0.0.0-20210714141226-a5d5d49caaca 
h1:crniVcf+YcmgF03NmmfonXwSQ73oJF+IohFYBwknMxs= @@ -118,8 +116,8 @@ github.com/IBM/appconfiguration-go-admin-sdk v0.3.0 h1:OqFxnDxro0JiRwHBKytCcseY2 github.com/IBM/appconfiguration-go-admin-sdk v0.3.0/go.mod h1:xPxAYhr/uywUIDEo/JqWbkUdTryPdzRdYBfUpA5IjoE= github.com/IBM/appid-management-go-sdk v0.0.0-20210908164609-dd0e0eaf732f h1:4c1kqY4GqmkQ+tO03rneDb74Tv7BhTj8jDiDB1p8mdM= github.com/IBM/appid-management-go-sdk v0.0.0-20210908164609-dd0e0eaf732f/go.mod h1:d22kTYY7RYBWcQlZpqrSdshpB/lJ16viWS5Sbjtlc8s= -github.com/IBM/cloud-databases-go-sdk v0.3.2 h1:AUi7/xswqCwuXIlSyuXtDZJIm4d0ZicUBHhPrE9TnH0= -github.com/IBM/cloud-databases-go-sdk v0.3.2/go.mod h1:nCIVfeZnhBYIiwByT959dFP4VWUeNLxomDYy63tTC6M= +github.com/IBM/cloud-databases-go-sdk v0.5.0 h1:Bie6MnT1jLchQmtKVA20HHETTPdlOR+i11P2kJ55viM= +github.com/IBM/cloud-databases-go-sdk v0.5.0/go.mod h1:nCIVfeZnhBYIiwByT959dFP4VWUeNLxomDYy63tTC6M= github.com/IBM/cloudant-go-sdk v0.0.43 h1:YxTy4RpAEezX32YIWnds76hrBREmO4u6IkBz1WylNuQ= github.com/IBM/cloudant-go-sdk v0.0.43/go.mod h1:WeYrJPaHTw19943ndWnVfwMIlZ5z0XUM2uEXNBrwZ1M= github.com/IBM/code-engine-go-sdk v0.0.0-20231106200405-99e81b3ee752 h1:S5NT0aKKUqd9hnIrPN/qUijKx9cZjJi3kfFpog0ByDA= @@ -156,30 +154,30 @@ github.com/IBM/ibm-hpcs-uko-sdk v0.0.20-beta/go.mod h1:MLVNHMYoKsvovJZ4v1gQCpIYt github.com/IBM/keyprotect-go-client v0.5.1/go.mod h1:5TwDM/4FRJq1ZOlwQL1xFahLWQ3TveR88VmL1u3njyI= github.com/IBM/keyprotect-go-client v0.12.2 h1:Cjxcqin9Pl0xz3MnxdiVd4v/eIa79xL3hQpSbwOr/DQ= github.com/IBM/keyprotect-go-client v0.12.2/go.mod h1:yr8h2noNgU8vcbs+vhqoXp3Lmv73PI0zAc6VMgFvWwM= -github.com/IBM/mqcloud-go-sdk v0.0.0-20231207105140-14d858932788 h1:cIT0YSzqMGqxM3OJQx1gp4gtYYy9U35O0tVdcFHOgwc= -github.com/IBM/mqcloud-go-sdk v0.0.0-20231207105140-14d858932788/go.mod h1:R4NBbDMygpHiFywUnOdV0UfBZap4HcHa3QXLlACr9TU= -github.com/IBM/networking-go-sdk v0.42.2 h1:caqjx4jyFHi10Vlf3skHvlL6K3YJRVstsmCBmvdyqkA= -github.com/IBM/networking-go-sdk v0.42.2/go.mod h1:lTUZwtUkMANMnrLHFIgRhHrkBfwASY/Iho1fabaPHxo= -github.com/IBM/platform-services-go-sdk v0.54.0 h1:WjHWm9ZAJvlq07E1WXXtEe+d/B0sazWD6cEWVT7EMLU= -github.com/IBM/platform-services-go-sdk v0.54.0/go.mod h1:CWSprvsCsXWvujmBzbtoJSmbRZS9FVV3O594b0t/GiM= -github.com/IBM/project-go-sdk v0.1.4 h1:QGehJxpp/QqfrBYSmN2FRYwuGejlHlVscB/9QGQfdLk= -github.com/IBM/project-go-sdk v0.1.4/go.mod h1:lqe0M4cKvABI1iHR1b+KfasVcxQL6nl2VJ8eOyQs8Ig= +github.com/IBM/mqcloud-go-sdk v0.0.4 h1:gqMpoU5a0qJ0GETG4PQrkgeEEoaQLvbxRJnEe6ytvC4= +github.com/IBM/mqcloud-go-sdk v0.0.4/go.mod h1:gQptHC6D+rxfg0muRFFGvTDmvl4YfiDE0uXkaRRewRk= +github.com/IBM/networking-go-sdk v0.44.0 h1:6acyMd6hwxcjK3bJ2suiUBTjzg8mRFAvYD76zbx0adk= +github.com/IBM/networking-go-sdk v0.44.0/go.mod h1:XtqYRInR5NHmFUXhOL6RovpDdv6PnJfZ1lPFvssA8MA= +github.com/IBM/platform-services-go-sdk v0.55.0 h1:W598xZanL61bwd8O2DQexr4qjIr+/tP0Y845zoms5yA= +github.com/IBM/platform-services-go-sdk v0.55.0/go.mod h1:CWSprvsCsXWvujmBzbtoJSmbRZS9FVV3O594b0t/GiM= +github.com/IBM/platform-services-go-sdk v0.56.3 h1:DQ1VMQSknhPsdT7d+AybKiZT82esczAkHCIBkwYubzQ= +github.com/IBM/platform-services-go-sdk v0.56.3/go.mod h1:+U6Kg7o5u/Bh4ZkLxjymSgfdpVsaWAtsMtzhwclUry0= +github.com/IBM/project-go-sdk v0.2.0 h1:DMv0HQfS3GQHkkagZ4E2vt1H1paN5Gh357K9izeaGj8= +github.com/IBM/project-go-sdk v0.2.0/go.mod h1:lqe0M4cKvABI1iHR1b+KfasVcxQL6nl2VJ8eOyQs8Ig= github.com/IBM/push-notifications-go-sdk v0.0.0-20210310100607-5790b96c47f5 h1:NPUhkoOCRuv3OFWt19PmwjXGGTKlvmbuPg9fUrBUNe4= github.com/IBM/push-notifications-go-sdk v0.0.0-20210310100607-5790b96c47f5/go.mod 
h1:b07XHUVh0XYnQE9s2mqgjYST1h9buaQNqN4EcKhOsX0= github.com/IBM/sarama v1.41.2 h1:ZDBZfGPHAD4uuAtSv4U22fRZBgst0eEwGFzLj0fb85c= github.com/IBM/sarama v1.41.2/go.mod h1:xdpu7sd6OE1uxNdjYTSKUfY8FaKkJES9/+EyjSgiGQk= -github.com/IBM/scc-go-sdk/v5 v5.1.3 h1:8zqJx/HgChTlMaC21HzthIR4HbFkuJ3dR/D68254jRg= -github.com/IBM/scc-go-sdk/v5 v5.1.3/go.mod h1:YtAVlzq10bwR82QX4ZavhDIwa1s85RuVO9N/KmXVcuk= -github.com/IBM/schematics-go-sdk v0.2.2 h1:8S3hoVLzF/ZRgWDaLqwHnLmZvlEBHCKgHszmMh7yD2E= -github.com/IBM/schematics-go-sdk v0.2.2/go.mod h1:Tw2OSAPdpC69AxcwoyqcYYaGTTW6YpERF9uNEU+BFRQ= +github.com/IBM/scc-go-sdk/v5 v5.1.4 h1:+HoeUJCyGAJpQv2hBskKdMC1I6K617zbHF5lpbK5VYI= +github.com/IBM/scc-go-sdk/v5 v5.1.4/go.mod h1:YtAVlzq10bwR82QX4ZavhDIwa1s85RuVO9N/KmXVcuk= +github.com/IBM/schematics-go-sdk v0.2.3 h1:lgTt0Sbudii3cuSk1YSQgrtiZAXDbBABAoVj3eQuBrU= +github.com/IBM/schematics-go-sdk v0.2.3/go.mod h1:Tw2OSAPdpC69AxcwoyqcYYaGTTW6YpERF9uNEU+BFRQ= github.com/IBM/secrets-manager-go-sdk/v2 v2.0.2 h1:+Svh1OmoFxMBnZQSOUtp2UUzrOGFsSQlE5TFL/ptJco= github.com/IBM/secrets-manager-go-sdk/v2 v2.0.2/go.mod h1:WII+LS4VkQYykmq65NWSuPb5xGNvsqkcK1aCWZoU2x4= github.com/IBM/vpc-beta-go-sdk v0.6.0 h1:wfM3AcW3zOM3xsRtZ+EA6+sESlGUjQ6Yf4n5QQyz4uc= github.com/IBM/vpc-beta-go-sdk v0.6.0/go.mod h1:fzHDAQIqH/5yJmYsKodKHLcqxMDT+yfH6vZjdiw8CQA= -github.com/IBM/vpc-go-sdk v0.43.0 h1:uy/qWIqETCXraUG2cq5sjScr6pZ79ZteY1v5iLUVQ3Q= -github.com/IBM/vpc-go-sdk v0.43.0/go.mod h1:kRz9tqPvpHoA/qGrC/qVjTbi4ICuTChpG76L89liGL4= -github.com/IBM/vpc-go-sdk v0.45.0 h1:RFbUZH5vBRGAEW5+jRzbDlxB+a+GvG9EBhyYO52Tvrs= -github.com/IBM/vpc-go-sdk v0.45.0/go.mod h1:4Hs5d/aClmsxAzwDQkwG+ri0vW2ykPJdpM6hDLRwKcA= +github.com/IBM/vpc-go-sdk v0.47.0 h1:2Qcjd4zQQRYjz+y4ZMDP6+aWGifyXCZ9uMmlpW7p9To= +github.com/IBM/vpc-go-sdk v0.47.0/go.mod h1:4Hs5d/aClmsxAzwDQkwG+ri0vW2ykPJdpM6hDLRwKcA= github.com/Jeffail/gabs v1.1.1 h1:V0uzR08Hj22EX8+8QMhyI9sX2hwRu+/RJhJUmnwda/E= github.com/Jeffail/gabs v1.1.1/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc= github.com/Logicalis/asn1 v0.0.0-20190312173541-d60463189a56 h1:vuquMR410psHNax14XKNWa0Ae/kYgWJcXi0IFuX60N0= @@ -318,8 +316,9 @@ github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6D github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs= github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= +github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= +github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= github.com/cloudfoundry-community/go-cfclient v0.0.0-20190201205600-f136f9222381/go.mod h1:e5+USP2j8Le2M0Jo3qKPFnNhuo1wueU4nWHCXBOfQ14= github.com/cloudfoundry/jibber_jabber v0.0.0-20151120183258-bcc4c8345a21 h1:tuijfIjZyjZaHq9xDUh0tNitwXshJpbLkqMOJv4H3do= github.com/cloudfoundry/jibber_jabber v0.0.0-20151120183258-bcc4c8345a21/go.mod h1:po7NpZ/QiTKzBKyrsEAxwnTamCoh8uDk/egRpQ7siIc= @@ -446,8 +445,8 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4 github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fullsailor/pkcs7 
v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= -github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= -github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= +github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= +github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk= github.com/gammazero/deque v0.0.0-20190130191400-2afb3858e9c7/go.mod h1:GeIq9qoE43YdGnDXURnmKTnGg15pQz4mYkXSTChbneI= github.com/gammazero/workerpool v0.0.0-20190406235159-88d534f22b56/go.mod h1:w9RqFVO2BM3xwWEcAB8Fwp0OviTBBEiRmSBDfbXnd3w= github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= @@ -483,8 +482,9 @@ github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTg github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= @@ -497,42 +497,39 @@ github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpR github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= -github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= -github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc= -github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo= +github.com/go-openapi/analysis v0.21.5 h1:3tHfEBh6Ia8eKc4M7khOGjPOAlWKJ10d877Cr9teujI= +github.com/go-openapi/analysis v0.21.5/go.mod h1:25YcZosX9Lwz2wBsrFrrsL8bmjjXdlyP6zsr2AMy29M= github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.20.0/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.20.3/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= -github.com/go-openapi/errors v0.20.4 h1:unTcVm6PispJsMECE3zWgvG4xTiKda1LIR5rCRWLG6M= -github.com/go-openapi/errors v0.20.4/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= +github.com/go-openapi/errors v0.21.0 
h1:FhChC/duCnfoLj1gZ0BgaBmzhJC2SL/sJr8a2vAobSY= +github.com/go-openapi/errors v0.21.0/go.mod h1:jxNTMUxRCKj65yb/okJGEtahVd7uvWnuWfj53bse4ho= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.20.1 h1:MkK4VEIEZMj4wT9PmjaUmGflVBr9nvud4Q4UVFbDoBE= +github.com/go-openapi/jsonpointer v0.20.1/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= -github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= -github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= +github.com/go-openapi/jsonreference v0.20.3 h1:EjGcjTW8pD1mRis6+w/gmoBdqv5+RbE9B85D1NgDOVQ= +github.com/go-openapi/jsonreference v0.20.3/go.mod h1:FviDZ46i9ivh810gqzFLl5NttD5q3tSlMLqLr6okedM= github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= -github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g= -github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro= -github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw= +github.com/go-openapi/loads v0.21.3 h1:8sSH2FIm/SnbDUGv572md4YqVMFne/a9Eubvcd3anew= +github.com/go-openapi/loads v0.21.3/go.mod h1:Y3aMR24iHbKHppOj91nQ/SHc0cuPbAr4ndY4a02xydc= github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= @@ -543,10 +540,8 @@ github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsd github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= github.com/go-openapi/spec 
v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= -github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= -github.com/go-openapi/spec v0.20.8 h1:ubHmXNY3FCIOinT8RNrrPfGc9t7I1qhPtdOGoG2AxRU= -github.com/go-openapi/spec v0.20.8/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/spec v0.20.12 h1:cgSLbrsmziAP2iais+Vz7kSazwZ8rsUZd6TUzdDgkVI= +github.com/go-openapi/spec v0.20.12/go.mod h1:iSCgnBcwbMW9SfzJb8iYynXvcY6C/QFrI7otzF7xGM4= github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= @@ -555,26 +550,24 @@ github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk github.com/go-openapi/strfmt v0.19.10/go.mod h1:qBBipho+3EoIqn6YDI+4RnQEtj6jT/IdKm+PAlXxSUc= github.com/go-openapi/strfmt v0.20.1/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk= github.com/go-openapi/strfmt v0.20.2/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk= -github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= github.com/go-openapi/strfmt v0.21.2/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= -github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k= github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew= +github.com/go-openapi/strfmt v0.21.10 h1:JIsly3KXZB/Qf4UzvzJpg4OELH/0ASDQsyk//TTBDDk= +github.com/go-openapi/strfmt v0.21.10/go.mod h1:vNDMwbilnl7xKiO/Ve/8H8Bb2JIInBnH+lqiw6QWgis= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= -github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.22.5 h1:fVS63IE3M0lsuWRzuom3RLwUMVI2peDH01s6M70ugys= +github.com/go-openapi/swag v0.22.5/go.mod h1:Gl91UqO+btAM0plGGxHqJcQZ1ZTy6jbmridBTsDy8A0= github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= -github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU= -github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= +github.com/go-openapi/validate v0.22.4 h1:5v3jmMyIPKTR8Lv9syBAIRxG6lY0RqeBPB1LKEijzk8= 
+github.com/go-openapi/validate v0.22.4/go.mod h1:qm6O8ZIcPVdSY5219468Jv7kBdGvkiZLPOmqnqTUZ2A= github.com/go-ozzo/ozzo-validation v3.6.0+incompatible h1:msy24VGS42fKO9K1vLz82/GeYW1cILu7Nuuj1N3BBkE= github.com/go-ozzo/ozzo-validation/v4 v4.3.0 h1:byhDUpfEwjsVQb1vBunvIjh2BHQ9ead57VkAEY4V+Es= github.com/go-ozzo/ozzo-validation/v4 v4.3.0/go.mod h1:2NKgrcHl3z6cJs+3Oo940FPRiTzuqKbvfrL2RxCj6Ew= @@ -737,8 +730,9 @@ github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= +github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= @@ -1117,7 +1111,6 @@ github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= @@ -1275,7 +1268,6 @@ github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAl github.com/onsi/gomega v1.18.0/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/onsi/gomega v1.20.0/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= github.com/onsi/gomega v1.21.1/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc= github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM= @@ -1418,7 +1410,7 @@ github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/rook/rook v1.11.4 h1:V5+r8JnVpSCdWGZ8eV5zUX1SnMTgCnz3azux+7Jefzc= github.com/rook/rook v1.11.4/go.mod 
h1:RwQdIZvb7BGomy9yR9caWYCoT8pHngYsxBXg6Fl8LZk= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= @@ -1570,12 +1562,11 @@ go.mongodb.org/mongo-driver v1.2.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qL go.mongodb.org/mongo-driver v1.4.2/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= go.mongodb.org/mongo-driver v1.5.1/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw= go.mongodb.org/mongo-driver v1.7.0/go.mod h1:Q4oFMbo1+MSNqICAdYMlC/zSTrwCogR4R8NzkI+yfU8= -go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= go.mongodb.org/mongo-driver v1.11.3/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= -go.mongodb.org/mongo-driver v1.12.1 h1:nLkghSU8fQNaK7oUmDhQFsnrtcoNy7Z6LVFKsEecqgE= -go.mongodb.org/mongo-driver v1.12.1/go.mod h1:/rGBTebI3XYboVmgz+Wv3Bcbl3aD0QF9zl6kDDw18rQ= +go.mongodb.org/mongo-driver v1.13.1 h1:YIc7HTYsKndGK4RFzJ3covLz1byri52x0IoMB0Pt/vk= +go.mongodb.org/mongo-driver v1.13.1/go.mod h1:wcDf1JBCXy2mOW0bWHwO/IOYqdca1MPCwDtFu/Z9+eo= go.opencensus.io v0.19.1/go.mod h1:gug0GbSHa8Pafr0d2urOSgoXHZ6x/RUlaiT0d9pqb4A= go.opencensus.io v0.19.2/go.mod h1:NO/8qkisMZLZ1FCsKNqtJPwc8/TaclWyY0B6wcYNg9M= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= @@ -1645,8 +1636,8 @@ golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= -golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1747,7 +1738,6 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -1885,7 +1875,6 @@ golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1916,8 +1905,8 @@ golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= diff --git a/ibm/acctest/acctest.go b/ibm/acctest/acctest.go index 4968af4c8a..3066238605 100644 --- a/ibm/acctest/acctest.go +++ b/ibm/acctest/acctest.go @@ -104,6 +104,8 @@ var ( DedicatedHostGroupFamily string DedicatedHostGroupClass string ShareProfileName string + SourceShareCRN string + ShareEncryptionKey string VNIId string VolumeProfileName string VSIUnattachedBootVolumeID string @@ -124,10 +126,14 @@ var ( // MQ on Cloud var ( - MqcloudInstanceID string - MqcloudQueueManagerID string - MqcloudKSCertFilePath string - MqcloudTSCertFilePath string + MqcloudConfigEndpoint string + MqcloudInstanceID string + MqcloudQueueManagerID string + MqcloudKSCertFilePath string + MqcloudTSCertFilePath string + MqCloudQueueManagerLocation string + MqCloudQueueManagerVersion string + MqCloudQueueManagerVersionUpdate string ) // Secrets Manager @@ -208,6 +214,8 @@ var ( PiStoragePool string PiStorageType string Pi_shared_processor_pool_id string + Pi_target_storage_tier string + Pi_volume_clone_task_id string Pi_resource_group_id string ) @@ -260,9 +268,13 @@ var Snapshot_month string // Secuity and Complinace Center var ( SccApiEndpoint string + SccEventNotificationsCRN string + SccInstanceID string + SccObjectStorageCRN string + SccObjectStorageBucket string SccProviderTypeAttributes string + SccProviderTypeID string SccReportID string - SccInstanceID string ) // ROKS Cluster @@ -520,7 +532,7 @@ func init() { KubeVersion = os.Getenv("IBM_KUBE_VERSION") if KubeVersion == "" { - KubeVersion = "1.18" + KubeVersion = "1.25.9" fmt.Println("[WARN] Set the environment variable IBM_KUBE_VERSION for testing ibm_container_cluster resource else it is set to default value '1.18.14'") } @@ -848,6 +860,18 @@ func init() { fmt.Println("[INFO] Set the environment variable IS_SHARE_PROFILE for testing ibm_is_instance resource else it is set to default value 'tier-3iops'") } + SourceShareCRN = os.Getenv("IS_SOURCE_SHARE_CRN") + if 
SourceShareCRN == "" { + SourceShareCRN = "crn:v1:staging:public:is:us-east-1:a/efe5afc483594adaa8325e2b4d1290df::share:r142-a106f162-86e4-4d7f-be75-193cc55a93e9" // for next gen infrastructure + fmt.Println("[INFO] Set the environment variable IS_SOURCE_SHARE_CRN for testing ibm_is_instance resource else it is set to default value") + } + + ShareEncryptionKey = os.Getenv("IS_SHARE_ENCRYPTION_KEY") + if ShareEncryptionKey == "" { + ShareEncryptionKey = "crn:v1:staging:public:kms:us-south:a/efe5afc483594adaa8325e2b4d1290df:1be45161-6dae-44ca-b248-837f98004057:key:3dd21cc5-cc20-4f7c-bc62-8ec9a8a3d1bd" // for next gen infrastructure + fmt.Println("[INFO] Set the environment variable IS_SHARE_ENCRYPTION_KEY for testing ibm_is_instance resource else it is set to default value") + } + VolumeProfileName = os.Getenv("IS_VOLUME_PROFILE") if VolumeProfileName == "" { VolumeProfileName = "general-purpose" @@ -1074,6 +1098,18 @@ func init() { fmt.Println("[WARN] Set the environment variable PI_SHARED_PROCESSOR_POOL_ID for testing ibm_pi_shared_processor_pool resource else it is set to default value 'tf-pi-shared-processor-pool'") } + Pi_target_storage_tier = os.Getenv("PI_TARGET_STORAGE_TIER") + if Pi_target_storage_tier == "" { + Pi_target_storage_tier = "terraform-test-tier" + fmt.Println("[INFO] Set the environment variable PI_TARGET_STORAGE_TIER for testing Pi_target_storage_tier resource else it is set to default value 'terraform-test-tier'") + } + + Pi_volume_clone_task_id = os.Getenv("PI_VOLUME_CLONE_TASK_ID") + if Pi_volume_clone_task_id == "" { + Pi_volume_clone_task_id = "terraform-test-volume-clone-task-id" + fmt.Println("[INFO] Set the environment variable PI_VOLUME_CLONE_TASK_ID for testing Pi_volume_clone_task_id resource else it is set to default value 'terraform-test-volume-clone-task-id'") + } + Pi_resource_group_id = os.Getenv("PI_RESOURCE_GROUP_ID") if Pi_resource_group_id == "" { Pi_resource_group_id = "" @@ -1383,6 +1419,26 @@ func init() { fmt.Println("[WARN] Set the environment variable IBMCLOUD_SCC_PROVIDER_TYPE_ATTRIBUTES with a VALID SCC PROVIDER TYPE ATTRIBUTE") } + SccProviderTypeID = os.Getenv("IBMCLOUD_SCC_PROVIDER_TYPE_ID") + if SccProviderTypeID == "" { + fmt.Println("[WARN] Set the environment variable IBMCLOUD_SCC_PROVIDER_TYPE_ID with a VALID SCC PROVIDER TYPE ID") + } + + SccEventNotificationsCRN = os.Getenv("IBMCLOUD_SCC_EVENT_NOTIFICATION_CRN") + if SccEventNotificationsCRN == "" { + fmt.Println("[WARN] Set the environment variable IBMCLOUD_SCC_EVENT_NOTIFICATION_CRN") + } + + SccObjectStorageCRN = os.Getenv("IBMCLOUD_SCC_OBJECT_STORAGE_CRN") + if SccObjectStorageCRN == "" { + fmt.Println("[WARN] Set the environment variable IBMCLOUD_SCC_OBJECT_STORAGE_CRN with a valid cloud object storage crn") + } + + SccObjectStorageBucket = os.Getenv("IBMCLOUD_SCC_OBJECT_STORAGE_BUCKET") + if SccObjectStorageBucket == "" { + fmt.Println("[WARN] Set the environment variable IBMCLOUD_SCC_OBJECT_STORAGE_BUCKET with a valid cloud object storage bucket") + } + HostPoolID = os.Getenv("IBM_CONTAINER_DEDICATEDHOST_POOL_ID") if HostPoolID == "" { fmt.Println("[INFO] Set the environment variable IBM_CONTAINER_DEDICATEDHOST_POOL_ID for ibm_container_vpc_cluster resource to test dedicated host functionality") @@ -1577,11 +1633,15 @@ func init() { fmt.Println("[WARN] Set the environment variable IBM_SATELLITE_SSH_PUB_KEY with a ssh public key or ibm_satellite_* tests may fail") } + MqcloudConfigEndpoint = os.Getenv("IBMCLOUD_MQCLOUD_CONFIG_ENDPOINT") + if MqcloudConfigEndpoint == "" { +
fmt.Println("[INFO] Set the environment variable IBMCLOUD_MQCLOUD_CONFIG_ENDPOINT for ibm_mqcloud service else tests will fail if this is not set correctly") + } + MqcloudInstanceID = os.Getenv("IBM_MQCLOUD_INSTANCE_ID") if MqcloudInstanceID == "" { fmt.Println("[INFO] Set the environment variable IBM_MQCLOUD_INSTANCE_ID for ibm_mqcloud_queue_manager resource or datasource else tests will fail if this is not set correctly") } - MqcloudQueueManagerID = os.Getenv("IBM_MQCLOUD_QUEUEMANAGER_ID") if MqcloudQueueManagerID == "" { fmt.Println("[INFO] Set the environment variable IBM_MQCLOUD_QUEUEMANAGER_ID for ibm_mqcloud_queue_manager resource or datasource else tests will fail if this is not set correctly") @@ -1594,6 +1654,18 @@ func init() { if MqcloudTSCertFilePath == "" { fmt.Println("[INFO] Set the environment variable IBM_MQCLOUD_TS_CERT_PATH for ibm_mqcloud_truststore_certificate resource or datasource else tests will fail if this is not set correctly") } + MqCloudQueueManagerLocation = os.Getenv(("IBM_MQCLOUD_QUEUEMANAGER_LOCATION")) + if MqCloudQueueManagerLocation == "" { + fmt.Println("[INFO] Set the environment variable IBM_MQCLOUD_QUEUEMANAGER_LOCATION for ibm_mqcloud_queue_manager resource or datasource else tests will fail if this is not set correctly") + } + MqCloudQueueManagerVersion = os.Getenv(("IBM_MQCLOUD_QUEUEMANAGER_VERSION")) + if MqCloudQueueManagerVersion == "" { + fmt.Println("[INFO] Set the environment variable IBM_MQCLOUD_QUEUEMANAGER_VERSION for ibm_mqcloud_queue_manager resource or datasource else tests will fail if this is not set correctly") + } + MqCloudQueueManagerVersionUpdate = os.Getenv(("IBM_MQCLOUD_QUEUEMANAGER_VERSIONUPDATE")) + if MqCloudQueueManagerVersionUpdate == "" { + fmt.Println("[INFO] Set the environment variable IBM_MQCLOUD_QUEUEMANAGER_VERSIONUPDATE for ibm_mqcloud_queue_manager resource or datasource else tests will fail if this is not set correctly") + } } var ( @@ -1786,7 +1858,11 @@ func TestAccPreCheckScc(t *testing.T) { } if SccProviderTypeAttributes == "" { - t.Fatal("IBMCLOUD_SCC_PROVIDER_TYPE_ATTRIBUTES missing. Set the environment variable IBMCLOUD_SCC_PROVIDER_TYPE_ATTRIBUTES with a VALID ATTRIBUTE") + t.Fatal("IBMCLOUD_SCC_PROVIDER_TYPE_ATTRIBUTES missing. Set the environment variable IBMCLOUD_SCC_PROVIDER_TYPE_ATTRIBUTES with a VALID SCC provider_type JSON object") + } + + if SccProviderTypeID == "" { + t.Fatal("IBMCLOUD_SCC_PROVIDER_TYPE_ID missing. Set the environment variable IBMCLOUD_SCC_PROVIDER_TYPE_ID with a VALID SCC provider_type ID") } if SccInstanceID == "" { @@ -1794,7 +1870,19 @@ func TestAccPreCheckScc(t *testing.T) { } if SccReportID == "" { - t.Fatal("IBMCLOUD_SCC_REPORT_ID missing. Set the environment variable IBMCLOUD_SCC_REPORT_ID with a VALID REPORT_ID") + t.Fatal("IBMCLOUD_SCC_REPORT_ID missing. Set the environment variable IBMCLOUD_SCC_REPORT_ID with a VALID SCC REPORT_ID") + } + + if SccEventNotificationsCRN == "" { + t.Fatal("IBMCLOUD_SCC_EVENT_NOTIFICATION_CRN missing. Set the environment variable IBMCLOUD_SCC_EVENT_NOTIFICATION_CRN with a valid EN CRN") + } + + if SccObjectStorageCRN == "" { + t.Fatal("IBMCLOUD_SCC_OBJECT_STORAGE_CRN missing. Set the environment variable IBMCLOUD_SCC_OBJECT_STORAGE_CRN with a valid COS CRN") + } + + if SccObjectStorageBucket == "" { + t.Fatal("IBMCLOUD_SCC_OBJECT_STORAGE_CRN missing. 
Set the environment variable IBMCLOUD_SCC_OBJECT_STORAGE_BUCKET with a valid COS bucket") } } @@ -1807,6 +1895,9 @@ func TestAccPreCheckSatelliteSSH(t *testing.T) { func TestAccPreCheckMqcloud(t *testing.T) { TestAccPreCheck(t) + if MqcloudConfigEndpoint == "" { + t.Fatal("IBMCLOUD_MQCLOUD_CONFIG_ENDPOINT must be set for acceptance tests") + } if MqcloudInstanceID == "" { t.Fatal("IBM_MQCLOUD_INSTANCE_ID must be set for acceptance tests") } @@ -1819,6 +1910,15 @@ func TestAccPreCheckMqcloud(t *testing.T) { if MqcloudKSCertFilePath == "" { t.Fatal("IBM_MQCLOUD_KS_CERT_PATH must be set for acceptance tests") } + if MqCloudQueueManagerLocation == "" { + t.Fatal("IBM_MQCLOUD_QUEUEMANAGER_LOCATION must be set for acceptance tests") + } + if MqCloudQueueManagerVersion == "" { + t.Fatal("IBM_MQCLOUD_QUEUEMANAGER_VERSION must be set for acceptance tests") + } + if MqCloudQueueManagerVersionUpdate == "" { + t.Fatal("IBM_MQCLOUD_QUEUEMANAGER_VERSIONUPDATE must be set for acceptance tests") + } } func TestAccProviderFactories() map[string]func() (*schema.Provider, error) { diff --git a/ibm/conns/config.go b/ibm/conns/config.go index fd99350034..7b9a05a556 100644 --- a/ibm/conns/config.go +++ b/ibm/conns/config.go @@ -1210,9 +1210,12 @@ func (session clientSession) ProjectV1() (*project.ProjectV1, error) { // MQ on Cloud func (session clientSession) MqcloudV1() (*mqcloudv1.MqcloudV1, error) { - sessionMqcloudClient := session.mqcloudClient - sessionMqcloudClient.EnableRetries(0, 0) - return session.mqcloudClient, session.mqcloudClientErr + if session.mqcloudClientErr != nil { + sessionMqcloudClient := session.mqcloudClient + sessionMqcloudClient.EnableRetries(0, 0) + return session.mqcloudClient, session.mqcloudClientErr + } + return session.mqcloudClient.Clone(), nil } // ClientSession configures and returns a fully initialized ClientSession @@ -1881,11 +1884,14 @@ func (c *Config) ClientSession() (interface{}, error) { }) } session.pushServiceClient = pnclient + // event notifications enurl := fmt.Sprintf("https://%s.event-notifications.cloud.ibm.com/event-notifications", c.Region) - if c.Visibility == "private" { - session.eventNotificationsApiClientErr = fmt.Errorf("Event Notifications Service does not support private endpoints") + + if c.Visibility == "private" || c.Visibility == "public-and-private" { + enurl = fmt.Sprintf("https://private.%s.event-notifications.cloud.ibm.com/event-notifications", c.Region) } + if fileMap != nil && c.Visibility != "public-and-private" { enurl = fileFallBack(fileMap, c.Visibility, "IBMCLOUD_EVENT_NOTIFICATIONS_API_ENDPOINT", c.Region, enurl) } @@ -3266,23 +3272,24 @@ func (c *Config) ClientSession() (interface{}, error) { if fileMap != nil && c.Visibility != "public-and-private" { mqCloudURL = fileFallBack(fileMap, c.Visibility, "IBMCLOUD_MQCLOUD_CONFIG_ENDPOINT", c.Region, mqCloudURL) } - + accept_language := os.Getenv("IBMCLOUD_MQCLOUD_ACCEPT_LANGUAGE") mqcloudClientOptions := &mqcloudv1.MqcloudV1Options{ - Authenticator: authenticator, - URL: EnvFallBack([]string{"IBMCLOUD_MQCLOUD_CONFIG_ENDPOINT"}, mqCloudURL), + Authenticator: authenticator, + AcceptLanguage: core.StringPtr(accept_language), + URL: EnvFallBack([]string{"IBMCLOUD_MQCLOUD_CONFIG_ENDPOINT"}, mqCloudURL), } // Construct the service client for MQ Cloud. 
session.mqcloudClient, err = mqcloudv1.NewMqcloudV1(mqcloudClientOptions) - if err != nil { - session.mqcloudClientErr = fmt.Errorf("Error occurred while configuring MQ Cloud service: %q", err) - } else { + if err == nil { // Enable retries for API calls session.mqcloudClient.Service.EnableRetries(c.RetryCount, c.RetryDelay) // Add custom header for analytics session.mqcloudClient.SetDefaultHeaders(gohttp.Header{ "X-Original-User-Agent": {fmt.Sprintf("terraform-provider-ibm/%s", version.Version)}, }) + } else { + session.mqcloudClientErr = fmt.Errorf("Error occurred while configuring MQ on Cloud service: %q", err) } // Construct the service options. diff --git a/ibm/flex/structures.go b/ibm/flex/structures.go index f4370020f9..a2feb7bb9b 100644 --- a/ibm/flex/structures.go +++ b/ibm/flex/structures.go @@ -251,6 +251,15 @@ func FlattenUsersSet(userList *schema.Set) []string { return users } +func FlattenSet(set *schema.Set) []string { + setList := set.List() + elems := make([]string, 0, len(setList)) + for _, elem := range setList { + elems = append(elems, elem.(string)) + } + return elems +} + func ExpandMembers(configured []interface{}) []datatypes.Network_LBaaS_LoadBalancerServerInstanceInfo { members := make([]datatypes.Network_LBaaS_LoadBalancerServerInstanceInfo, 0, len(configured)) for _, lRaw := range configured { @@ -429,6 +438,19 @@ func FlattenZones(list []containerv1.WorkerPoolZoneResponse) []map[string]interf return zones } +func FlattenZonesv2(list []containerv2.ZoneResp) []map[string]interface{} { + zones := make([]map[string]interface{}, len(list)) + for i, zone := range list { + l := map[string]interface{}{ + "zone": zone.ID, + "subnets": zone.Subnets, + "worker_count": zone.WorkerCount, + } + zones[i] = l + } + return zones +} + func FlattenWorkerPools(list []containerv1.WorkerPoolResponse) []map[string]interface{} { workerPools := make([]map[string]interface{}, len(list)) for i, workerPool := range list { @@ -539,9 +561,9 @@ func FlattenVlans(list []containerv1.Vlan) []map[string]interface{} { return vlans } -func FlattenIcdGroups(grouplist icdv4.GroupList) []map[string]interface{} { - groups := make([]map[string]interface{}, len(grouplist.Groups)) - for i, group := range grouplist.Groups { +func FlattenIcdGroups(groupResponse *clouddatabasesv5.ListDeploymentScalingGroupsResponse) []map[string]interface{} { + groups := make([]map[string]interface{}, len(groupResponse.Groups)) + for i, group := range groupResponse.Groups { memorys := make([]map[string]interface{}, 1) memory := make(map[string]interface{}) memory["units"] = group.Memory.Units @@ -554,12 +576,12 @@ func FlattenIcdGroups(grouplist icdv4.GroupList) []map[string]interface{} { cpus := make([]map[string]interface{}, 1) cpu := make(map[string]interface{}) - cpu["units"] = group.Cpu.Units - cpu["allocation_count"] = group.Cpu.AllocationCount - cpu["minimum_count"] = group.Cpu.MinimumCount - cpu["step_size_count"] = group.Cpu.StepSizeCount - cpu["is_adjustable"] = group.Cpu.IsAdjustable - cpu["can_scale_down"] = group.Cpu.CanScaleDown + cpu["units"] = group.CPU.Units + cpu["allocation_count"] = group.CPU.AllocationCount + cpu["minimum_count"] = group.CPU.MinimumCount + cpu["step_size_count"] = group.CPU.StepSizeCount + cpu["is_adjustable"] = group.CPU.IsAdjustable + cpu["can_scale_down"] = group.CPU.CanScaleDown cpus[0] = cpu disks := make([]map[string]interface{}, 1) @@ -572,12 +594,23 @@ func FlattenIcdGroups(grouplist icdv4.GroupList) []map[string]interface{} { disk["can_scale_down"] = group.Disk.CanScaleDown 
disks[0] = disk + hostflavors := make([]map[string]interface{}, 0) + if group.HostFlavor != nil { + hostflavors = make([]map[string]interface{}, 1) + hostflavor := make(map[string]interface{}) + hostflavor["id"] = group.HostFlavor.ID + hostflavor["name"] = group.HostFlavor.Name + hostflavor["hosting_size"] = group.HostFlavor.HostingSize + hostflavors[0] = hostflavor + } + l := map[string]interface{}{ - "group_id": group.Id, - "count": group.Count, - "memory": memorys, - "cpu": cpus, - "disk": disks, + "group_id": group.ID, + "count": group.Count, + "memory": memorys, + "cpu": cpus, + "disk": disks, + "host_flavor": hostflavors, } groups[i] = l } @@ -1171,6 +1204,10 @@ func PtrToString(s string) *string { return &s } +func PtrToBool(b bool) *bool { + return &b +} + func IntValue(i64 *int64) (i int) { if i64 != nil { i = int(*i64) @@ -1602,7 +1639,11 @@ func FlattenV2PolicyResource(resource iampolicymanagementv1.V2PolicyResource) [] if len(customAttributes) > 0 { out := make(map[string]string) for _, a := range customAttributes { - out[*a.Key] = fmt.Sprint(a.Value) + if *a.Operator == "stringExists" && a.Value == true { + out[*a.Key] = fmt.Sprint("*") + } else if *a.Operator == "stringMatch" || *a.Operator == "stringEquals" { + out[*a.Key] = fmt.Sprint(a.Value) + } } l["attributes"] = out } @@ -3242,13 +3283,13 @@ func FlattenOpaqueSecret(fields containerv2.Fields) []map[string]interface{} { return flattenedOpaqueSecret } -// flattenHostLabels .. -func FlattenHostLabels(hostLabels []interface{}) map[string]string { +// flatten the provided key-value pairs +func FlattenKeyValues(keyValues []interface{}) map[string]string { labels := make(map[string]string) - for _, v := range hostLabels { + for _, v := range keyValues { parts := strings.Split(v.(string), ":") if len(parts) != 2 { - log.Fatal("Entered label " + v.(string) + "is in incorrect format.") + log.Fatal("Entered key-value " + v.(string) + "is in incorrect format.") } labels[parts[0]] = parts[1] } @@ -3308,10 +3349,12 @@ func GetResourceAttribute(name string, r iampolicymanagementv1.PolicyResource) * func GetV2PolicyResourceAttribute(key string, r iampolicymanagementv1.V2PolicyResource) string { for _, a := range r.Attributes { - if *a.Key == key && - (*a.Operator == "stringMatch" || - *a.Operator == "stringEquals") { - return a.Value.(string) + if *a.Key == key { + if *a.Operator == "stringExists" && a.Value == true { + return fmt.Sprint("*") + } else if *a.Operator == "stringMatch" || *a.Operator == "stringEquals" { + return a.Value.(string) + } } } return *core.StringPtr("") @@ -3326,7 +3369,7 @@ func GetSubjectAttribute(name string, s iampolicymanagementv1.PolicySubject) *st return core.StringPtr("") } -func GetV2PolicySubjectAttribute(key string, s iampolicymanagementv1.V2PolicySubject) *string { +func GetV2PolicySubjectAttribute(key string, s iampolicymanagementv1.V2PolicySubject) interface{} { for _, a := range s.Attributes { if *a.Key == key && (*a.Operator == "stringMatch" || @@ -3334,7 +3377,7 @@ func GetV2PolicySubjectAttribute(key string, s iampolicymanagementv1.V2PolicySub return a.Value } } - return core.StringPtr("") + return interface{}(core.StringPtr("")) } func SetResourceAttribute(name *string, value *string, r []iampolicymanagementv1.ResourceAttribute) []iampolicymanagementv1.ResourceAttribute { @@ -3475,6 +3518,7 @@ func GetRoleNamesFromPolicyResponse(policy iampolicymanagementv1.V2PolicyTemplat controlResponse := policy.Control.(*iampolicymanagementv1.ControlResponse) policyRoles := 
MapRolesToPolicyRoles(controlResponse.Grant.Roles) resourceAttributes := policy.Resource.Attributes + subjectAttributes := policy.Subject.Attributes userDetails, err := meta.(conns.ClientSession).BluemixUserDetails() if err != nil { @@ -3482,11 +3526,20 @@ func GetRoleNamesFromPolicyResponse(policy iampolicymanagementv1.V2PolicyTemplat } var ( - serviceName string - resourceType string - serviceGroupID string + serviceName string + sourceServiceName string + resourceType string + serviceGroupID string ) + for _, a := range subjectAttributes { + if *a.Key == "serviceName" && + (*a.Operator == "stringMatch" || + *a.Operator == "stringEquals") { + sourceServiceName = a.Value.(string) + } + } + for _, a := range resourceAttributes { if *a.Key == "serviceName" && (*a.Operator == "stringMatch" || @@ -3513,6 +3566,11 @@ func GetRoleNamesFromPolicyResponse(policy iampolicymanagementv1.V2PolicyTemplat if accountManagement, ok := d.GetOk("account_management"); ok { isAccountManagementPolicy = accountManagement.(bool) } + + if serviceName == "" && resourceType == "resource-group" { + serviceName = "resource-controller" + } + if serviceName == "" && // no specific service specified !isAccountManagementPolicy && // not all account management services resourceType != "resource-group" && // not to a resource group @@ -3528,6 +3586,14 @@ func GetRoleNamesFromPolicyResponse(policy iampolicymanagementv1.V2PolicyTemplat listRoleOptions.ServiceGroupID = &serviceGroupID } + if sourceServiceName != "" { + listRoleOptions.SourceServiceName = &sourceServiceName + } + + if *policy.Type != "" { + listRoleOptions.PolicyType = policy.Type + } + roleList, _, err := iamPolicyManagementClient.ListRoles(listRoleOptions) if err != nil { diff --git a/ibm/provider/provider.go b/ibm/provider/provider.go index 0bf1e29b21..9f5ff9a6e8 100644 --- a/ibm/provider/provider.go +++ b/ibm/provider/provider.go @@ -619,6 +619,7 @@ func Provider() *schema.Provider { "ibm_pi_system_pools": power.DataSourceIBMPISystemPools(), "ibm_pi_tenant": power.DataSourceIBMPITenant(), "ibm_pi_volume": power.DataSourceIBMPIVolume(), + "ibm_pi_volume_clone": power.DataSourceIBMPIVolumeClone(), "ibm_pi_volume_group": power.DataSourceIBMPIVolumeGroup(), "ibm_pi_volume_groups": power.DataSourceIBMPIVolumeGroups(), "ibm_pi_volume_group_details": power.DataSourceIBMPIVolumeGroupDetails(), @@ -1164,6 +1165,7 @@ func Provider() *schema.Provider { "ibm_pi_volume": power.ResourceIBMPIVolume(), "ibm_pi_volume_onboarding": power.ResourceIBMPIVolumeOnboarding(), "ibm_pi_volume_group": power.ResourceIBMPIVolumeGroup(), + "ibm_pi_volume_clone": power.ResourceIBMPIVolumeClone(), "ibm_pi_volume_group_action": power.ResourceIBMPIVolumeGroupAction(), "ibm_pi_network": power.ResourceIBMPINetwork(), "ibm_pi_instance": power.ResourceIBMPIInstance(), @@ -1305,6 +1307,7 @@ func Provider() *schema.Provider { "ibm_scc_template_attachment": scc.ResourceIBMSccTemplateAttachment(), // Security and Compliance Center + "ibm_scc_instance_settings": scc.ResourceIbmSccInstanceSettings(), "ibm_scc_rule": scc.ResourceIbmSccRule(), "ibm_scc_control_library": scc.ResourceIbmSccControlLibrary(), "ibm_scc_profile": scc.ResourceIbmSccProfile(), @@ -1576,6 +1579,7 @@ func Validator() validate.ValidatorDict { "ibm_satellite_host": satellite.ResourceIBMSatelliteHostValidator(), // Added for SCC + "ibm_scc_instance_settings": scc.ResourceIbmSccInstanceSettingsValidator(), "ibm_scc_rule": scc.ResourceIbmSccRuleValidator(), "ibm_scc_control_library": scc.ResourceIbmSccControlLibraryValidator(), 
"ibm_scc_profile": scc.ResourceIbmSccProfileValidator(), diff --git a/ibm/service/atracker/resource_ibm_atracker_settings_test.go b/ibm/service/atracker/resource_ibm_atracker_settings_test.go index 07d7ad0efe..29f9c5138e 100644 --- a/ibm/service/atracker/resource_ibm_atracker_settings_test.go +++ b/ibm/service/atracker/resource_ibm_atracker_settings_test.go @@ -17,9 +17,8 @@ import ( func TestAccIBMAtrackerSettingsBasic(t *testing.T) { var conf atrackerv2.Settings - metadataRegionPrimary := "us-east" + metadataRegionPrimary := "us-south" privateAPIEndpointOnly := "false" - metadataRegionPrimaryUpdate := "us-south" resource.Test(t, resource.TestCase{ PreCheck: func() { acc.TestAccPreCheck(t) }, @@ -36,15 +35,6 @@ func TestAccIBMAtrackerSettingsBasic(t *testing.T) { resource.TestCheckResourceAttr("ibm_atracker_settings.atracker_settings", "private_api_endpoint_only", privateAPIEndpointOnly), ), }, - resource.TestStep{ - Config: testAccCheckIBMAtrackerSettingsConfigBasic(metadataRegionPrimaryUpdate, - metadataRegionPrimary, privateAPIEndpointOnly), - Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr("ibm_atracker_settings.atracker_settings", "metadata_region_primary", metadataRegionPrimaryUpdate), - resource.TestCheckResourceAttr("ibm_atracker_settings.atracker_settings", "metadata_region_backup", metadataRegionPrimary), - resource.TestCheckResourceAttr("ibm_atracker_settings.atracker_settings", "private_api_endpoint_only", privateAPIEndpointOnly), - ), - }, }, }) } diff --git a/ibm/service/cdtoolchain/data_source_ibm_cd_toolchain_tool_securitycompliance.go b/ibm/service/cdtoolchain/data_source_ibm_cd_toolchain_tool_securitycompliance.go index 143fe95bb7..8f7ab24ed1 100644 --- a/ibm/service/cdtoolchain/data_source_ibm_cd_toolchain_tool_securitycompliance.go +++ b/ibm/service/cdtoolchain/data_source_ibm_cd_toolchain_tool_securitycompliance.go @@ -1,4 +1,4 @@ -// Copyright IBM Corp. 2023 All Rights Reserved. +// Copyright IBM Corp. 2024 All Rights Reserved. // Licensed under the Mozilla Public License v2.0 package cdtoolchain @@ -94,12 +94,12 @@ func DataSourceIBMCdToolchainToolSecuritycompliance() *schema.Resource { "evidence_namespace": &schema.Schema{ Type: schema.TypeString, Computed: true, - Description: "The kind of pipeline evidence to be displayed in Security and Compliance Center for this toolchain. The values are; `cd` which will use evidence generated by a Continuous Deployment pipeline, or `cc` which will use evidence generated by a Continuous Compliance pipeline.", + Description: "The kind of pipeline evidence to be displayed in Security and Compliance Center for this toolchain. The values are; `cd` which will use evidence generated by a Continuous Deployment (CD) pipeline, or `cc` which will use evidence generated by a Continuous Compliance (CC) pipeline. The default behavior is to use the CD evidence.", }, "use_profile_attachment": &schema.Schema{ Type: schema.TypeString, Computed: true, - Description: "Set to `enabled` to enable use profile with attachment, so that the scripts in the pipeline can interact with the Security and Compliance Center service. 
When enabled, other parameters become relevant; `scc_api_key`, `instance_crn`, `profile_name`, `profile_version`, `attachment_id`.", + Description: "Set to `enabled` to enable use profile with attachment, so that the scripts in the pipeline can interact with the Security and Compliance Center service to perform pre-deploy validation against compliance rules for Continuous Deployment (CD) and compliance monitoring for Continuous Compliance (CC). When enabled, other parameters become relevant; `scc_api_key`, `instance_crn`, `profile_name`, `profile_version`, `attachment_id`.", }, "scc_api_key": &schema.Schema{ Type: schema.TypeString, diff --git a/ibm/service/cdtoolchain/resource_ibm_cd_toolchain_tool_securitycompliance.go b/ibm/service/cdtoolchain/resource_ibm_cd_toolchain_tool_securitycompliance.go index be80157483..d6d382ba56 100644 --- a/ibm/service/cdtoolchain/resource_ibm_cd_toolchain_tool_securitycompliance.go +++ b/ibm/service/cdtoolchain/resource_ibm_cd_toolchain_tool_securitycompliance.go @@ -1,4 +1,4 @@ -// Copyright IBM Corp. 2023 All Rights Reserved. +// Copyright IBM Corp. 2024 All Rights Reserved. // Licensed under the Mozilla Public License v2.0 package cdtoolchain @@ -58,12 +58,12 @@ func ResourceIBMCdToolchainToolSecuritycompliance() *schema.Resource { "evidence_namespace": &schema.Schema{ Type: schema.TypeString, Optional: true, - Description: "The kind of pipeline evidence to be displayed in Security and Compliance Center for this toolchain. The values are; `cd` which will use evidence generated by a Continuous Deployment pipeline, or `cc` which will use evidence generated by a Continuous Compliance pipeline.", + Description: "The kind of pipeline evidence to be displayed in Security and Compliance Center for this toolchain. The values are; `cd` which will use evidence generated by a Continuous Deployment (CD) pipeline, or `cc` which will use evidence generated by a Continuous Compliance (CC) pipeline. The default behavior is to use the CD evidence.", }, "use_profile_attachment": &schema.Schema{ Type: schema.TypeString, Optional: true, - Description: "Set to `enabled` to enable use profile with attachment, so that the scripts in the pipeline can interact with the Security and Compliance Center service. When enabled, other parameters become relevant; `scc_api_key`, `instance_crn`, `profile_name`, `profile_version`, `attachment_id`.", + Description: "Set to `enabled` to enable use profile with attachment, so that the scripts in the pipeline can interact with the Security and Compliance Center service to perform pre-deploy validation against compliance rules for Continuous Deployment (CD) and compliance monitoring for Continuous Compliance (CC). 
When enabled, other parameters become relevant; `scc_api_key`, `instance_crn`, `profile_name`, `profile_version`, `attachment_id`.", }, "scc_api_key": &schema.Schema{ Type: schema.TypeString, diff --git a/ibm/service/contextbasedrestrictions/data_source_ibm_cbr_rule_test.go b/ibm/service/contextbasedrestrictions/data_source_ibm_cbr_rule_test.go index b2fa07dd41..f8c6982b37 100644 --- a/ibm/service/contextbasedrestrictions/data_source_ibm_cbr_rule_test.go +++ b/ibm/service/contextbasedrestrictions/data_source_ibm_cbr_rule_test.go @@ -13,6 +13,11 @@ import ( acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" ) +const ( + testAccountID = "12ab34cd56ef78ab90cd12ef34ab56cd" + testZoneID = "559052eb8f43302824e7ae490c0281eb" +) + func TestAccIBMCbrRuleDataSourceBasic(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { acc.TestAccPreCheck(t) }, @@ -77,13 +82,13 @@ func testAccCheckIBMCbrRuleDataSourceConfigBasic() string { contexts { attributes { name = "networkZoneId" - value = "559052eb8f43302824e7ae490c0281eb" + value = "%s" } } resources { attributes { name = "accountId" - value = "12ab34cd56ef78ab90cd12ef34ab56cd" + value = "%s" } attributes { name = "serviceName" @@ -94,7 +99,7 @@ func testAccCheckIBMCbrRuleDataSourceConfigBasic() string { data "ibm_cbr_rule" "cbr_rule" { rule_id = ibm_cbr_rule.cbr_rule.id } - `) + `, testZoneID, testAccountID) } func testAccCheckIBMCbrRuleDataSourceConfig(ruleDescription string, ruleEnforcementMode string) string { @@ -104,13 +109,13 @@ func testAccCheckIBMCbrRuleDataSourceConfig(ruleDescription string, ruleEnforcem contexts { attributes { name = "networkZoneId" - value = "559052eb8f43302824e7ae490c0281eb" + value = "%s" } } resources { attributes { name = "accountId" - value = "12ab34cd56ef78ab90cd12ef34ab56cd" + value = "%s" } attributes { name = "serviceName" @@ -133,5 +138,5 @@ func testAccCheckIBMCbrRuleDataSourceConfig(ruleDescription string, ruleEnforcem data "ibm_cbr_rule" "cbr_rule" { rule_id = ibm_cbr_rule.cbr_rule.id } - `, ruleDescription, ruleEnforcementMode) + `, ruleDescription, testZoneID, testAccountID, ruleEnforcementMode) } diff --git a/ibm/service/contextbasedrestrictions/data_source_ibm_cbr_zone_test.go b/ibm/service/contextbasedrestrictions/data_source_ibm_cbr_zone_test.go index 1a1e027648..22817af9fb 100644 --- a/ibm/service/contextbasedrestrictions/data_source_ibm_cbr_zone_test.go +++ b/ibm/service/contextbasedrestrictions/data_source_ibm_cbr_zone_test.go @@ -45,7 +45,7 @@ func TestAccIBMCbrZoneDataSourceBasic(t *testing.T) { func TestAccIBMCbrZoneDataSourceAllArgs(t *testing.T) { zoneName := fmt.Sprintf("tf_name_%d", acctest.RandIntRange(10, 100)) - zoneAccountID := "12ab34cd56ef78ab90cd12ef34ab56cd" + zoneAccountID := testAccountID zoneDescription := fmt.Sprintf("tf_description_%d", acctest.RandIntRange(10, 100)) resource.Test(t, resource.TestCase{ @@ -86,7 +86,7 @@ func testAccCheckIBMCbrZoneDataSourceConfigBasic() string { resource "ibm_cbr_zone" "cbr_zone" { name = "Test Zone Data Source Config Basic" description = "Test Zone Data Source Config Basic" - account_id = "12ab34cd56ef78ab90cd12ef34ab56cd" + account_id = "%s" addresses { type = "ipRange" value = "169.23.22.0-169.23.22.255" @@ -96,7 +96,7 @@ func testAccCheckIBMCbrZoneDataSourceConfigBasic() string { data "ibm_cbr_zone" "cbr_zone" { zone_id = ibm_cbr_zone.cbr_zone.id } - `) + `, testAccountID) } func testAccCheckIBMCbrZoneDataSourceConfig(zoneName string, zoneAccountID string, zoneDescription string) string { diff --git 
a/ibm/service/contextbasedrestrictions/resource_ibm_cbr_rule.go b/ibm/service/contextbasedrestrictions/resource_ibm_cbr_rule.go index dfcbd65ddb..18c7154623 100644 --- a/ibm/service/contextbasedrestrictions/resource_ibm_cbr_rule.go +++ b/ibm/service/contextbasedrestrictions/resource_ibm_cbr_rule.go @@ -35,7 +35,7 @@ func ResourceIBMCbrRule() *schema.Resource { }, "contexts": &schema.Schema{ Type: schema.TypeList, - Required: true, + Optional: true, Description: "The contexts this rule applies to.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -122,6 +122,7 @@ func ResourceIBMCbrRule() *schema.Resource { Type: schema.TypeList, MaxItems: 1, Optional: true, + Computed: true, Description: "The operations this rule applies to.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -144,7 +145,7 @@ func ResourceIBMCbrRule() *schema.Resource { "enforcement_mode": &schema.Schema{ Type: schema.TypeString, Optional: true, - Default: "enabled", + Computed: true, ValidateFunc: validate.InvokeValidator("ibm_cbr_rule", "enforcement_mode"), Description: "The rule enforcement mode: * `enabled` - The restrictions are enforced and reported. This is the default. * `disabled` - The restrictions are disabled. Nothing is enforced or reported. * `report` - The restrictions are evaluated and reported, but not enforced.", }, @@ -252,8 +253,8 @@ func resourceIBMCbrRuleCreate(context context.Context, d *schema.ResourceData, m if _, ok := d.GetOk("description"); ok { createRuleOptions.SetDescription(d.Get("description").(string)) } + contexts := []contextbasedrestrictionsv1.RuleContext{} if _, ok := d.GetOk("contexts"); ok { - var contexts []contextbasedrestrictionsv1.RuleContext for _, e := range d.Get("contexts").([]interface{}) { value := e.(map[string]interface{}) contextsItem, err := resourceIBMCbrRuleMapToRuleContext(value) @@ -262,8 +263,8 @@ func resourceIBMCbrRuleCreate(context context.Context, d *schema.ResourceData, m } contexts = append(contexts, *contextsItem) } - createRuleOptions.SetContexts(contexts) } + createRuleOptions.SetContexts(contexts) if _, ok := d.GetOk("resources"); ok { var resources []contextbasedrestrictionsv1.Resource for _, e := range d.Get("resources").([]interface{}) { @@ -408,8 +409,8 @@ func resourceIBMCbrRuleUpdate(context context.Context, d *schema.ResourceData, m if _, ok := d.GetOk("description"); ok { replaceRuleOptions.SetDescription(d.Get("description").(string)) } + contexts := []contextbasedrestrictionsv1.RuleContext{} if _, ok := d.GetOk("contexts"); ok { - var contexts []contextbasedrestrictionsv1.RuleContext for _, e := range d.Get("contexts").([]interface{}) { value := e.(map[string]interface{}) contextsItem, err := resourceIBMCbrRuleMapToRuleContext(value) @@ -418,8 +419,8 @@ func resourceIBMCbrRuleUpdate(context context.Context, d *schema.ResourceData, m } contexts = append(contexts, *contextsItem) } - replaceRuleOptions.SetContexts(contexts) } + replaceRuleOptions.SetContexts(contexts) if _, ok := d.GetOk("resources"); ok { var resources []contextbasedrestrictionsv1.Resource for _, e := range d.Get("resources").([]interface{}) { diff --git a/ibm/service/contextbasedrestrictions/resource_ibm_cbr_rule_test.go b/ibm/service/contextbasedrestrictions/resource_ibm_cbr_rule_test.go index 8434734ae3..a790388835 100644 --- a/ibm/service/contextbasedrestrictions/resource_ibm_cbr_rule_test.go +++ b/ibm/service/contextbasedrestrictions/resource_ibm_cbr_rule_test.go @@ -78,13 +78,13 @@ func testAccCheckIBMCbrRuleConfigBasic() string { contexts { 
attributes { name = "networkZoneId" - value = "559052eb8f43302824e7ae490c0281eb" + value = "%s" } } resources { attributes { name = "accountId" - value = "12ab34cd56ef78ab90cd12ef34ab56cd" + value = "%s" } attributes { name = "serviceName" @@ -97,7 +97,7 @@ func testAccCheckIBMCbrRuleConfigBasic() string { } enforcement_mode = "disabled" } - `) + `, testZoneID, testAccountID) } func testAccCheckIBMCbrRuleConfig(description string, enforcementMode string) string { @@ -108,13 +108,13 @@ func testAccCheckIBMCbrRuleConfig(description string, enforcementMode string) st contexts { attributes { name = "networkZoneId" - value = "559052eb8f43302824e7ae490c0281eb" + value = "%s" } } resources { attributes { name = "accountId" - value = "12ab34cd56ef78ab90cd12ef34ab56cd" + value = "%s" } attributes { name = "serviceName" @@ -133,7 +133,7 @@ func testAccCheckIBMCbrRuleConfig(description string, enforcementMode string) st } enforcement_mode = "%s" } - `, description, enforcementMode) + `, description, testZoneID, testAccountID, enforcementMode) } func testAccCheckIBMCbrRuleExists(n string, obj contextbasedrestrictionsv1.Rule) resource.TestCheckFunc { diff --git a/ibm/service/contextbasedrestrictions/resource_ibm_cbr_zone.go b/ibm/service/contextbasedrestrictions/resource_ibm_cbr_zone.go index d525000caa..f8be760a0a 100644 --- a/ibm/service/contextbasedrestrictions/resource_ibm_cbr_zone.go +++ b/ibm/service/contextbasedrestrictions/resource_ibm_cbr_zone.go @@ -47,7 +47,7 @@ func ResourceIBMCbrZone() *schema.Resource { }, "addresses": &schema.Schema{ Type: schema.TypeList, - Required: true, + Optional: true, Description: "The list of addresses in the zone.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -249,8 +249,8 @@ func resourceIBMCbrZoneCreate(context context.Context, d *schema.ResourceData, m if _, ok := d.GetOk("description"); ok { createZoneOptions.SetDescription(d.Get("description").(string)) } + addresses := []contextbasedrestrictionsv1.AddressIntf{} if _, ok := d.GetOk("addresses"); ok { - var addresses []contextbasedrestrictionsv1.AddressIntf for _, e := range d.Get("addresses").([]interface{}) { value := e.(map[string]interface{}) addressesItem, err := resourceIBMCbrZoneMapToAddress(value) @@ -259,8 +259,8 @@ func resourceIBMCbrZoneCreate(context context.Context, d *schema.ResourceData, m } addresses = append(addresses, addressesItem) } - createZoneOptions.SetAddresses(addresses) } + createZoneOptions.SetAddresses(addresses) if _, ok := d.GetOk("excluded"); ok { var excluded []contextbasedrestrictionsv1.AddressIntf for _, e := range d.Get("excluded").([]interface{}) { @@ -401,8 +401,8 @@ func resourceIBMCbrZoneUpdate(context context.Context, d *schema.ResourceData, m if _, ok := d.GetOk("description"); ok { replaceZoneOptions.SetDescription(d.Get("description").(string)) } + addresses := []contextbasedrestrictionsv1.AddressIntf{} if _, ok := d.GetOk("addresses"); ok { - var addresses []contextbasedrestrictionsv1.AddressIntf for _, e := range d.Get("addresses").([]interface{}) { value := e.(map[string]interface{}) addressesItem, err := resourceIBMCbrZoneMapToAddress(value) @@ -411,8 +411,8 @@ func resourceIBMCbrZoneUpdate(context context.Context, d *schema.ResourceData, m } addresses = append(addresses, addressesItem) } - replaceZoneOptions.SetAddresses(addresses) } + replaceZoneOptions.SetAddresses(addresses) if _, ok := d.GetOk("excluded"); ok { var excluded []contextbasedrestrictionsv1.AddressIntf for _, e := range d.Get("excluded").([]interface{}) { diff --git 
a/ibm/service/contextbasedrestrictions/resource_ibm_cbr_zone_test.go b/ibm/service/contextbasedrestrictions/resource_ibm_cbr_zone_test.go index 9910b07885..c627dd1a5c 100644 --- a/ibm/service/contextbasedrestrictions/resource_ibm_cbr_zone_test.go +++ b/ibm/service/contextbasedrestrictions/resource_ibm_cbr_zone_test.go @@ -37,10 +37,10 @@ func TestAccIBMCbrZoneBasic(t *testing.T) { func TestAccIBMCbrZoneAllArgs(t *testing.T) { var conf contextbasedrestrictionsv1.Zone name := fmt.Sprintf("tf_name_%d", acctest.RandIntRange(10, 100)) - accountID := fmt.Sprintf("12ab34cd56ef78ab90cd12ef34ab56cd") + accountID := testAccountID description := fmt.Sprintf("tf_description_%d", acctest.RandIntRange(10, 100)) nameUpdate := fmt.Sprintf("tf_name_%d", acctest.RandIntRange(10, 100)) - accountIDUpdate := fmt.Sprintf("12ab34cd56ef78ab90cd12ef34ab56cd") + accountIDUpdate := testAccountID descriptionUpdate := fmt.Sprintf("tf_description_%d", acctest.RandIntRange(10, 100)) resource.Test(t, resource.TestCase{ @@ -79,13 +79,13 @@ func testAccCheckIBMCbrZoneConfigBasic() string { resource "ibm_cbr_zone" "cbr_zone" { name = "Test Zone Resource Config Basic" description = "Test Zone Resource Config Basic" - account_id = "12ab34cd56ef78ab90cd12ef34ab56cd" + account_id = "%s" addresses { type = "ipRange" value = "169.23.22.0-169.23.22.255" } } - `) + `, testAccountID) } func testAccCheckIBMCbrZoneConfig(name string, accountID string, description string) string { diff --git a/ibm/service/cos/resource_ibm_cos_bucket.go b/ibm/service/cos/resource_ibm_cos_bucket.go index abcc543635..fabf40bc9d 100644 --- a/ibm/service/cos/resource_ibm_cos_bucket.go +++ b/ibm/service/cos/resource_ibm_cos_bucket.go @@ -20,6 +20,7 @@ import ( token "github.com/IBM/ibm-cos-sdk-go/aws/credentials/ibmiam/token" "github.com/IBM/ibm-cos-sdk-go/aws/session" "github.com/IBM/ibm-cos-sdk-go/service/s3" + rc "github.com/IBM/platform-services-go-sdk/resourcecontrollerv2" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -1550,7 +1551,26 @@ func resourceIBMCOSBucketExists(d *schema.ResourceData, meta interface{}) (bool, if len(bucket_meta) < 2 || len(strings.Split(bucket_meta[1], ":")) < 2 { return false, fmt.Errorf("[ERROR] Error parsing bucket ID. 
Bucket ID format must be: $CRN:meta:$buckettype:$bucketlocation") } - + resourceInstanceId := strings.Split(d.Id(), ":bucket:")[0] + resourceInstanceIdInput := resourceInstanceId + "::" + resourceInstanceGet := rc.GetResourceInstanceOptions{ + ID: &resourceInstanceIdInput, + } + rsConClientV2, errConf := meta.(conns.ClientSession).ResourceControllerV2API() + if errConf != nil { + return false, errConf + } + instance, resp, err := rsConClientV2.GetResourceInstance(&resourceInstanceGet) + if err != nil { + if resp != nil && resp.StatusCode == 404 { + return false, nil + } + return false, fmt.Errorf("[WARN] Error getting resource instance from cos bucket: %s with resp code: %s", err, resp) + } + if instance != nil && (strings.Contains(*instance.State, "removed") || strings.Contains(*instance.State, "pending_reclamation")) { + log.Printf("[WARN] Removing instance from state because it's in removed or pending_reclamation state from the cos bucket resource") + return false, nil + } bucketName := parseBucketId(d.Id(), "bucketName") serviceID := parseBucketId(d.Id(), "serviceID") diff --git a/ibm/service/database/data_source_ibm_database.go b/ibm/service/database/data_source_ibm_database.go index be79a83bf2..2ab901252f 100644 --- a/ibm/service/database/data_source_ibm_database.go +++ b/ibm/service/database/data_source_ibm_database.go @@ -280,6 +280,29 @@ func DataSourceIBMDatabaseInstance() *schema.Resource { }, }, }, + "host_flavor": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + Description: "The host flavor id", + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: "The host flavor name", + }, + "hosting_size": { + Type: schema.TypeString, + Computed: true, + Description: "The host flavor size", + }, + }, + }, + }, }, }, }, @@ -624,12 +647,6 @@ func dataSourceIBMDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) return fmt.Errorf("[ERROR] Error getting database client settings: %s", err) } - icdClient, err := meta.(conns.ClientSession).ICDAPI() - if err != nil { - return fmt.Errorf("[ERROR] Error getting database client settings: %s", err) - } - - icdId := flex.EscapeUrlParm(instance.ID) getDeploymentInfoOptions := &clouddatabasesv5.GetDeploymentInfoOptions{ ID: core.StringPtr(instance.ID), } @@ -651,7 +668,11 @@ func dataSourceIBMDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) d.Set("platform_options", flex.ExpandPlatformOptions(*deployment)) } - groupList, err := icdClient.Groups().GetGroups(icdId) + listDeploymentScalingGroupsOptions := &clouddatabasesv5.ListDeploymentScalingGroupsOptions{ + ID: core.StringPtr(instance.ID), + } + + groupList, _, err := cloudDatabasesClient.ListDeploymentScalingGroups(listDeploymentScalingGroupsOptions) if err != nil { return fmt.Errorf("[ERROR] Error getting database groups: %s", err) } diff --git a/ibm/service/database/resource_ibm_database.go b/ibm/service/database/resource_ibm_database.go index aa2808dcbe..180823b07f 100644 --- a/ibm/service/database/resource_ibm_database.go +++ b/ibm/service/database/resource_ibm_database.go @@ -14,6 +14,7 @@ import ( "reflect" "regexp" "sort" + "strconv" "strings" "time" @@ -57,20 +58,58 @@ const ( ) const ( - redisRBACRoleRegexPattern = `([+-][a-z]+\s?)+` + redisRBACRoleRegexPattern = `[+-]@(?P<role>[a-z]+)` ) type DatabaseUser struct { Username string Password string - Role string + Role *string Type string } +type databaseUserValidationError struct { + user
*DatabaseUser + errs []error +} + +func (e *databaseUserValidationError) Error() string { + if len(e.errs) == 0 { + return "" + } + + var b []byte + for i, err := range e.errs { + if i > 0 { + b = append(b, '\n') + } + b = append(b, err.Error()...) + } + + return fmt.Sprintf("database user (%s) validation error:\n%s", e.user.Username, string(b)) +} + +func (e *databaseUserValidationError) Unwrap() error { + if e == nil || len(e.errs) == 0 { + return nil + } + + // only return the first + return e.errs[0] +} + type userChange struct { Old, New *DatabaseUser } +func redisRBACAllowedRoles() []string { + return []string{"all", "admin", "read", "write"} +} + +func opsManagerRoles() []string { + return []string{"group_read_only", "group_data_access_admin"} +} + func retry(f func() error) (err error) { attempts := 3 @@ -296,11 +335,10 @@ func ResourceIBMDatabaseInstance() *schema.Resource { ValidateFunc: validation.StringInSlice([]string{"database", "ops_manager", "read_only_replica"}, false), }, "role": { - Description: "User role. Only available for ops_manager user type.", - Type: schema.TypeString, - Optional: true, - Sensitive: false, - ValidateFunc: validation.StringInSlice([]string{"group_read_only", "group_data_access_admin"}, false), + Description: "User role. Only available for ops_manager user type and Redis 6.0 and above.", + Type: schema.TypeString, + Optional: true, + Sensitive: false, }, }, }, @@ -492,6 +530,19 @@ func ResourceIBMDatabaseInstance() *schema.Resource { }, }, }, + "host_flavor": { + Optional: true, + Type: schema.TypeSet, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, }, }, }, @@ -624,6 +675,29 @@ func ResourceIBMDatabaseInstance() *schema.Resource { }, }, }, + "host_flavor": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + Description: "The host flavor id", + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: "The host flavor name", + }, + "hosting_size": { + Type: schema.TypeString, + Computed: true, + Description: "The host flavor size", + }, + }, + }, + }, }, }, }, @@ -893,6 +967,7 @@ type Params struct { Memory int `json:"members_memory_allocation_mb,omitempty"` Disk int `json:"members_disk_allocation_mb,omitempty"` CPU int `json:"members_cpu_allocation_count,omitempty"` + HostFlavor string `json:"members_host_flavor,omitempty"` KeyProtectInstance string `json:"disk_encryption_instance_crn,omitempty"` ServiceEndpoints string `json:"service-endpoints,omitempty"` BackupID string `json:"backup-id,omitempty"` @@ -903,11 +978,12 @@ type Params struct { } type Group struct { - ID string - Members *GroupResource - Memory *GroupResource - Disk *GroupResource - CPU *GroupResource + ID string + Members *GroupResource + Memory *GroupResource + Disk *GroupResource + CPU *GroupResource + HostFlavor *HostFlavorGroupResource } type GroupResource struct { @@ -921,6 +997,10 @@ type GroupResource struct { CanScaleDown bool } +type HostFlavorGroupResource struct { + ID string +} + func getDefaultScalingGroups(_service string, _plan string, meta interface{}) (groups []clouddatabasesv5.Group, err error) { cloudDatabasesClient, err := meta.(conns.ClientSession).CloudDatabasesV5() if err != nil { @@ -1174,6 +1254,10 @@ func resourceIBMDatabaseInstanceCreate(context context.Context, d *schema.Resour if memberGroup.CPU != nil { params.CPU = 
memberGroup.CPU.Allocation * initialNodeCount } + + if memberGroup.HostFlavor != nil { + params.HostFlavor = memberGroup.HostFlavor.ID + } } } if version, ok := d.GetOk("version"); ok { @@ -1278,8 +1362,11 @@ func resourceIBMDatabaseInstanceCreate(context context.Context, d *schema.Resour if g.CPU != nil && g.CPU.Allocation*nodeCount != currentGroup.CPU.Allocation { groupScaling.CPU = &clouddatabasesv5.GroupScalingCPU{AllocationCount: core.Int64Ptr(int64(g.CPU.Allocation * nodeCount))} } + if g.HostFlavor != nil && g.HostFlavor.ID != currentGroup.HostFlavor.ID { + groupScaling.HostFlavor = &clouddatabasesv5.GroupScalingHostFlavor{ID: core.StringPtr(g.HostFlavor.ID)} + } - if groupScaling.Members != nil || groupScaling.Memory != nil || groupScaling.Disk != nil || groupScaling.CPU != nil { + if groupScaling.Members != nil || groupScaling.Memory != nil || groupScaling.Disk != nil || groupScaling.CPU != nil || groupScaling.HostFlavor != nil { setDeploymentScalingGroupOptions := &clouddatabasesv5.SetDeploymentScalingGroupOptions{ ID: instance.ID, GroupID: &g.ID, @@ -1330,23 +1417,23 @@ func resourceIBMDatabaseInstanceCreate(context context.Context, d *schema.Resour adminUser := deployment.AdminUsernames["database"] - user := &clouddatabasesv5.APasswordSettingUser{ + user := &clouddatabasesv5.UserUpdatePasswordSetting{ Password: &adminPassword, } - changeUserPasswordOptions := &clouddatabasesv5.ChangeUserPasswordOptions{ + updateUserOptions := &clouddatabasesv5.UpdateUserOptions{ ID: core.StringPtr(instanceID), UserType: core.StringPtr("database"), Username: core.StringPtr(adminUser), User: user, } - changeUserPasswordResponse, response, err := cloudDatabasesClient.ChangeUserPassword(changeUserPasswordOptions) + updateUserResponse, response, err := cloudDatabasesClient.UpdateUser(updateUserOptions) if err != nil { - return diag.FromErr(fmt.Errorf("[ERROR] ChangeUserPassword (%s) failed %s\n%s", *changeUserPasswordOptions.Username, err, response)) + return diag.FromErr(fmt.Errorf("[ERROR] UpdateUser (%s) failed %s\n%s", *updateUserOptions.Username, err, response)) } - taskID := *changeUserPasswordResponse.Task.ID + taskID := *updateUserResponse.Task.ID _, err = waitForDatabaseTaskComplete(taskID, d, meta, d.Timeout(schema.TimeoutCreate)) if err != nil { @@ -1631,11 +1718,14 @@ func resourceIBMDatabaseInstanceRead(context context.Context, d *schema.Resource d.Set("adminuser", deployment.AdminUsernames["database"]) d.Set("version", deployment.Version) - groupList, err := icdClient.Groups().GetGroups(icdId) + listDeploymentScalingGroupsOptions := &clouddatabasesv5.ListDeploymentScalingGroupsOptions{ + ID: core.StringPtr(instanceID), + } + groupList, _, err := cloudDatabasesClient.ListDeploymentScalingGroups(listDeploymentScalingGroupsOptions) if err != nil { return diag.FromErr(fmt.Errorf("[ERROR] Error getting database groups: %s", err)) } - if groupList.Groups[0].Members.AllocationCount == 0 { + if len(groupList.Groups) == 0 || groupList.Groups[0].Members == nil || groupList.Groups[0].Members.AllocationCount == nil || *groupList.Groups[0].Members.AllocationCount == 0 { return diag.FromErr(fmt.Errorf("[ERROR] This database appears to have have 0 members. 
Unable to proceed")) } @@ -1844,8 +1934,11 @@ func resourceIBMDatabaseInstanceUpdate(context context.Context, d *schema.Resour if group.CPU != nil && group.CPU.Allocation*nodeCount != currentGroup.CPU.Allocation { groupScaling.CPU = &clouddatabasesv5.GroupScalingCPU{AllocationCount: core.Int64Ptr(int64(group.CPU.Allocation * nodeCount))} } + if group.HostFlavor != nil && group.HostFlavor.ID != currentGroup.HostFlavor.ID { + groupScaling.HostFlavor = &clouddatabasesv5.GroupScalingHostFlavor{ID: core.StringPtr(group.HostFlavor.ID)} + } - if groupScaling.Members != nil || groupScaling.Memory != nil || groupScaling.Disk != nil || groupScaling.CPU != nil { + if groupScaling.Members != nil || groupScaling.Memory != nil || groupScaling.Disk != nil || groupScaling.CPU != nil || groupScaling.HostFlavor != nil { setDeploymentScalingGroupOptions := &clouddatabasesv5.SetDeploymentScalingGroupOptions{ ID: &instanceID, GroupID: &group.ID, @@ -1919,23 +2012,24 @@ func resourceIBMDatabaseInstanceUpdate(context context.Context, d *schema.Resour if d.HasChange("adminpassword") { adminUser := d.Get("adminuser").(string) password := d.Get("adminpassword").(string) - user := &clouddatabasesv5.APasswordSettingUser{ + + user := &clouddatabasesv5.UserUpdatePasswordSetting{ Password: &password, } - changeUserPasswordOptions := &clouddatabasesv5.ChangeUserPasswordOptions{ + updateUserOptions := &clouddatabasesv5.UpdateUserOptions{ ID: core.StringPtr(instanceID), UserType: core.StringPtr("database"), Username: core.StringPtr(adminUser), User: user, } - changeUserPasswordResponse, response, err := cloudDatabasesClient.ChangeUserPassword(changeUserPasswordOptions) + updateUserResponse, response, err := cloudDatabasesClient.UpdateUser(updateUserOptions) if err != nil { - return diag.FromErr(fmt.Errorf("[ERROR] ChangeUserPassword (%s) failed %s\n%s", *changeUserPasswordOptions.Username, err, response)) + return diag.FromErr(fmt.Errorf("[ERROR] UpdateUser (%s) failed %s\n%s", *updateUserOptions.Username, err, response)) } - taskID := *changeUserPasswordResponse.Task.ID + taskID := *updateUserResponse.Task.ID _, err = waitForDatabaseTaskComplete(taskID, d, meta, d.Timeout(schema.TimeoutUpdate)) if err != nil { @@ -2256,7 +2350,7 @@ func waitForICDReady(meta interface{}, instanceID string) error { if apiErr, ok := err.(bmxerror.RequestFailure); ok && apiErr.StatusCode() == 404 { return fmt.Errorf("[ERROR] The database instance was not found in the region set for the Provider, or the default of us-south. Specify the correct region in the provider definition, or create a provider alias for the correct region. 
%v", err) } - return fmt.Errorf("[ERROR] Error getting database config for: %s with error %s\n", icdId, err) + return fmt.Errorf("[ERROR] Error getting database config for: %s with error %s\n", icdId, cdbErr) } return nil }) @@ -2640,6 +2734,11 @@ func normalizeGroups(_groups []clouddatabasesv5.Group) (groups []Group) { CanScaleDown: *g.CPU.CanScaleDown, } + group.HostFlavor = &HostFlavorGroupResource{} + if g.HostFlavor != nil { + group.HostFlavor.ID = *g.HostFlavor.ID + } + groups = append(groups, group) } @@ -2685,6 +2784,13 @@ func expandGroups(_groups []interface{}) []*Group { } } + if hostflavorSet, ok := tfGroup["host_flavor"].(*schema.Set); ok { + hostflavor := hostflavorSet.List() + if len(hostflavor) != 0 { + group.HostFlavor = &HostFlavorGroupResource{ID: hostflavor[0].(map[string]interface{})["id"].(string)} + } + } + groups = append(groups, &group) } } @@ -2724,6 +2830,13 @@ func validateGroupScaling(groupId string, resourceName string, value int, resour return nil } +func validateGroupHostFlavor(groupId string, resourceName string, group *Group) error { + if group.CPU != nil || group.Memory != nil { + return fmt.Errorf("%s must not be set with cpu and memory", resourceName) + } + return nil +} + func validateGroupsDiff(_ context.Context, diff *schema.ResourceDiff, meta interface{}) (err error) { instanceID := diff.Id() service := diff.Get("service").(string) @@ -2786,6 +2899,13 @@ func validateGroupsDiff(_ context.Context, diff *schema.ResourceDiff, meta inter } } + if group.HostFlavor != nil && group.HostFlavor.ID != "" && group.HostFlavor.ID != "multitenant" { + err = validateGroupHostFlavor(groupId, "host_flavor", group) + if err != nil { + return err + } + } + if group.Memory != nil { err = validateGroupScaling(groupId, "memory", group.Memory.Allocation, groupDefaults.Memory, nodeCount) if err != nil { @@ -2813,6 +2933,28 @@ func validateGroupsDiff(_ context.Context, diff *schema.ResourceDiff, meta inter } func validateUsersDiff(_ context.Context, diff *schema.ResourceDiff, meta interface{}) (err error) { + service := diff.Get("service").(string) + + var versionStr string + var version int + + if _version, ok := diff.GetOk("version"); ok { + versionStr = _version.(string) + } + + if versionStr == "" { + // Latest Version + version = 0 + } else { + _v, err := strconv.ParseFloat(versionStr, 64) + + if err != nil { + return fmt.Errorf("invalid version: %s", versionStr) + } + + version = int(_v) + } + oldUsers, newUsers := diff.GetChange("users") userChanges := expandUserChanges(oldUsers.(*schema.Set).List(), newUsers.(*schema.Set).List()) @@ -2822,14 +2964,34 @@ func validateUsersDiff(_ context.Context, diff *schema.ResourceDiff, meta interf } if change.isCreate() || change.isUpdate() { - err = change.New.Validate() + err = change.New.ValidatePassword() + + if err != nil { + return err + } + + // TODO: Use Capability API + // RBAC roles supported for Redis 6.0 and above + if service == "databases-for-redis" && !(version > 0 && version < 6) { + err = change.New.ValidateRBACRole() + } else if service == "databases-for-mongodb" && change.New.Type == "ops_manager" { + err = change.New.ValidateOpsManagerRole() + } else { + if change.New.Role != nil { + if *change.New.Role != "" { + err = errors.New("role is not supported for this deployment or user type") + err = &databaseUserValidationError{user: change.New, errs: []error{err}} + } + } + } + if err != nil { return err } } } - return nil + return } func expandUsers(_users []interface{}) []*DatabaseUser { @@ -2845,10 +3007,17 @@ 
func expandUsers(_users []interface{}) []*DatabaseUser { user := DatabaseUser{ Username: tfUser["name"].(string), Password: tfUser["password"].(string), - Role: tfUser["role"].(string), Type: tfUser["type"].(string), } + // NOTE: cannot differentiate nil vs empty string + // https://github.com/hashicorp/terraform-plugin-sdk/issues/741 + if role, ok := tfUser["role"].(string); ok { + if tfUser["role"] != "" { + user.Role = &role + } + } + users = append(users, &user) } } @@ -2875,8 +3044,8 @@ func expandUserChanges(_oldUsers []interface{}, _newUsers []interface{}) (userCh userChanges = make([]*userChange, 0, len(userChangeMap)) - for _, user := range userChangeMap { - userChanges = append(userChanges, user) + for _, change := range userChangeMap { + userChanges = append(userChanges, change) } return userChanges @@ -2913,9 +3082,9 @@ func (u *DatabaseUser) Create(instanceID string, d *schema.ResourceData, meta in Password: core.StringPtr(u.Password), } - // User Role only for ops_manager user type - if u.Type == "ops_manager" && u.Role != "" { - userEntry.Role = core.StringPtr(u.Role) + // User Role only for ops_manager user type and Redis 6.0 and above + if u.Role != nil { + userEntry.Role = u.Role } createDatabaseUserOptions := &clouddatabasesv5.CreateDatabaseUserOptions{ @@ -2947,30 +3116,34 @@ func (u *DatabaseUser) Update(instanceID string, d *schema.ResourceData, meta in } // Attempt to update user password - passwordSettingUser := &clouddatabasesv5.APasswordSettingUser{ + user := &clouddatabasesv5.UserUpdate{ Password: core.StringPtr(u.Password), } - changeUserPasswordOptions := &clouddatabasesv5.ChangeUserPasswordOptions{ + if u.Role != nil { + user.Role = u.Role + } + + updateUserOptions := &clouddatabasesv5.UpdateUserOptions{ ID: &instanceID, UserType: core.StringPtr(u.Type), Username: core.StringPtr(u.Username), - User: passwordSettingUser, + User: user, } - changeUserPasswordResponse, response, err := cloudDatabasesClient.ChangeUserPassword(changeUserPasswordOptions) + updateUserResponse, response, err := cloudDatabasesClient.UpdateUser(updateUserOptions) // user was found but an error occurs while triggering task if err != nil || (response.StatusCode < 200 || response.StatusCode >= 300) { - return fmt.Errorf("[ERROR] ChangeUserPassword (%s) failed %w\n%s", *changeUserPasswordOptions.Username, err, response) + return fmt.Errorf("[ERROR] UpdateUser (%s) failed %w\n%s", *updateUserOptions.Username, err, response) } - taskID := *changeUserPasswordResponse.Task.ID + taskID := *updateUserResponse.Task.ID _, err = waitForDatabaseTaskComplete(taskID, d, meta, d.Timeout(schema.TimeoutUpdate)) if err != nil { return fmt.Errorf( - "[ERROR] Error waiting for database (%s) user (%s) create task to complete: %w", instanceID, *changeUserPasswordOptions.Username, err) + "[ERROR] Error waiting for database (%s) user (%s) create task to complete: %w", instanceID, *updateUserOptions.Username, err) } return nil @@ -3011,7 +3184,7 @@ func (u *DatabaseUser) isUpdatable() bool { return u.Type != "ops_manager" } -func (u *DatabaseUser) Validate() error { +func (u *DatabaseUser) ValidatePassword() (err error) { var errs []error var specialChars string @@ -3063,24 +3236,84 @@ func (u *DatabaseUser) Validate() error { } if len(errs) == 0 { - return nil + return } - var b []byte - for i, err := range errs { - if i > 0 { - b = append(b, '\n') + return &databaseUserValidationError{user: u, errs: errs} +} + +func (u *DatabaseUser) ValidateRBACRole() (err error) { + var errs []error + + if u.Role == nil || 
*u.Role == "" { + return + } + + if u.Type != "database" { + errs = append(errs, errors.New("role is only allowed for the database user")) + return &databaseUserValidationError{user: u, errs: errs} + } + + redisRBACCategoryRegex := regexp.MustCompile(redisRBACRoleRegexPattern) + redisRBACRoleRegex := regexp.MustCompile(fmt.Sprintf(`^(%s\s?)+$`, redisRBACRoleRegexPattern)) + + if !redisRBACRoleRegex.MatchString(*u.Role) { + errs = append(errs, errors.New("role must be in the format +@category or -@category")) + } + + matches := redisRBACCategoryRegex.FindAllStringSubmatch(*u.Role, -1) + + for _, match := range matches { + valid := false + role := match[1] + for _, allowed := range redisRBACAllowedRoles() { + if role == allowed { + valid = true + break + } } - b = append(b, err.Error()...) + + if !valid { + errs = append(errs, fmt.Errorf("role must contain only allowed categories: %s", strings.Join(redisRBACAllowedRoles()[:], ","))) + break + } + } + + if len(errs) == 0 { + return + } + + return &databaseUserValidationError{user: u, errs: errs} +} + +func (u *DatabaseUser) ValidateOpsManagerRole() (err error) { + if u.Role == nil { + return + } + + if u.Type != "ops_manager" { + return + } + + if *u.Role == "" { + return } - return fmt.Errorf("database user (%s) validation error:\n%w", u.Username, errors.New(string(b))) + for _, str := range opsManagerRoles() { + if *u.Role == str { + return + } + } + + err = fmt.Errorf("role must be a valid ops_manager role: %s", strings.Join(opsManagerRoles()[:], ",")) + + return &databaseUserValidationError{user: u, errs: []error{err}} } func DatabaseUserPasswordValidator(userType string) schema.SchemaValidateFunc { return func(i interface{}, k string) (warnings []string, errors []error) { user := &DatabaseUser{Username: "admin", Type: userType, Password: i.(string)} - err := user.Validate() + err := user.ValidatePassword() if err != nil { errors = append(errors, err) } diff --git a/ibm/service/database/resource_ibm_database_edb_test.go b/ibm/service/database/resource_ibm_database_edb_test.go index f4ec044be2..b4a7d149d9 100644 --- a/ibm/service/database/resource_ibm_database_edb_test.go +++ b/ibm/service/database/resource_ibm_database_edb_test.go @@ -36,7 +36,7 @@ func TestAccIBMEDBDatabaseInstanceBasic(t *testing.T) { resource.TestCheckResourceAttr(name, "plan", "standard"), resource.TestCheckResourceAttr(name, "location", acc.Region()), resource.TestCheckResourceAttr(name, "adminuser", "admin"), - resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "3072"), + resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "49152"), resource.TestCheckResourceAttr(name, "groups.0.disk.0.allocation_mb", "61440"), resource.TestCheckResourceAttr(name, "service_endpoints", "public"), resource.TestCheckResourceAttr(name, "allowlist.#", "1"), @@ -56,7 +56,7 @@ func TestAccIBMEDBDatabaseInstanceBasic(t *testing.T) { resource.TestCheckResourceAttr(name, "service", "databases-for-enterprisedb"), resource.TestCheckResourceAttr(name, "plan", "standard"), resource.TestCheckResourceAttr(name, "location", acc.Region()), - resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "6144"), + resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "98304"), resource.TestCheckResourceAttr(name, "groups.0.disk.0.allocation_mb", "92160"), resource.TestCheckResourceAttr(name, "service_endpoints", "public-and-private"), resource.TestCheckResourceAttr(name, "allowlist.#", "2"), @@ -79,7 +79,7 @@ func 
TestAccIBMEDBDatabaseInstanceBasic(t *testing.T) { resource.TestCheckResourceAttr(name, "service", "databases-for-enterprisedb"), resource.TestCheckResourceAttr(name, "plan", "standard"), resource.TestCheckResourceAttr(name, "location", acc.Region()), - resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "3072"), + resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "49152"), resource.TestCheckResourceAttr(name, "groups.0.disk.0.allocation_mb", "92160"), resource.TestCheckResourceAttr(name, "allowlist.#", "0"), resource.TestCheckResourceAttr(name, "users.#", "0"), @@ -113,8 +113,8 @@ func testAccCheckIBMDatabaseInstanceEDBBasic(databaseResourceGroup string, name adminpassword = "password12345678" group { group_id = "member" - memory { - allocation_mb = 1024 + host_flavor { + id = "b3c.4x16.encrypted" } disk { allocation_mb = 20480 @@ -153,15 +153,12 @@ func testAccCheckIBMDatabaseInstanceEDBFullyspecified(databaseResourceGroup stri adminpassword = "password12345678" group { group_id = "member" - memory { - allocation_mb = 2048 + host_flavor { + id = "b3c.8x32.encrypted" } disk { allocation_mb = 30720 } - cpu { - allocation_count = 4 - } } service_endpoints = "public-and-private" tags = ["one:two"] @@ -205,8 +202,8 @@ func testAccCheckIBMDatabaseInstanceEDBReduced(databaseResourceGroup string, nam adminpassword = "password12345678" group { group_id = "member" - memory { - allocation_mb = 1024 + host_flavor { + id = "b3c.4x16.encrypted" } disk { allocation_mb = 30720 diff --git a/ibm/service/database/resource_ibm_database_elasticsearch_platinum_test.go b/ibm/service/database/resource_ibm_database_elasticsearch_platinum_test.go index 114400145f..14c18b201d 100644 --- a/ibm/service/database/resource_ibm_database_elasticsearch_platinum_test.go +++ b/ibm/service/database/resource_ibm_database_elasticsearch_platinum_test.go @@ -65,7 +65,7 @@ func TestAccIBMDatabaseInstance_ElasticsearchPlatinum_Basic(t *testing.T) { resource.TestCheckResourceAttr(name, "service", "databases-for-elasticsearch"), resource.TestCheckResourceAttr(name, "plan", "platinum"), resource.TestCheckResourceAttr(name, "location", acc.Region()), - resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "3072"), + resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "49152"), resource.TestCheckResourceAttr(name, "groups.0.disk.0.allocation_mb", "18432"), resource.TestCheckResourceAttr(name, "allowlist.#", "0"), resource.TestCheckResourceAttr(name, "users.#", "0"), @@ -80,7 +80,7 @@ func TestAccIBMDatabaseInstance_ElasticsearchPlatinum_Basic(t *testing.T) { resource.TestCheckResourceAttr(name, "service", "databases-for-elasticsearch"), resource.TestCheckResourceAttr(name, "plan", "platinum"), resource.TestCheckResourceAttr(name, "location", acc.Region()), - resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "3072"), + resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "49152"), resource.TestCheckResourceAttr(name, "groups.0.disk.0.allocation_mb", "18432"), resource.TestCheckResourceAttr(name, "whitelist.#", "0"), resource.TestCheckResourceAttr(name, "users.#", "0"), @@ -118,10 +118,6 @@ func TestAccIBMDatabaseInstance_ElasticsearchPlatinum_Node(t *testing.T) { resource.TestCheckResourceAttr(name, "plan", "platinum"), resource.TestCheckResourceAttr(name, "location", acc.Region()), resource.TestCheckResourceAttr(name, "adminuser", "admin"), - resource.TestCheckResourceAttr(name, "node_count", "3"), - 
resource.TestCheckResourceAttr(name, "node_memory_allocation_mb", "1024"), - resource.TestCheckResourceAttr(name, "node_disk_allocation_mb", "5120"), - resource.TestCheckResourceAttr(name, "node_cpu_allocation_count", "3"), resource.TestCheckResourceAttr(name, "allowlist.#", "1"), resource.TestCheckResourceAttr(name, "users.#", "1"), @@ -139,10 +135,6 @@ func TestAccIBMDatabaseInstance_ElasticsearchPlatinum_Node(t *testing.T) { resource.TestCheckResourceAttr(name, "service", "databases-for-elasticsearch"), resource.TestCheckResourceAttr(name, "plan", "platinum"), resource.TestCheckResourceAttr(name, "location", acc.Region()), - resource.TestCheckResourceAttr(name, "node_count", "3"), - resource.TestCheckResourceAttr(name, "node_memory_allocation_mb", "1024"), - resource.TestCheckResourceAttr(name, "node_disk_allocation_mb", "6144"), - resource.TestCheckResourceAttr(name, "node_cpu_allocation_count", "3"), resource.TestCheckResourceAttr(name, "allowlist.#", "2"), resource.TestCheckResourceAttr(name, "users.#", "2"), resource.TestCheckResourceAttr(name, "connectionstrings.#", "3"), @@ -157,10 +149,6 @@ func TestAccIBMDatabaseInstance_ElasticsearchPlatinum_Node(t *testing.T) { resource.TestCheckResourceAttr(name, "service", "databases-for-elasticsearch"), resource.TestCheckResourceAttr(name, "plan", "platinum"), resource.TestCheckResourceAttr(name, "location", acc.Region()), - resource.TestCheckResourceAttr(name, "node_count", "3"), - resource.TestCheckResourceAttr(name, "node_memory_allocation_mb", "1024"), - resource.TestCheckResourceAttr(name, "node_disk_allocation_mb", "6144"), - resource.TestCheckResourceAttr(name, "node_cpu_allocation_count", "3"), resource.TestCheckResourceAttr(name, "allowlist.#", "0"), resource.TestCheckResourceAttr(name, "users.#", "0"), resource.TestCheckResourceAttr(name, "connectionstrings.#", "1"), @@ -174,10 +162,6 @@ func TestAccIBMDatabaseInstance_ElasticsearchPlatinum_Node(t *testing.T) { resource.TestCheckResourceAttr(name, "service", "databases-for-elasticsearch"), resource.TestCheckResourceAttr(name, "plan", "platinum"), resource.TestCheckResourceAttr(name, "location", acc.Region()), - resource.TestCheckResourceAttr(name, "node_count", "4"), - resource.TestCheckResourceAttr(name, "node_memory_allocation_mb", "1024"), - resource.TestCheckResourceAttr(name, "node_disk_allocation_mb", "6144"), - resource.TestCheckResourceAttr(name, "node_cpu_allocation_count", "3"), resource.TestCheckResourceAttr(name, "allowlist.#", "0"), resource.TestCheckResourceAttr(name, "users.#", "0"), resource.TestCheckResourceAttr(name, "connectionstrings.#", "1"), @@ -214,10 +198,6 @@ func TestAccIBMDatabaseInstance_ElasticsearchPlatinum_Group(t *testing.T) { resource.TestCheckResourceAttr(name, "plan", "platinum"), resource.TestCheckResourceAttr(name, "location", acc.Region()), resource.TestCheckResourceAttr(name, "adminuser", "admin"), - resource.TestCheckResourceAttr(name, "node_count", "3"), - resource.TestCheckResourceAttr(name, "node_memory_allocation_mb", "1024"), - resource.TestCheckResourceAttr(name, "node_disk_allocation_mb", "5120"), - resource.TestCheckResourceAttr(name, "node_cpu_allocation_count", "3"), resource.TestCheckResourceAttr(name, "allowlist.#", "1"), resource.TestCheckResourceAttr(name, "users.#", "1"), @@ -235,14 +215,10 @@ func TestAccIBMDatabaseInstance_ElasticsearchPlatinum_Group(t *testing.T) { resource.TestCheckResourceAttr(name, "service", "databases-for-elasticsearch"), resource.TestCheckResourceAttr(name, "plan", "platinum"), 
resource.TestCheckResourceAttr(name, "location", acc.Region()), - resource.TestCheckResourceAttr(name, "node_count", "3"), - resource.TestCheckResourceAttr(name, "node_memory_allocation_mb", "1024"), - resource.TestCheckResourceAttr(name, "node_disk_allocation_mb", "6144"), - resource.TestCheckResourceAttr(name, "node_cpu_allocation_count", "3"), resource.TestCheckResourceAttr(name, "groups.0.count", "3"), - resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "3072"), + resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "49152"), resource.TestCheckResourceAttr(name, "groups.0.disk.0.allocation_mb", "18432"), - resource.TestCheckResourceAttr(name, "groups.0.cpu.0.allocation_count", "9"), + resource.TestCheckResourceAttr(name, "groups.0.cpu.0.allocation_count", "12"), resource.TestCheckResourceAttr(name, "allowlist.#", "2"), resource.TestCheckResourceAttr(name, "users.#", "2"), resource.TestCheckResourceAttr(name, "connectionstrings.#", "3"), @@ -257,14 +233,10 @@ func TestAccIBMDatabaseInstance_ElasticsearchPlatinum_Group(t *testing.T) { resource.TestCheckResourceAttr(name, "service", "databases-for-elasticsearch"), resource.TestCheckResourceAttr(name, "plan", "platinum"), resource.TestCheckResourceAttr(name, "location", acc.Region()), - resource.TestCheckResourceAttr(name, "node_count", "3"), - resource.TestCheckResourceAttr(name, "node_memory_allocation_mb", "1024"), - resource.TestCheckResourceAttr(name, "node_disk_allocation_mb", "6144"), - resource.TestCheckResourceAttr(name, "node_cpu_allocation_count", "3"), resource.TestCheckResourceAttr(name, "groups.0.count", "3"), - resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "3072"), + resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "49152"), resource.TestCheckResourceAttr(name, "groups.0.disk.0.allocation_mb", "18432"), - resource.TestCheckResourceAttr(name, "groups.0.cpu.0.allocation_count", "9"), + resource.TestCheckResourceAttr(name, "groups.0.cpu.0.allocation_count", "12"), resource.TestCheckResourceAttr(name, "allowlist.#", "0"), resource.TestCheckResourceAttr(name, "users.#", "0"), resource.TestCheckResourceAttr(name, "connectionstrings.#", "1"), @@ -278,14 +250,10 @@ func TestAccIBMDatabaseInstance_ElasticsearchPlatinum_Group(t *testing.T) { resource.TestCheckResourceAttr(name, "service", "databases-for-elasticsearch"), resource.TestCheckResourceAttr(name, "plan", "platinum"), resource.TestCheckResourceAttr(name, "location", acc.Region()), - resource.TestCheckResourceAttr(name, "node_count", "4"), - resource.TestCheckResourceAttr(name, "node_memory_allocation_mb", "1024"), - resource.TestCheckResourceAttr(name, "node_disk_allocation_mb", "6144"), - resource.TestCheckResourceAttr(name, "node_cpu_allocation_count", "3"), resource.TestCheckResourceAttr(name, "groups.0.count", "4"), - resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "4096"), + resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "65536"), resource.TestCheckResourceAttr(name, "groups.0.disk.0.allocation_mb", "24576"), - resource.TestCheckResourceAttr(name, "groups.0.cpu.0.allocation_count", "12"), + resource.TestCheckResourceAttr(name, "groups.0.cpu.0.allocation_count", "16"), resource.TestCheckResourceAttr(name, "allowlist.#", "0"), resource.TestCheckResourceAttr(name, "users.#", "0"), resource.TestCheckResourceAttr(name, "connectionstrings.#", "1"), @@ -355,8 +323,8 @@ func 
testAccCheckIBMDatabaseInstanceElasticsearchPlatinumBasic(databaseResourceG group { group_id = "member" - memory { - allocation_mb = 1024 + host_flavor { + id = "b3c.4x16.encrypted" } disk { allocation_mb = 6144 @@ -394,6 +362,13 @@ func testAccCheckIBMDatabaseInstanceElasticsearchPlatinumFullyspecified(database plan = "platinum" location = "%[3]s" adminpassword = "password12345678" + group { + group_id = "member" + + host_flavor { + id = "b3c.4x16.encrypted" + } + } users { name = "user123" password = "password12345678" @@ -435,6 +410,13 @@ func testAccCheckIBMDatabaseInstanceElasticsearchPlatinumReduced(databaseResourc plan = "platinum" location = "%[3]s" adminpassword = "password12345678" + group { + group_id = "member" + + host_flavor { + id = "b3c.4x16.encrypted" + } + } timeouts { create = "120m" @@ -463,8 +445,8 @@ func testAccCheckIBMDatabaseInstanceElasticsearchPlatinumGroupMigration(database group { group_id = "member" - memory { - allocation_mb = 1024 + host_flavor { + id = "b3c.4x16.encrypted" } disk { allocation_mb = 6144 @@ -499,15 +481,12 @@ func testAccCheckIBMDatabaseInstanceElasticsearchPlatinumNodeBasic(databaseResou members { allocation_count = 3 } - memory { - allocation_mb = 1024 + host_flavor { + id = "b3c.4x16.encrypted" } disk { allocation_mb = 5120 } - cpu { - allocation_count = 3 - } } users { @@ -547,15 +526,12 @@ func testAccCheckIBMDatabaseInstanceElasticsearchPlatinumNodeFullyspecified(data members { allocation_count = 3 } - memory { - allocation_mb = 1024 + host_flavor { + id = "b3c.4x16.encrypted" } disk { allocation_mb = 6144 } - cpu { - allocation_count = 3 - } } users { name = "user123" @@ -602,15 +578,12 @@ func testAccCheckIBMDatabaseInstanceElasticsearchPlatinumNodeReduced(databaseRes members { allocation_count = 3 } - memory { - allocation_mb = 1024 + host_flavor { + id = "b3c.4x16.encrypted" } disk { allocation_mb = 6144 } - cpu { - allocation_count = 3 - } } timeouts { @@ -641,15 +614,12 @@ func testAccCheckIBMDatabaseInstanceElasticsearchPlatinumNodeScaleOut(databaseRe members { allocation_count = 4 } - memory { - allocation_mb = 1024 + host_flavor { + id = "b3c.4x16.encrypted" } disk { allocation_mb = 6144 } - cpu { - allocation_count = 3 - } } timeouts { @@ -681,15 +651,12 @@ func testAccCheckIBMDatabaseInstanceElasticsearchPlatinumGroupBasic(databaseReso members { allocation_count = 3 } - memory { - allocation_mb = 1024 + host_flavor { + id = "b3c.4x16.encrypted" } disk { allocation_mb = 5120 } - cpu { - allocation_count = 3 - } } users { @@ -730,15 +697,12 @@ func testAccCheckIBMDatabaseInstanceElasticsearchPlatinumGroupFullyspecified(dat members { allocation_count = 3 } - memory { - allocation_mb = 1024 + host_flavor { + id = "b3c.4x16.encrypted" } disk { allocation_mb = 6144 } - cpu { - allocation_count = 3 - } } users { name = "user123" @@ -787,15 +751,12 @@ func testAccCheckIBMDatabaseInstanceElasticsearchPlatinumGroupReduced(databaseRe members { allocation_count = 3 } - memory { - allocation_mb = 1024 + host_flavor { + id = "b3c.4x16.encrypted" } disk { allocation_mb = 6144 } - cpu { - allocation_count = 3 - } } timeouts { @@ -827,15 +788,12 @@ func testAccCheckIBMDatabaseInstanceElasticsearchPlatinumGroupScaleOut(databaseR members { allocation_count = 4 } - memory { - allocation_mb = 1024 + host_flavor { + id = "b3c.4x16.encrypted" } disk { allocation_mb = 6144 } - cpu { - allocation_count = 3 - } } timeouts { create = "120m" diff --git a/ibm/service/database/resource_ibm_database_elasticsearch_test.go 
b/ibm/service/database/resource_ibm_database_elasticsearch_test.go index 2a6059f36a..213463b0ee 100644 --- a/ibm/service/database/resource_ibm_database_elasticsearch_test.go +++ b/ibm/service/database/resource_ibm_database_elasticsearch_test.go @@ -78,7 +78,7 @@ func TestAccIBMDatabaseInstance_Elasticsearch_Basic(t *testing.T) { resource.TestCheckResourceAttr(name, "service", "databases-for-elasticsearch"), resource.TestCheckResourceAttr(name, "plan", "standard"), resource.TestCheckResourceAttr(name, "location", acc.Region()), - resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "3072"), + resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "6144"), resource.TestCheckResourceAttr(name, "groups.0.disk.0.allocation_mb", "18432"), resource.TestCheckResourceAttr(name, "users.#", "0"), resource.TestCheckResourceAttr(name, "connectionstrings.#", "1"), @@ -326,6 +326,16 @@ func testAccCheckIBMDatabaseInstanceElasticsearchBasic(databaseResourceGroup str description = "desc1" } + group { + group_id = "member" + memory { + allocation_mb = 2048 + } + host_flavor { + id = "multitenant" + } + } + timeouts { create = "120m" update = "120m" @@ -366,6 +376,16 @@ func testAccCheckIBMDatabaseInstanceElasticsearchFullyspecified(databaseResource description = "desc" } + group { + group_id = "member" + memory { + allocation_mb = 2048 + } + host_flavor { + id = "multitenant" + } + } + timeouts { create = "120m" update = "120m" @@ -391,6 +411,16 @@ func testAccCheckIBMDatabaseInstanceElasticsearchReduced(databaseResourceGroup s location = "%[3]s" adminpassword = "password12345678" + group { + group_id = "member" + memory { + allocation_mb = 2048 + } + host_flavor { + id = "multitenant" + } + } + timeouts { create = "120m" update = "120m" @@ -419,7 +449,10 @@ func testAccCheckIBMDatabaseInstanceElasticsearchGroupMigration(databaseResource group_id = "member" memory { - allocation_mb = 1024 + allocation_mb = 2048 + } + host_flavor { + id = "multitenant" } disk { allocation_mb = 6144 @@ -464,6 +497,9 @@ func testAccCheckIBMDatabaseInstanceElasticsearchNodeBasic(databaseResourceGroup cpu { allocation_count = 3 } + host_flavor { + id = "multitenant" + } } users { name = "user123" @@ -511,6 +547,9 @@ func testAccCheckIBMDatabaseInstanceElasticsearchNodeFullyspecified(databaseReso cpu { allocation_count = 3 } + host_flavor { + id = "multitenant" + } } users { name = "user123" @@ -566,6 +605,9 @@ func testAccCheckIBMDatabaseInstanceElasticsearchNodeReduced(databaseResourceGro cpu { allocation_count = 3 } + host_flavor { + id = "multitenant" + } } timeouts { @@ -605,6 +647,9 @@ func testAccCheckIBMDatabaseInstanceElasticsearchNodeScaleOut(databaseResourceGr cpu { allocation_count = 3 } + host_flavor { + id = "multitenant" + } } timeouts { @@ -645,6 +690,9 @@ func testAccCheckIBMDatabaseInstanceElasticsearchGroupBasic(databaseResourceGrou cpu { allocation_count = 3 } + host_flavor { + id = "multitenant" + } } users { @@ -694,6 +742,9 @@ func testAccCheckIBMDatabaseInstanceElasticsearchGroupFullyspecified(databaseRes cpu { allocation_count = 3 } + host_flavor { + id = "multitenant" + } } users { name = "user123" @@ -751,6 +802,9 @@ func testAccCheckIBMDatabaseInstanceElasticsearchGroupReduced(databaseResourceGr cpu { allocation_count = 3 } + host_flavor { + id = "multitenant" + } } timeouts { @@ -791,6 +845,9 @@ func testAccCheckIBMDatabaseInstanceElasticsearchGroupScaleOut(databaseResourceG cpu { allocation_count = 3 } + host_flavor { + id = "multitenant" + } } timeouts { create = 
"120m" diff --git a/ibm/service/database/resource_ibm_database_etcd_test.go b/ibm/service/database/resource_ibm_database_etcd_test.go index 07e2eea5e6..b289705b93 100644 --- a/ibm/service/database/resource_ibm_database_etcd_test.go +++ b/ibm/service/database/resource_ibm_database_etcd_test.go @@ -55,7 +55,7 @@ func TestAccIBMDatabaseInstance_Etcd_Basic(t *testing.T) { resource.TestCheckResourceAttr(name, "service", "databases-for-etcd"), resource.TestCheckResourceAttr(name, "plan", "standard"), resource.TestCheckResourceAttr(name, "location", acc.Region()), - resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "18432"), + resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "49152"), resource.TestCheckResourceAttr(name, "groups.0.disk.0.allocation_mb", "193536"), resource.TestCheckResourceAttr(name, "allowlist.#", "2"), resource.TestCheckResourceAttr(name, "users.#", "2"), @@ -137,6 +137,9 @@ func testAccCheckIBMDatabaseInstanceEtcdBasic(databaseResourceGroup string, name memory { allocation_mb = 3072 } + host_flavor { + id = "multitenant" + } disk { allocation_mb = 61440 } @@ -169,8 +172,8 @@ func testAccCheckIBMDatabaseInstanceEtcdFullyspecified(databaseResourceGroup str adminpassword = "password12345678" group { group_id = "member" - memory { - allocation_mb = 6144 + host_flavor { + id = "b3c.4x16.encrypted" } disk { allocation_mb = 64512 @@ -216,6 +219,9 @@ func testAccCheckIBMDatabaseInstanceEtcdReduced(databaseResourceGroup string, na memory { allocation_mb = 3072 } + host_flavor { + id = "multitenant" + } disk { allocation_mb = 64512 } diff --git a/ibm/service/database/resource_ibm_database_mongodb_enterprise_test.go b/ibm/service/database/resource_ibm_database_mongodb_enterprise_test.go index d3ed077681..b9642cb1b1 100644 --- a/ibm/service/database/resource_ibm_database_mongodb_enterprise_test.go +++ b/ibm/service/database/resource_ibm_database_mongodb_enterprise_test.go @@ -37,7 +37,7 @@ func TestAccIBMMongoDBEnterpriseDatabaseInstanceBasic(t *testing.T) { resource.TestCheckResourceAttr(name, "location", acc.Region()), resource.TestCheckResourceAttr(name, "adminuser", "admin"), resource.TestCheckResourceAttr(name, "service_endpoints", "public"), - resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "43008"), + resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "49152"), resource.TestCheckResourceAttr(name, "groups.0.disk.0.allocation_mb", "61440"), resource.TestCheckResourceAttr(name, "allowlist.#", "1"), resource.TestCheckResourceAttr(name, "users.#", "1"), @@ -57,7 +57,7 @@ func TestAccIBMMongoDBEnterpriseDatabaseInstanceBasic(t *testing.T) { resource.TestCheckResourceAttr(name, "plan", "enterprise"), resource.TestCheckResourceAttr(name, "location", acc.Region()), resource.TestCheckResourceAttr(name, "service_endpoints", "public"), - resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "86016"), + resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "98304"), resource.TestCheckResourceAttr(name, "groups.0.disk.0.allocation_mb", "122880"), resource.TestCheckResourceAttr(name, "allowlist.#", "2"), resource.TestCheckResourceAttr(name, "users.#", "2"), @@ -81,7 +81,7 @@ func TestAccIBMMongoDBEnterpriseDatabaseInstanceBasic(t *testing.T) { resource.TestCheckResourceAttr(name, "plan", "enterprise"), resource.TestCheckResourceAttr(name, "location", acc.Region()), resource.TestCheckResourceAttr(name, "allowlist.#", "0"), - resource.TestCheckResourceAttr(name, 
"groups.0.memory.0.allocation_mb", "43008"), + resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "49152"), resource.TestCheckResourceAttr(name, "groups.0.disk.0.allocation_mb", "122880"), resource.TestCheckResourceAttr(name, "users.#", "0"), resource.TestCheckResourceAttr(name, "connectionstrings.#", "1"), @@ -122,7 +122,7 @@ func TestAccIBMMongoDBEnterpriseDatabaseInstanceGroupBasic(t *testing.T) { resource.TestCheckResourceAttr(name, "location", acc.Region()), resource.TestCheckResourceAttr(name, "adminuser", "admin"), resource.TestCheckResourceAttr(name, "service_endpoints", "public"), - resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "43008"), + resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "49152"), resource.TestCheckResourceAttr(name, "groups.0.disk.0.allocation_mb", "61440"), resource.TestCheckResourceAttr(name, "connectionstrings.#", "1"), resource.TestCheckResourceAttr(name, "connectionstrings.0.name", "admin"), @@ -210,10 +210,10 @@ func testAccCheckIBMDatabaseInstanceMongoDBEnterpriseBasic(databaseResourceGroup tags = ["one:two"] group { group_id = "member" - memory { - allocation_mb = 14336 + host_flavor { + id = "b3c.4x16.encrypted" } - disk { + disk { allocation_mb = 20480 } } @@ -251,15 +251,12 @@ func testAccCheckIBMDatabaseInstanceMongoDBEnterpriseFullyspecified(databaseReso tags = ["one:two"] group { group_id = "member" - memory { - allocation_mb = 28672 + host_flavor { + id = "b3c.8x32.encrypted" } - disk { + disk { allocation_mb = 40960 } - cpu { - allocation_count = 9 - } } users { name = "user123" @@ -305,10 +302,10 @@ func testAccCheckIBMDatabaseInstanceMongoDBEnterpriseReduced(databaseResourceGro tags = ["one:two"] group { group_id = "member" - memory { - allocation_mb = 14336 + host_flavor { + id = "b3c.4x16.encrypted" } - disk { + disk { allocation_mb = 40960 } } @@ -339,10 +336,10 @@ func testAccCheckIBMDatabaseInstanceMongoDBEnterpriseGroupBasic(databaseResource group { group_id = "member" - memory { - allocation_mb = 14336 + host_flavor { + id = "b3c.4x16.encrypted" } - disk { + disk { allocation_mb = 20480 } } @@ -385,6 +382,13 @@ func testAccCheckIBMDatabaseInstanceMongoDBEnterpriseMinimal(databaseResourceGro plan = "enterprise" location = "%[3]s" + group { + group_id = "member" + + host_flavor { + id = "b3c.4x16.encrypted" + } + } timeouts { create = "4h" update = "4h" @@ -412,7 +416,15 @@ func testAccCheckIBMDatabaseInstanceMongoDBEnterpriseMinimal_PITR(databaseResour location = "%[3]s" point_in_time_recovery_deployment_id = ibm_database.%[2]s.id point_in_time_recovery_time = "" - offline_restore = true + offline_restore = true + + group { + group_id = "member" + + host_flavor { + id = "b3c.4x16.encrypted" + } + } timeouts { create = "4h" diff --git a/ibm/service/database/resource_ibm_database_mongodb_sharding_test.go b/ibm/service/database/resource_ibm_database_mongodb_sharding_test.go index 1cce4bc9a4..20b90c13a5 100644 --- a/ibm/service/database/resource_ibm_database_mongodb_sharding_test.go +++ b/ibm/service/database/resource_ibm_database_mongodb_sharding_test.go @@ -36,8 +36,8 @@ func TestAccIBMMongoDBShardingDatabaseInstanceBasic(t *testing.T) { resource.TestCheckResourceAttr(name, "plan", "enterprise-sharding"), resource.TestCheckResourceAttr(name, "location", acc.Region()), resource.TestCheckResourceAttr(name, "adminuser", "admin"), - resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "43008"), - resource.TestCheckResourceAttr(name, 
"groups.0.disk.0.allocation_mb", "61440"), + resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "98304"), + resource.TestCheckResourceAttr(name, "groups.0.disk.0.allocation_mb", "122880"), resource.TestCheckResourceAttr(name, "allowlist.#", "1"), resource.TestCheckResourceAttr(name, "users.#", "1"), resource.TestCheckResourceAttr(name, "connectionstrings.#", "2"), @@ -54,8 +54,8 @@ func TestAccIBMMongoDBShardingDatabaseInstanceBasic(t *testing.T) { resource.TestCheckResourceAttr(name, "service", "databases-for-mongodb"), resource.TestCheckResourceAttr(name, "plan", "enterprise-sharding"), resource.TestCheckResourceAttr(name, "location", acc.Region()), - resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "86016"), - resource.TestCheckResourceAttr(name, "groups.0.disk.0.allocation_mb", "122880"), + resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "196608"), + resource.TestCheckResourceAttr(name, "groups.0.disk.0.allocation_mb", "245760"), resource.TestCheckResourceAttr(name, "allowlist.#", "2"), resource.TestCheckResourceAttr(name, "users.#", "2"), resource.TestCheckResourceAttr(name, "connectionstrings.#", "3"), @@ -71,8 +71,8 @@ func TestAccIBMMongoDBShardingDatabaseInstanceBasic(t *testing.T) { resource.TestCheckResourceAttr(name, "service", "databases-for-mongodb"), resource.TestCheckResourceAttr(name, "plan", "enterprise-sharding"), resource.TestCheckResourceAttr(name, "location", acc.Region()), - resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "43008"), - resource.TestCheckResourceAttr(name, "groups.0.disk.0.allocation_mb", "122880"), + resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "98304"), + resource.TestCheckResourceAttr(name, "groups.0.disk.0.allocation_mb", "245760"), resource.TestCheckResourceAttr(name, "allowlist.#", "0"), resource.TestCheckResourceAttr(name, "users.#", "0"), resource.TestCheckResourceAttr(name, "connectionstrings.#", "1"), @@ -97,10 +97,10 @@ func testAccCheckIBMDatabaseInstanceMongoDBShardingBasic(databaseResourceGroup s adminpassword = "password12345678" group { group_id = "member" - memory { - allocation_mb = 14336 + host_flavor { + id = "b3c.4x16.encrypted" } - disk { + disk { allocation_mb = 20480 } } @@ -137,15 +137,12 @@ func testAccCheckIBMDatabaseInstanceMongoDBShardingFullyspecified(databaseResour adminpassword = "password12345678" group { group_id = "member" - memory { - allocation_mb = 28672 + host_flavor { + id = "b3c.8x32.encrypted" } - disk { + disk { allocation_mb = 40960 } - cpu { - allocation_count = 9 - } } users { name = "user123" @@ -189,10 +186,10 @@ func testAccCheckIBMDatabaseInstanceMongoDBShardingReduced(databaseResourceGroup adminpassword = "password12345678" group { group_id = "member" - memory { - allocation_mb = 14336 + host_flavor { + id = "b3c.4x16.encrypted" } - disk { + disk { allocation_mb = 40960 } } diff --git a/ibm/service/database/resource_ibm_database_mongodb_test.go b/ibm/service/database/resource_ibm_database_mongodb_test.go index 81fd0cd1f7..83e6e2266d 100644 --- a/ibm/service/database/resource_ibm_database_mongodb_test.go +++ b/ibm/service/database/resource_ibm_database_mongodb_test.go @@ -141,7 +141,10 @@ func testAccCheckIBMDatabaseInstanceMongodbBasic(databaseResourceGroup string, n memory { allocation_mb = 1024 } - disk { + host_flavor { + id = "multitenant" + } + disk { allocation_mb = 10240 } } @@ -175,7 +178,10 @@ func testAccCheckIBMDatabaseInstanceMongodbFullyspecified(databaseResourceGroup 
memory { allocation_mb = 2048 } - disk { + host_flavor { + id = "multitenant" + } + disk { allocation_mb = 10240 } } @@ -217,7 +223,10 @@ func testAccCheckIBMDatabaseInstanceMongodbReduced(databaseResourceGroup string, memory { allocation_mb = 1024 } - disk { + host_flavor { + id = "multitenant" + } + disk { allocation_mb = 10240 } } diff --git a/ibm/service/database/resource_ibm_database_mysql_test.go b/ibm/service/database/resource_ibm_database_mysql_test.go index ddb33d5ffe..eb5030098b 100644 --- a/ibm/service/database/resource_ibm_database_mysql_test.go +++ b/ibm/service/database/resource_ibm_database_mysql_test.go @@ -92,6 +92,9 @@ func testAccCheckIBMDatabaseInstanceMysqlBasic(databaseResourceGroup string, nam memory { allocation_mb = 1024 } + host_flavor { + id = "multitenant" + } disk { allocation_mb = 20480 } @@ -138,6 +141,9 @@ func testAccCheckIBMDatabaseInstanceMysqlFullyspecified(databaseResourceGroup st cpu { allocation_count = 4 } + host_flavor { + id = "multitenant" + } } service_endpoints = "public-and-private" tags = ["one:two"] diff --git a/ibm/service/database/resource_ibm_database_postgresql_test.go b/ibm/service/database/resource_ibm_database_postgresql_test.go index dc34b287c4..a75ee7fa40 100644 --- a/ibm/service/database/resource_ibm_database_postgresql_test.go +++ b/ibm/service/database/resource_ibm_database_postgresql_test.go @@ -127,8 +127,8 @@ func TestAccIBMDatabaseInstancePostgresGroup(t *testing.T) { resource.TestCheckResourceAttr(name, "location", acc.Region()), resource.TestCheckResourceAttr(name, "adminuser", "admin"), resource.TestCheckResourceAttr(name, "groups.0.count", "2"), - resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "4096"), - resource.TestCheckResourceAttr(name, "groups.0.disk.0.allocation_mb", "20480"), + resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "2048"), + resource.TestCheckResourceAttr(name, "groups.0.disk.0.allocation_mb", "10240"), resource.TestCheckResourceAttr(name, "groups.0.cpu.0.allocation_count", "6"), resource.TestCheckResourceAttr(name, "service_endpoints", "public"), resource.TestCheckResourceAttr(name, "allowlist.#", "1"), @@ -149,8 +149,8 @@ func TestAccIBMDatabaseInstancePostgresGroup(t *testing.T) { resource.TestCheckResourceAttr(name, "plan", "standard"), resource.TestCheckResourceAttr(name, "location", acc.Region()), resource.TestCheckResourceAttr(name, "groups.0.count", "2"), - resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "4608"), - resource.TestCheckResourceAttr(name, "groups.0.disk.0.allocation_mb", "28672"), + resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "2304"), + resource.TestCheckResourceAttr(name, "groups.0.disk.0.allocation_mb", "14336"), resource.TestCheckResourceAttr(name, "groups.0.cpu.0.allocation_count", "6"), resource.TestCheckResourceAttr(name, "service_endpoints", "public-and-private"), resource.TestCheckResourceAttr(name, "allowlist.#", "2"), @@ -174,8 +174,8 @@ func TestAccIBMDatabaseInstancePostgresGroup(t *testing.T) { resource.TestCheckResourceAttr(name, "plan", "standard"), resource.TestCheckResourceAttr(name, "location", acc.Region()), resource.TestCheckResourceAttr(name, "groups.0.count", "2"), - resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "4096"), - resource.TestCheckResourceAttr(name, "groups.0.disk.0.allocation_mb", "28672"), + resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "2048"), + resource.TestCheckResourceAttr(name, 
"groups.0.disk.0.allocation_mb", "14336"), resource.TestCheckResourceAttr(name, "groups.0.cpu.0.allocation_count", "6"), resource.TestCheckResourceAttr(name, "allowlist.#", "0"), resource.TestCheckResourceAttr(name, "users.#", "0"), @@ -192,8 +192,8 @@ func TestAccIBMDatabaseInstancePostgresGroup(t *testing.T) { resource.TestCheckResourceAttr(name, "plan", "standard"), resource.TestCheckResourceAttr(name, "location", acc.Region()), resource.TestCheckResourceAttr(name, "groups.0.count", "3"), - resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "6144"), - resource.TestCheckResourceAttr(name, "groups.0.disk.0.allocation_mb", "43008"), + resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "3072"), + resource.TestCheckResourceAttr(name, "groups.0.disk.0.allocation_mb", "21504"), resource.TestCheckResourceAttr(name, "groups.0.cpu.0.allocation_count", "9"), resource.TestCheckResourceAttr(name, "allowlist.#", "0"), resource.TestCheckResourceAttr(name, "users.#", "0"), @@ -421,6 +421,9 @@ func testAccCheckIBMDatabaseInstancePostgresBasic(databaseResourceGroup string, memory { allocation_mb = 2048 } + host_flavor { + id = "multitenant" + } disk { allocation_mb = 10240 } @@ -474,6 +477,9 @@ func testAccCheckIBMDatabaseInstancePostgresFullyspecified(databaseResourceGroup cpu { allocation_count = 6 } + host_flavor { + id = "multitenant" + } } service_endpoints = "public-and-private" tags = ["one:two"] @@ -540,12 +546,15 @@ func testAccCheckIBMDatabaseInstancePostgresGroupBasic(databaseResourceGroup str memory { allocation_mb = 1024 } - disk { + disk { allocation_mb = 5120 } cpu { allocation_count = 3 } + host_flavor { + id = "multitenant" + } } users { name = "user123" @@ -582,12 +591,15 @@ func testAccCheckIBMDatabaseInstancePostgresGroupFullyspecified(databaseResource memory { allocation_mb = 1152 } - disk { + disk { allocation_mb = 7168 } cpu { allocation_count = 3 } + host_flavor { + id = "multitenant" + } } users { name = "user123" @@ -632,12 +644,15 @@ func testAccCheckIBMDatabaseInstancePostgresGroupReduced(databaseResourceGroup s memory { allocation_mb = 1024 } - disk { + disk { allocation_mb = 7168 } cpu { allocation_count = 3 } + host_flavor { + id = "multitenant" + } } } `, databaseResourceGroup, name, acc.Region()) @@ -664,12 +679,15 @@ func testAccCheckIBMDatabaseInstancePostgresGroupScaleOut(databaseResourceGroup memory { allocation_mb = 1024 } - disk { + disk { allocation_mb = 7168 } cpu { allocation_count = 3 } + host_flavor { + id = "multitenant" + } } service_endpoints = "public" tags = ["one:two"] diff --git a/ibm/service/database/resource_ibm_database_rabbitmq_test.go b/ibm/service/database/resource_ibm_database_rabbitmq_test.go index 6c58da2763..abc7c94ad2 100644 --- a/ibm/service/database/resource_ibm_database_rabbitmq_test.go +++ b/ibm/service/database/resource_ibm_database_rabbitmq_test.go @@ -142,7 +142,10 @@ func testAccCheckIBMDatabaseInstanceRabbitmqBasic(databaseResourceGroup string, memory { allocation_mb = 1024 } - disk { + host_flavor { + id = "multitenant" + } + disk { allocation_mb = 1024 } } @@ -182,7 +185,10 @@ func testAccCheckIBMDatabaseInstanceRabbitmqFullyspecified(databaseResourceGroup memory { allocation_mb = 2048 } - disk { + host_flavor { + id = "multitenant" + } + disk { allocation_mb = 2048 } } @@ -226,7 +232,10 @@ func testAccCheckIBMDatabaseInstanceRabbitmqReduced(databaseResourceGroup string memory { allocation_mb = 1024 } - disk { + host_flavor { + id = "multitenant" + } + disk { allocation_mb = 2048 } } diff 
--git a/ibm/service/database/resource_ibm_database_redis_test.go b/ibm/service/database/resource_ibm_database_redis_test.go index 64eec792a7..06749e1121 100644 --- a/ibm/service/database/resource_ibm_database_redis_test.go +++ b/ibm/service/database/resource_ibm_database_redis_test.go @@ -70,14 +70,15 @@ func TestAccIBMDatabaseInstance_Redis_Basic(t *testing.T) { ), }, { - Config: testAccCheckIBMDatabaseInstanceRedisGroupMigration(databaseResourceGroup, testName), + Config: testAccCheckIBMDatabaseInstanceRedisUserRole(databaseResourceGroup, testName), Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(name, "name", testName), resource.TestCheckResourceAttr(name, "service", "databases-for-redis"), resource.TestCheckResourceAttr(name, "plan", "standard"), resource.TestCheckResourceAttr(name, "location", acc.Region()), - resource.TestCheckResourceAttr(name, "groups.0.memory.0.allocation_mb", "2048"), - resource.TestCheckResourceAttr(name, "groups.0.disk.0.allocation_mb", "4096"), + resource.TestCheckResourceAttr(name, "users.#", "1"), + resource.TestCheckResourceAttr(name, "users.0.name", "coolguy"), + resource.TestCheckResourceAttr(name, "users.0.role", "-@all +@read"), resource.TestCheckResourceAttr(name, "allowlist.#", "0"), ), }, @@ -174,7 +175,10 @@ func testAccCheckIBMDatabaseInstanceRedisBasic(databaseResourceGroup string, nam memory { allocation_mb = 1024 } - disk { + host_flavor { + id = "multitenant" + } + disk { allocation_mb = 1024 } } @@ -214,7 +218,10 @@ func testAccCheckIBMDatabaseInstanceRedisFullyspecified(databaseResourceGroup st memory { allocation_mb = 1152 } - disk { + host_flavor { + id = "multitenant" + } + disk { allocation_mb = 2048 } } @@ -249,7 +256,10 @@ func testAccCheckIBMDatabaseInstanceRedisReduced(databaseResourceGroup string, n memory { allocation_mb = 1024 } - disk { + host_flavor { + id = "multitenant" + } + disk { allocation_mb = 2048 } } @@ -257,7 +267,7 @@ func testAccCheckIBMDatabaseInstanceRedisReduced(databaseResourceGroup string, n `, databaseResourceGroup, name, acc.Region()) } -func testAccCheckIBMDatabaseInstanceRedisGroupMigration(databaseResourceGroup string, name string) string { +func testAccCheckIBMDatabaseInstanceRedisUserRole(databaseResourceGroup string, name string) string { return fmt.Sprintf(` data "ibm_resource_group" "test_acc" { is_default = true @@ -277,11 +287,20 @@ func testAccCheckIBMDatabaseInstanceRedisGroupMigration(databaseResourceGroup st memory { allocation_mb = 1024 } - disk { + host_flavor { + id = "multitenant" + } + disk { allocation_mb = 2048 } - } - } + } + + users { + name = "coolguy" + password = "securepassword123" + role = "-@all +@read" + } + } `, databaseResourceGroup, name, acc.Region()) } diff --git a/ibm/service/database/resource_ibm_database_test.go b/ibm/service/database/resource_ibm_database_test.go index d5633469a8..5e58458d58 100644 --- a/ibm/service/database/resource_ibm_database_test.go +++ b/ibm/service/database/resource_ibm_database_test.go @@ -4,6 +4,7 @@ package database import ( + "github.com/IBM/go-sdk-core/v5/core" "gotest.tools/assert" "testing" ) @@ -95,7 +96,7 @@ func TestValidateUserPassword(t *testing.T) { }, } for _, tc := range testcases { - err := tc.user.Validate() + err := tc.user.ValidatePassword() if tc.expectedError == "" { if err != nil { t.Errorf("TestValidateUserPassword: %q, %q unexpected error: %q", tc.user.Username, tc.user.Password, err.Error()) @@ -105,3 +106,90 @@ func TestValidateUserPassword(t *testing.T) { } } } + +func TestValidateRBACRole(t 
*testing.T) { + testcases := []struct { + user DatabaseUser + expectedError string + }{ + { + user: DatabaseUser{ + Username: "invalid_format", + Password: "", + Type: "database", + Role: core.StringPtr("+admin -all"), + }, + expectedError: "database user (invalid_format) validation error:\nrole must be in the format +@category or -@category", + }, + { + user: DatabaseUser{ + Username: "invalid_operation", + Password: "", + Type: "database", + Role: core.StringPtr("~@admin"), + }, + expectedError: "database user (invalid_operation) validation error:\nrole must be in the format +@category or -@category", + }, + { + user: DatabaseUser{ + Username: "invalid_category", + Password: "", + Type: "database", + Role: core.StringPtr("+@catfood -@dogfood"), + }, + expectedError: "database user (invalid_category) validation error:\nrole must contain only allowed categories: all,admin,read,write", + }, + { + user: DatabaseUser{ + Username: "one_bad_apple", + Password: "", + Type: "database", + Role: core.StringPtr("-@jazz +@read"), + }, + expectedError: "database user (one_bad_apple) validation error:\nrole must contain only allowed categories: all,admin,read,write", + }, + { + user: DatabaseUser{ + Username: "invalid_user_type", + Password: "", + Type: "ops_manager", + Role: core.StringPtr("+@all"), + }, + expectedError: "database user (invalid_user_type) validation error:\nrole is only allowed for the database user", + }, + { + user: DatabaseUser{ + Username: "valid", + Password: "", + Type: "database", + Role: core.StringPtr("-@all +@read"), + }, + expectedError: "", + }, + { + user: DatabaseUser{ + Username: "blank_role", + Password: "-@all +@read", + Type: "database", + Role: core.StringPtr(""), + }, + expectedError: "", + }, + } + for _, tc := range testcases { + err := tc.user.ValidateRBACRole() + if tc.expectedError == "" { + if err != nil { + t.Errorf("TestValidateRBACRole: %q, %q unexpected error: %q", tc.user.Username, *tc.user.Role, err.Error()) + } + } else { + var errMsg string + + if err != nil { + errMsg = err.Error() + } + + assert.Equal(t, tc.expectedError, errMsg) + } + } +} diff --git a/ibm/service/iampolicy/data_source_ibm_iam_authorization_policies_test.go b/ibm/service/iampolicy/data_source_ibm_iam_authorization_policies_test.go index 213b185ac1..b33893b921 100644 --- a/ibm/service/iampolicy/data_source_ibm_iam_authorization_policies_test.go +++ b/ibm/service/iampolicy/data_source_ibm_iam_authorization_policies_test.go @@ -73,8 +73,8 @@ func testAccCheckIBMIAMAuthorizationPoliciesDataSourceMultiplePolicies() string resource "ibm_iam_authorization_policy" "policy1" { source_service_name = "is" source_resource_type = "load-balancer" - target_service_name = "cloudcerts" - roles = ["Reader"] + target_service_name = "secrets-manager" + roles = ["SecretsReader"] } ` } diff --git a/ibm/service/iampolicy/resource_ibm_iam_access_group_policy.go b/ibm/service/iampolicy/resource_ibm_iam_access_group_policy.go index 5bfcaf2be9..5dda95b0ab 100644 --- a/ibm/service/iampolicy/resource_ibm_iam_access_group_policy.go +++ b/ibm/service/iampolicy/resource_ibm_iam_access_group_policy.go @@ -481,9 +481,9 @@ func resourceIBMIAMAccessGroupPolicyRead(d *schema.ResourceData, meta interface{ return fmt.Errorf("[ERROR] Error retrieving access group policy: %s\n%s", err, res) } - retrievedAttribute := flex.GetV2PolicySubjectAttribute("access_group_id", *accessGroupPolicy.Subject) - if accessGroupId != *retrievedAttribute { - return fmt.Errorf("[ERROR] Policy %s does not belong to access group %s, 
retrievedAttr: %s", accessGroupPolicyId, accessGroupId, *retrievedAttribute) + retrievedAttribute := flex.GetV2PolicySubjectAttribute("access_group_id", *accessGroupPolicy.Subject).(string) + if accessGroupId != retrievedAttribute { + return fmt.Errorf("[ERROR] Policy %s does not belong to access group %s, retrievedAttr: %s", accessGroupPolicyId, accessGroupId, retrievedAttribute) } d.Set("access_group_id", accessGroupId) @@ -770,7 +770,7 @@ func resourceIBMIAMAccessGroupPolicyExists(d *schema.ResourceData, meta interfac return false, nil } - tempID := fmt.Sprintf("%s/%s", *flex.GetV2PolicySubjectAttribute("access_group_id", *accessGroupPolicy.Subject), *accessGroupPolicy.ID) + tempID := fmt.Sprintf("%s/%s", flex.GetV2PolicySubjectAttribute("access_group_id", *accessGroupPolicy.Subject), *accessGroupPolicy.ID) return tempID == d.Id(), nil } diff --git a/ibm/service/iampolicy/resource_ibm_iam_authorization_policy.go b/ibm/service/iampolicy/resource_ibm_iam_authorization_policy.go index c1facb1f30..a242a001d5 100644 --- a/ibm/service/iampolicy/resource_ibm_iam_authorization_policy.go +++ b/ibm/service/iampolicy/resource_ibm_iam_authorization_policy.go @@ -139,6 +139,11 @@ func ResourceIBMIAMAuthorizationPolicy() *schema.Resource { Required: true, Description: "Value of attribute.", }, + "operator": { + Type: schema.TypeString, + Optional: true, + Description: "Operator of attribute.", + }, }, }, }, @@ -219,8 +224,8 @@ func resourceIBMIAMAuthorizationPolicyCreate(d *schema.ResourceData, meta interf var sourceServiceName, targetServiceName string policyType := "authorization" - policySubject := &iampolicymanagementv1.PolicySubject{} - policyResource := &iampolicymanagementv1.PolicyResource{} + policySubject := &iampolicymanagementv1.V2PolicySubject{} + policyResource := &iampolicymanagementv1.V2PolicyResource{} userDetails, err := meta.(conns.ClientSession).BluemixUserDetails() if err != nil { @@ -238,23 +243,54 @@ func resourceIBMIAMAuthorizationPolicyCreate(d *schema.ResourceData, meta interf a := attribute.(map[string]interface{}) name := a["name"].(string) value := a["value"].(string) + operator := a["operator"].(string) if name == "serviceName" { sourceServiceName = value } - at := iampolicymanagementv1.SubjectAttribute{ - Name: &name, - Value: &value, + + if operator == "" && value == "*" && name == "resourceGroupId" { + at := iampolicymanagementv1.V2PolicySubjectAttribute{ + Key: &name, + Value: true, + Operator: core.StringPtr("stringExists"), + } + policySubject.Attributes = append(policySubject.Attributes, at) + } else if operator == "stringExists" { + var resourceValue bool + if value == "true" { + resourceValue = true + } else if value == "false" { + resourceValue = false + } else { + return fmt.Errorf("[ERROR] Only values \"true\" and \"false\" are allowed when operator is \"stringExists\". 
Received %s.", value) + } + at := iampolicymanagementv1.V2PolicySubjectAttribute{ + Key: &name, + Value: &resourceValue, + Operator: &operator, + } + policySubject.Attributes = append(policySubject.Attributes, at) + } else { + if operator == "" { + operator = "stringEquals" + } + at := iampolicymanagementv1.V2PolicySubjectAttribute{ + Key: &name, + Value: &value, + Operator: &operator, + } + policySubject.Attributes = append(policySubject.Attributes, at) } - policySubject.Attributes = append(policySubject.Attributes, at) } } else { if name, ok := d.GetOk("source_service_name"); ok { sourceServiceName = name.(string) - serviceNameSubjectAttribute := &iampolicymanagementv1.SubjectAttribute{ - Name: core.StringPtr("serviceName"), - Value: &sourceServiceName, + serviceNameSubjectAttribute := &iampolicymanagementv1.V2PolicySubjectAttribute{ + Key: core.StringPtr("serviceName"), + Value: &sourceServiceName, + Operator: core.StringPtr("stringEquals"), } policySubject.Attributes = append(policySubject.Attributes, *serviceNameSubjectAttribute) } @@ -264,35 +300,48 @@ func resourceIBMIAMAuthorizationPolicyCreate(d *schema.ResourceData, meta interf sourceServiceAccount = account.(string) } - accountIdSubjectAttribute := &iampolicymanagementv1.SubjectAttribute{ - Name: core.StringPtr("accountId"), - Value: &sourceServiceAccount, + accountIdSubjectAttribute := &iampolicymanagementv1.V2PolicySubjectAttribute{ + Key: core.StringPtr("accountId"), + Value: &sourceServiceAccount, + Operator: core.StringPtr("stringEquals"), } policySubject.Attributes = append(policySubject.Attributes, *accountIdSubjectAttribute) if sID, ok := d.GetOk("source_resource_instance_id"); ok { - serviceInstanceSubjectAttribute := iampolicymanagementv1.SubjectAttribute{ - Name: core.StringPtr("serviceInstance"), - Value: core.StringPtr(sID.(string)), + serviceInstanceSubjectAttribute := iampolicymanagementv1.V2PolicySubjectAttribute{ + Key: core.StringPtr("serviceInstance"), + Value: core.StringPtr(sID.(string)), + Operator: core.StringPtr("stringEquals"), } policySubject.Attributes = append(policySubject.Attributes, serviceInstanceSubjectAttribute) } if sType, ok := d.GetOk("source_resource_type"); ok { - resourceTypeSubjectAttribute := iampolicymanagementv1.SubjectAttribute{ - Name: core.StringPtr("resourceType"), - Value: core.StringPtr(sType.(string)), + resourceTypeSubjectAttribute := iampolicymanagementv1.V2PolicySubjectAttribute{ + Key: core.StringPtr("resourceType"), + Value: core.StringPtr(sType.(string)), + Operator: core.StringPtr("stringEquals"), } policySubject.Attributes = append(policySubject.Attributes, resourceTypeSubjectAttribute) } if sResGrpID, ok := d.GetOk("source_resource_group_id"); ok { - resourceGroupSubjectAttribute := iampolicymanagementv1.SubjectAttribute{ - Name: core.StringPtr("resourceGroupId"), - Value: core.StringPtr(sResGrpID.(string)), + if sResGrpID == "*" { + resourceGroupSubjectAttribute := iampolicymanagementv1.V2PolicySubjectAttribute{ + Key: core.StringPtr("resourceGroupId"), + Value: true, + Operator: core.StringPtr("stringExists"), + } + policySubject.Attributes = append(policySubject.Attributes, resourceGroupSubjectAttribute) + } else { + resourceGroupSubjectAttribute := iampolicymanagementv1.V2PolicySubjectAttribute{ + Key: core.StringPtr("resourceGroupId"), + Value: core.StringPtr(sResGrpID.(string)), + Operator: core.StringPtr("stringEquals"), + } + policySubject.Attributes = append(policySubject.Attributes, resourceGroupSubjectAttribute) } - policySubject.Attributes = 
append(policySubject.Attributes, resourceGroupSubjectAttribute) } } @@ -309,26 +358,41 @@ func resourceIBMIAMAuthorizationPolicyCreate(d *schema.ResourceData, meta interf if name == "resourceType" && targetServiceName == "" { targetServiceName = "resource-controller" } - at := iampolicymanagementv1.ResourceAttribute{ - Name: &name, - Value: &value, - Operator: &operator, + if operator == "stringExists" { + var resourceValue bool + if value == "true" { + resourceValue = true + } else { + resourceValue = false + } + at := iampolicymanagementv1.V2PolicyResourceAttribute{ + Key: &name, + Value: &resourceValue, + Operator: &operator, + } + policyResource.Attributes = append(policyResource.Attributes, at) + } else { + at := iampolicymanagementv1.V2PolicyResourceAttribute{ + Key: &name, + Value: &value, + Operator: &operator, + } + policyResource.Attributes = append(policyResource.Attributes, at) } - policyResource.Attributes = append(policyResource.Attributes, at) } } else { if name, ok := d.GetOk("target_service_name"); ok { targetServiceName = name.(string) - serviceNameResourceAttribute := &iampolicymanagementv1.ResourceAttribute{ - Name: core.StringPtr("serviceName"), + serviceNameResourceAttribute := &iampolicymanagementv1.V2PolicyResourceAttribute{ + Key: core.StringPtr("serviceName"), Value: core.StringPtr(targetServiceName), Operator: core.StringPtr("stringEquals"), } policyResource.Attributes = append(policyResource.Attributes, *serviceNameResourceAttribute) } - accountIDResourceAttribute := &iampolicymanagementv1.ResourceAttribute{ - Name: core.StringPtr("accountId"), + accountIDResourceAttribute := &iampolicymanagementv1.V2PolicyResourceAttribute{ + Key: core.StringPtr("accountId"), Value: core.StringPtr(userDetails.UserAccount), Operator: core.StringPtr("stringEquals"), } @@ -336,17 +400,19 @@ func resourceIBMIAMAuthorizationPolicyCreate(d *schema.ResourceData, meta interf policyResource.Attributes = append(policyResource.Attributes, *accountIDResourceAttribute) if tID, ok := d.GetOk("target_resource_instance_id"); ok { - serviceInstanceResourceAttribute := iampolicymanagementv1.ResourceAttribute{ - Name: core.StringPtr("serviceInstance"), - Value: core.StringPtr(tID.(string)), + serviceInstanceResourceAttribute := iampolicymanagementv1.V2PolicyResourceAttribute{ + Key: core.StringPtr("serviceInstance"), + Value: core.StringPtr(tID.(string)), + Operator: core.StringPtr("stringEquals"), } policyResource.Attributes = append(policyResource.Attributes, serviceInstanceResourceAttribute) } if tType, ok := d.GetOk("target_resource_type"); ok { - resourceTypeResourceAttribute := iampolicymanagementv1.ResourceAttribute{ - Name: core.StringPtr("resourceType"), - Value: core.StringPtr(tType.(string)), + resourceTypeResourceAttribute := iampolicymanagementv1.V2PolicyResourceAttribute{ + Key: core.StringPtr("resourceType"), + Value: core.StringPtr(tType.(string)), + Operator: core.StringPtr("stringEquals"), } policyResource.Attributes = append(policyResource.Attributes, resourceTypeResourceAttribute) if targetServiceName == "" { @@ -355,9 +421,10 @@ func resourceIBMIAMAuthorizationPolicyCreate(d *schema.ResourceData, meta interf } if tResGrpID, ok := d.GetOk("target_resource_group_id"); ok { - resourceGroupResourceAttribute := iampolicymanagementv1.ResourceAttribute{ - Name: core.StringPtr("resourceGroupId"), - Value: core.StringPtr(tResGrpID.(string)), + resourceGroupResourceAttribute := iampolicymanagementv1.V2PolicyResourceAttribute{ + Key: core.StringPtr("resourceGroupId"), + Value: 
core.StringPtr(tResGrpID.(string)), + Operator: core.StringPtr("stringEquals"), } policyResource.Attributes = append(policyResource.Attributes, resourceGroupResourceAttribute) } @@ -381,13 +448,21 @@ func resourceIBMIAMAuthorizationPolicyCreate(d *schema.ResourceData, meta interf return err } - createPolicyOptions := iampapClient.NewCreatePolicyOptions( + policyGrant := &iampolicymanagementv1.Grant{ + Roles: flex.MapPolicyRolesToRoles(roles), + } + policyControl := &iampolicymanagementv1.Control{ + Grant: policyGrant, + } + + createPolicyOptions := iampapClient.NewCreateV2PolicyOptions( + policyControl, "authorization", - []iampolicymanagementv1.PolicySubject{*policySubject}, - roles, - []iampolicymanagementv1.PolicyResource{*policyResource}, ) + createPolicyOptions.SetSubject(policySubject) + createPolicyOptions.SetResource(policyResource) + if description, ok := d.GetOk("description"); ok { des := description.(string) createPolicyOptions.Description = &des @@ -397,7 +472,7 @@ func resourceIBMIAMAuthorizationPolicyCreate(d *schema.ResourceData, meta interf createPolicyOptions.SetHeaders(map[string]string{"Transaction-Id": transactionID.(string)}) } - authPolicy, resp, err := iampapClient.CreatePolicy(createPolicyOptions) + authPolicy, resp, err := iampapClient.CreateV2Policy(createPolicyOptions) if err != nil { return fmt.Errorf("[ERROR] Error creating authorization policy: %s %s", err, resp) } @@ -414,19 +489,19 @@ func resourceIBMIAMAuthorizationPolicyRead(d *schema.ResourceData, meta interfac return err } - getPolicyOptions := &iampolicymanagementv1.GetPolicyOptions{ - PolicyID: core.StringPtr(d.Id()), + getPolicyOptions := &iampolicymanagementv1.GetV2PolicyOptions{ + ID: core.StringPtr(d.Id()), } if transactionID, ok := d.GetOk("transaction_id"); ok { getPolicyOptions.SetHeaders(map[string]string{"Transaction-Id": transactionID.(string)}) } - authorizationPolicy, resp, err := iampapClient.GetPolicy(getPolicyOptions) + authorizationPolicy, resp, err := iampapClient.GetV2Policy(getPolicyOptions) err = resource.Retry(5*time.Minute, func() *resource.RetryError { var err error - authorizationPolicy, resp, err = iampapClient.GetPolicy(getPolicyOptions) + authorizationPolicy, resp, err = iampapClient.GetV2Policy(getPolicyOptions) if err != nil || authorizationPolicy == nil { if resp != nil && resp.StatusCode == 404 { return resource.RetryableError(err) @@ -437,15 +512,12 @@ func resourceIBMIAMAuthorizationPolicyRead(d *schema.ResourceData, meta interfac }) if conns.IsResourceTimeoutError(err) { - authorizationPolicy, resp, err = iampapClient.GetPolicy(getPolicyOptions) + authorizationPolicy, resp, err = iampapClient.GetV2Policy(getPolicyOptions) } if err != nil || resp == nil { return fmt.Errorf("[ERROR] Error retrieving authorizationPolicy: %s %s", err, resp) } - roles := make([]string, len(authorizationPolicy.Roles)) - for i, role := range authorizationPolicy.Roles { - roles[i] = *role.DisplayName - } + roles, err := flex.GetRoleNamesFromPolicyResponse(*authorizationPolicy, d, meta) if authorizationPolicy.Description != nil { d.Set("description", *authorizationPolicy.Description) } @@ -453,21 +525,22 @@ func resourceIBMIAMAuthorizationPolicyRead(d *schema.ResourceData, meta interfac d.Set("transaction_id", resp.Headers["Transaction-Id"][0]) } d.Set("roles", roles) - source := authorizationPolicy.Subjects[0] - target := authorizationPolicy.Resources[0] - - d.Set("resource_attributes", setAuthorizationResourceAttributes(target)) - d.Set("target_resource_instance_id", 
flex.GetResourceAttribute("serviceInstance", target)) - d.Set("target_resource_type", flex.GetResourceAttribute("resourceType", target)) - d.Set("target_resource_group_id", flex.GetResourceAttribute("resourceGroupId", target)) - d.Set("target_service_name", flex.GetResourceAttribute("serviceName", target)) - - d.Set("subject_attributes", setAuthorizationSubjectAttributes(source)) - d.Set("source_service_name", flex.GetSubjectAttribute("serviceName", source)) - d.Set("source_resource_instance_id", flex.GetSubjectAttribute("serviceInstance", source)) - d.Set("source_resource_type", flex.GetSubjectAttribute("resourceType", source)) - d.Set("source_service_account", flex.GetSubjectAttribute("accountId", source)) - d.Set("source_resource_group_id", flex.GetSubjectAttribute("resourceGroupId", source)) + source := authorizationPolicy.Subject + target := authorizationPolicy.Resource + + d.Set("resource_attributes", setAuthorizationResourceAttributes(*target)) + d.Set("target_resource_instance_id", flex.GetV2PolicyResourceAttribute("serviceInstance", *target)) + d.Set("target_resource_type", flex.GetV2PolicyResourceAttribute("resourceType", *target)) + d.Set("target_resource_group_id", flex.GetV2PolicyResourceAttribute("resourceGroupId", *target)) + d.Set("target_service_name", flex.GetV2PolicyResourceAttribute("serviceName", *target)) + if a, ok := d.GetOk("subject_attributes"); ok { + d.Set("subject_attributes", setAuthorizationSubjectAttributes(*source, a.(*schema.Set))) + } + d.Set("source_service_name", flex.GetV2PolicySubjectAttribute("serviceName", *source)) + d.Set("source_resource_instance_id", flex.GetV2PolicySubjectAttribute("serviceInstance", *source)) + d.Set("source_resource_type", flex.GetV2PolicySubjectAttribute("resourceType", *source)) + d.Set("source_service_account", flex.GetV2PolicySubjectAttribute("accountId", *source)) + d.Set("source_resource_group_id", flex.GetV2PolicySubjectAttribute("resourceGroupId", *source)) return nil } @@ -528,12 +601,12 @@ func resourceIBMIAMAuthorizationPolicyExists(d *schema.ResourceData, meta interf return *authorizationPolicy.ID == d.Id(), nil } -func setAuthorizationResourceAttributes(list iampolicymanagementv1.PolicyResource) []map[string]interface{} { +func setAuthorizationResourceAttributes(list iampolicymanagementv1.V2PolicyResource) []map[string]interface{} { result := make([]map[string]interface{}, 0) for _, attribute := range list.Attributes { l := map[string]interface{}{ - "name": attribute.Name, - "value": attribute.Value, + "name": attribute.Key, + "value": fmt.Sprintf("%v", attribute.Value), "operator": attribute.Operator, } result = append(result, l) @@ -541,12 +614,34 @@ func setAuthorizationResourceAttributes(list iampolicymanagementv1.PolicyResourc return result } -func setAuthorizationSubjectAttributes(list iampolicymanagementv1.PolicySubject) []map[string]interface{} { +func setAuthorizationSubjectAttributes(list iampolicymanagementv1.V2PolicySubject, a *schema.Set) []map[string]interface{} { + previousOperators := make([]string, 0) + + for _, item := range a.List() { + i := item.(map[string]interface{}) + + previousOperators = append(previousOperators, i["operator"].(string)) + } + result := make([]map[string]interface{}, 0) - for _, attribute := range list.Attributes { - l := map[string]interface{}{ - "name": attribute.Name, - "value": attribute.Value, + for i, attribute := range list.Attributes { + var l map[string]interface{} + if previousOperators[i] == "" && attribute.Value == true && *attribute.Operator == "stringExists" 
{ + l = map[string]interface{}{ + "name": attribute.Key, + "value": "*", + } + } else if previousOperators[i] == "" { + l = map[string]interface{}{ + "name": attribute.Key, + "value": fmt.Sprintf("%v", attribute.Value), + } + } else { + l = map[string]interface{}{ + "name": attribute.Key, + "value": fmt.Sprintf("%v", attribute.Value), + "operator": attribute.Operator, + } } result = append(result, l) } diff --git a/ibm/service/iampolicy/resource_ibm_iam_authorization_policy_test.go b/ibm/service/iampolicy/resource_ibm_iam_authorization_policy_test.go index fe5b7436ca..d6a62efaa7 100644 --- a/ibm/service/iampolicy/resource_ibm_iam_authorization_policy_test.go +++ b/ibm/service/iampolicy/resource_ibm_iam_authorization_policy_test.go @@ -107,7 +107,7 @@ func TestAccIBMIAMAuthorizationPolicy_ResourceType(t *testing.T) { testAccCheckIBMIAMAuthorizationPolicyExists("ibm_iam_authorization_policy.policy", conf), resource.TestCheckResourceAttr("ibm_iam_authorization_policy.policy", "source_service_name", "is"), resource.TestCheckResourceAttr("ibm_iam_authorization_policy.policy", "source_resource_type", "load-balancer"), - resource.TestCheckResourceAttr("ibm_iam_authorization_policy.policy", "target_service_name", "hs-crypto"), + resource.TestCheckResourceAttr("ibm_iam_authorization_policy.policy", "target_service_name", "secrets-manager"), ), }, }, @@ -202,6 +202,27 @@ func TestAccIBMIAMAuthorizationPolicy_SourceResourceGroupId_ResourceAttributes(t }) } +func TestAccIBMIAMAuthorizationPolicy_SourceResourceGroupId_ResourceAttributes_WildCard(t *testing.T) { + var conf iampolicymanagementv1.PolicyTemplateMetaData + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: testAccCheckIBMIAMAuthorizationPolicyDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMIAMAuthorizationPolicySourceResourceGroupIdResourceAttributesWildCard(acc.Tg_cross_network_account_id, acc.Tg_cross_network_account_id), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIBMIAMAuthorizationPolicyExists("ibm_iam_authorization_policy.policy", conf), + resource.TestCheckResourceAttrSet("ibm_iam_authorization_policy.policy", "id"), + resource.TestCheckResourceAttr("ibm_iam_authorization_policy.policy", "source_service_name", ""), + resource.TestCheckResourceAttr("ibm_iam_authorization_policy.policy", "target_service_name", "cloud-object-storage"), + ), + }, + }, + }) +} + func TestAccIBMIAMAuthorizationPolicy_TargetResourceType(t *testing.T) { var conf iampolicymanagementv1.PolicyTemplateMetaData @@ -265,6 +286,25 @@ func TestAccIBMIAMAuthorizationPolicy_With_Transaction_id(t *testing.T) { }) } +func TestAccIBMIAMAuthorizationPolicy_SourceResourceGroupIdWithStringExistsInSubjectAttributes(t *testing.T) { + var conf iampolicymanagementv1.PolicyTemplateMetaData + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: testAccCheckIBMIAMAuthorizationPolicyDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMIAMAuthorizationPolicySourceResourceGroupIdWithStringExistsInSubjectAttributes(acc.Tg_cross_network_account_id, acc.Tg_cross_network_account_id), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIBMIAMAuthorizationPolicyExists("ibm_iam_authorization_policy.policy", conf), + resource.TestCheckResourceAttrSet("ibm_iam_authorization_policy.policy", "id"), + ), + }, + }, + }) +} + func 
testAccCheckIBMIAMAuthorizationPolicyDestroy(s *terraform.State) error { iamPolicyManagementClient, err := acc.TestAccProvider.Meta().(conns.ClientSession).IAMPolicyManagementV1API() if err != nil { @@ -350,9 +390,9 @@ func testAccCheckIBMIAMAuthorizationPolicyResourceInstance(instanceName string) resource "ibm_iam_authorization_policy" "policy" { source_service_name = "cloud-object-storage" - source_resource_instance_id = ibm_resource_instance.instance1.id + source_resource_instance_id = ibm_resource_instance.instance1.guid target_service_name = "kms" - target_resource_instance_id = ibm_resource_instance.instance2.id + target_resource_instance_id = ibm_resource_instance.instance2.guid roles = ["Reader"] } @@ -364,8 +404,8 @@ func testAccCheckIBMIAMAuthorizationPolicyResourceType() string { resource "ibm_iam_authorization_policy" "policy" { source_service_name = "is" source_resource_type = "load-balancer" - target_service_name = "hs-crypto" - roles = ["Reader"] + target_service_name = "secrets-manager" + roles = ["SecretsReader"] } ` } @@ -426,7 +466,7 @@ func testAccCheckIBMIAMAuthorizationPolicyResourceAttributes(sServiceInstance, t } subject_attributes { name = "serviceInstance" - value = ibm_resource_instance.cos.id + value = ibm_resource_instance.cos.guid } subject_attributes { name = "serviceName" @@ -442,7 +482,7 @@ func testAccCheckIBMIAMAuthorizationPolicyResourceAttributes(sServiceInstance, t } resource_attributes { name = "serviceInstance" - value = ibm_resource_instance.kms.id + value = ibm_resource_instance.kms.guid } } `, sServiceInstance, tServiceInstance, sAccountID, tAccountID) @@ -497,6 +537,31 @@ func testAccCheckIBMIAMAuthorizationPolicySourceResourceGroupIdResourceAttribute `, sAccountID, tAccountID) } +func testAccCheckIBMIAMAuthorizationPolicySourceResourceGroupIdResourceAttributesWildCard(sAccountID, tAccountID string) string { + return fmt.Sprintf(` + resource "ibm_iam_authorization_policy" "policy" { + roles = ["Reader"] + subject_attributes { + name = "accountId" + value = "%s" + } + subject_attributes { + name = "resourceGroupId" + value = "*" + } + + resource_attributes { + name = "serviceName" + value = "cloud-object-storage" + } + resource_attributes { + name = "accountId" + value = "%s" + } + } + `, sAccountID, tAccountID) +} + func testAccCheckIBMIAMAuthorizationPolicyTargetResourceType() string { return ` resource "ibm_iam_authorization_policy" "policy" { @@ -534,3 +599,29 @@ func testAccCheckIBMIAMAuthorizationPolicyResourceTypeAndResourceAttributes(sAcc } `, sAccountID, tAccountID) } + +func testAccCheckIBMIAMAuthorizationPolicySourceResourceGroupIdWithStringExistsInSubjectAttributes(sAccountID, tAccountID string) string { + return fmt.Sprintf(` + resource "ibm_iam_authorization_policy" "policy" { + roles = ["Reader"] + subject_attributes { + name = "accountId" + value = "%s" + } + subject_attributes { + name = "resourceGroupId" + operator = "stringExists" + value = "true" + } + + resource_attributes { + name = "serviceName" + value = "cloud-object-storage" + } + resource_attributes { + name = "accountId" + value = "%s" + } + } + `, sAccountID, tAccountID) +} diff --git a/ibm/service/kubernetes/resource_ibm_container_cluster.go b/ibm/service/kubernetes/resource_ibm_container_cluster.go index 53aecfd0d5..d67e5c63f0 100644 --- a/ibm/service/kubernetes/resource_ibm_container_cluster.go +++ b/ibm/service/kubernetes/resource_ibm_container_cluster.go @@ -131,34 +131,28 @@ func ResourceIBMContainerCluster() *schema.Resource { }, }, - "worker_num": { - Type: 
schema.TypeInt, - Optional: true, - Default: 0, - Description: "Number of worker nodes", - ValidateFunc: validate.ValidateWorkerNum, - Deprecated: "This field is deprecated", - }, - "default_pool_size": { - Type: schema.TypeInt, - Optional: true, - Default: 1, - Description: "The size of the default worker pool", - ValidateFunc: validate.ValidateWorkerNum, + Type: schema.TypeInt, + Optional: true, + Default: 1, + Description: "The size of the default worker pool", + DiffSuppressFunc: flex.ApplyOnce, + ValidateFunc: validate.ValidateWorkerNum, }, "labels": { - Type: schema.TypeMap, - Optional: true, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Description: "list of labels to the default worker pool", + Type: schema.TypeMap, + Optional: true, + Computed: true, + DiffSuppressFunc: flex.ApplyOnce, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "list of labels to the default worker pool", }, "taints": { - Type: schema.TypeSet, - Optional: true, - Description: "WorkerPool Taints", + Type: schema.TypeSet, + Optional: true, + Description: "WorkerPool Taints", + DiffSuppressFunc: flex.ApplyOnce, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "key": { @@ -209,11 +203,11 @@ func ResourceIBMContainerCluster() *schema.Resource { }, "disk_encryption": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Default: true, - Description: "disc encryption done, if set to true.", + Type: schema.TypeBool, + Optional: true, + DiffSuppressFunc: flex.ApplyOnce, + Default: true, + Description: "disc encryption done, if set to true.", }, "kube_version": { @@ -255,58 +249,34 @@ func ResourceIBMContainerCluster() *schema.Resource { }, "machine_type": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Description: "Machine type", + Type: schema.TypeString, + DiffSuppressFunc: flex.ApplyOnce, + Optional: true, + Description: "Machine type", }, "hardware": { - Type: schema.TypeString, - ForceNew: true, - Required: true, - ValidateFunc: validate.ValidateAllowedStringValues([]string{hardwareShared, hardwareDedicated}), - Description: "Hardware type", + Type: schema.TypeString, + DiffSuppressFunc: flex.ApplyOnce, + Required: true, + ValidateFunc: validate.ValidateAllowedStringValues([]string{hardwareShared, hardwareDedicated}), + Description: "Hardware type", }, - "billing": { + "public_vlan_id": { Type: schema.TypeString, Optional: true, - Deprecated: "This field is deprecated", + Default: nil, DiffSuppressFunc: flex.ApplyOnce, - }, - "public_vlan_id": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: nil, - DiffSuppressFunc: func(k, o, n string, d *schema.ResourceData) bool { - if o == "" { - return false - } - if o != "" && n == "" { - return true - } - return false - }, - Description: "Public VLAN ID", + Description: "Public VLAN ID", }, "private_vlan_id": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: nil, - DiffSuppressFunc: func(k, o, n string, d *schema.ResourceData) bool { - if o == "" { - return false - } - if o != "" && n == "" { - return true - } - return false - }, - Description: "Private VLAN ID", + Type: schema.TypeString, + Optional: true, + Default: nil, + DiffSuppressFunc: flex.ApplyOnce, + Description: "Private VLAN ID", }, "entitlement": { @@ -317,11 +287,11 @@ func ResourceIBMContainerCluster() *schema.Resource { }, "operating_system": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - Description: "The operating system of the workers 
in the default worker pool.", + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: flex.ApplyOnce, + Computed: true, + Description: "The operating system of the workers in the default worker pool.", }, "wait_for_worker_update": { @@ -371,12 +341,7 @@ func ResourceIBMContainerCluster() *schema.Resource { DiffSuppressFunc: flex.ApplyOnce, Description: "Boolean value set to true when subnet creation is not required.", }, - "is_trusted": { - Type: schema.TypeBool, - Optional: true, - Deprecated: "This field is deprecated", - DiffSuppressFunc: flex.ApplyOnce, - }, + "server_url": { Type: schema.TypeString, Computed: true, @@ -426,29 +391,6 @@ func ResourceIBMContainerCluster() *schema.Resource { DiffSuppressFunc: flex.ApplyOnce, }, - "org_guid": { - Description: "The bluemix organization guid this cluster belongs to", - Type: schema.TypeString, - Optional: true, - Deprecated: "This field is deprecated", - }, - "space_guid": { - Description: "The bluemix space guid this cluster belongs to", - Type: schema.TypeString, - Optional: true, - Deprecated: "This field is deprecated", - }, - "account_guid": { - Description: "The bluemix account guid this cluster belongs to", - Type: schema.TypeString, - Optional: true, - Deprecated: "This field is deprecated", - }, - "wait_time_minutes": { - Type: schema.TypeInt, - Optional: true, - Deprecated: "This field is deprecated", - }, "tags": { Type: schema.TypeSet, Optional: true, @@ -781,6 +723,37 @@ func resourceIBMContainerClusterCreate(d *schema.ResourceData, meta interface{}) d.Set("force_delete_storage", d.Get("force_delete_storage").(bool)) + //labels + workerPoolsAPI := csClient.WorkerPools() + workerPools, err := workerPoolsAPI.ListWorkerPools(cls.ID, targetEnv) + if err != nil { + return err + } + + if len(workerPools) == 0 || !workerPoolContains(workerPools, defaultWorkerPool) { + return fmt.Errorf("[ERROR] The default worker pool does not exist. 
Use ibm_container_worker_pool and ibm_container_worker_pool_zone attachment resources to make changes to your cluster, such as adding zones, adding worker nodes, or updating worker nodes") + } + + labels := make(map[string]string) + if l, ok := d.GetOk("labels"); ok { + for k, v := range l.(map[string]interface{}) { + labels[k] = v.(string) + } + } + err = workerPoolsAPI.UpdateLabelsWorkerPool(cls.ID, defaultWorkerPool, labels, targetEnv) + if err != nil { + return fmt.Errorf("[ERROR] Error updating the labels %s", err) + } + + //taints + var taints []interface{} + if taintRes, ok := d.GetOk("taints"); ok { + taints = taintRes.(*schema.Set).List() + } + if err := updateWorkerpoolTaints(d, meta, cls.ID, defaultWorkerPool, taints); err != nil { + return err + } + return resourceIBMContainerClusterUpdate(d, meta) } @@ -822,8 +795,6 @@ func resourceIBMContainerClusterRead(d *schema.ResourceData, meta interface{}) e } } - d.Set("worker_num", workerCount) - workerPools, err := workerPoolsAPI.ListWorkerPools(clusterID, targetEnv) if err != nil { return err @@ -1075,149 +1046,6 @@ func resourceIBMContainerClusterUpdate(d *schema.ResourceData, meta interface{}) d.Set("force_delete_storage", forceDeleteStorage) } - if d.HasChange("default_pool_size") && !d.IsNewResource() { - workerPoolsAPI := csClient.WorkerPools() - workerPools, err := workerPoolsAPI.ListWorkerPools(clusterID, targetEnv) - if err != nil { - return err - } - var poolName string - var poolContains bool - - if len(workerPools) > 0 && workerPoolContains(workerPools, defaultWorkerPool) { - poolName = defaultWorkerPool - - poolContains = true - } else if len(workerPools) > 0 && workerPoolContains(workerPools, computeWorkerPool) && workerPoolContains(workerPools, gatewayWorkerpool) { - poolName = computeWorkerPool - poolContains = true - } - if poolContains { - poolSize := d.Get("default_pool_size").(int) - err = workerPoolsAPI.ResizeWorkerPool(clusterID, poolName, poolSize, targetEnv) - if err != nil { - return fmt.Errorf("[ERROR] Error updating the default_pool_size %d: %s", poolSize, err) - } - - _, err = WaitForWorkerAvailable(d, meta, targetEnv) - if err != nil { - return fmt.Errorf("[ERROR] Error waiting for workers of cluster (%s) to become ready: %s", d.Id(), err) - } - } else { - return fmt.Errorf("[ERROR] The default worker pool does not exist. 
Use ibm_container_worker_pool and ibm_container_worker_pool_zone attachment resources to make changes to your cluster, such as adding zones, adding worker nodes, or updating worker nodes") - } - } - - if d.HasChange("labels") { - workerPoolsAPI := csClient.WorkerPools() - workerPools, err := workerPoolsAPI.ListWorkerPools(clusterID, targetEnv) - if err != nil { - return err - } - var poolName string - var poolContains bool - - if len(workerPools) > 0 && workerPoolContains(workerPools, defaultWorkerPool) { - poolName = defaultWorkerPool - poolContains = true - } else if len(workerPools) > 0 && workerPoolContains(workerPools, computeWorkerPool) && workerPoolContains(workerPools, gatewayWorkerpool) { - poolName = computeWorkerPool - poolContains = true - } - if poolContains { - labels := make(map[string]string) - if l, ok := d.GetOk("labels"); ok { - for k, v := range l.(map[string]interface{}) { - labels[k] = v.(string) - } - } - err = workerPoolsAPI.UpdateLabelsWorkerPool(clusterID, poolName, labels, targetEnv) - if err != nil { - return fmt.Errorf("[ERROR] Error updating the labels %s", err) - } - - _, err = WaitForWorkerAvailable(d, meta, targetEnv) - if err != nil { - return fmt.Errorf("[ERROR] Error waiting for workers of cluster (%s) to become ready: %s", d.Id(), err) - } - } else { - return fmt.Errorf("[ERROR] The default worker pool does not exist. Use ibm_container_worker_pool and ibm_container_worker_pool_zone attachment resources to make changes to your cluster, such as adding zones, adding worker nodes, or updating worker nodes") - } - } - - if d.HasChange("taints") { - workerPoolsAPI := csClient.WorkerPools() - workerPools, err := workerPoolsAPI.ListWorkerPools(clusterID, targetEnv) - if err != nil { - return err - } - var poolName string - var poolContains bool - - if len(workerPools) > 0 && workerPoolContains(workerPools, defaultWorkerPool) { - poolName = defaultWorkerPool - poolContains = true - } else if len(workerPools) > 0 && workerPoolContains(workerPools, computeWorkerPool) && workerPoolContains(workerPools, gatewayWorkerpool) { - poolName = computeWorkerPool - poolContains = true - } - if poolContains { - var taints []interface{} - if taintRes, ok := d.GetOk("taints"); ok { - taints = taintRes.(*schema.Set).List() - } - if err := updateWorkerpoolTaints(d, meta, clusterID, poolName, taints); err != nil { - return err - } - } else { - return fmt.Errorf("[ERROR] The default worker pool does not exist. 
Use ibm_container_worker_pool and ibm_container_worker_pool_zone attachment resources to make changes to your cluster, such as adding zones, adding worker nodes, or updating worker nodes") - } - } - - if d.HasChange("worker_num") { - old, new := d.GetChange("worker_num") - oldCount := old.(int) - newCount := new.(int) - if newCount > oldCount { - count := newCount - oldCount - machineType := d.Get("machine_type").(string) - publicVlanID := d.Get("public_vlan_id").(string) - privateVlanID := d.Get("private_vlan_id").(string) - hardware := d.Get("hardware").(string) - switch strings.ToLower(hardware) { - case hardwareDedicated: - hardware = isolationPrivate - case hardwareShared: - hardware = isolationPublic - } - params := v1.WorkerParam{ - WorkerNum: count, - MachineType: machineType, - PublicVlan: publicVlanID, - PrivateVlan: privateVlanID, - Isolation: hardware, - } - wrkAPI.Add(clusterID, params, targetEnv) - } else if oldCount > newCount { - count := oldCount - newCount - workerFields, err := wrkAPI.List(clusterID, targetEnv) - if err != nil { - return fmt.Errorf("[ERROR] Error retrieving workers for cluster: %s", err) - } - for i := 0; i < count; i++ { - err := wrkAPI.Delete(clusterID, workerFields[i].ID, targetEnv) - if err != nil { - return fmt.Errorf("[ERROR] Error deleting workers of cluster (%s): %s", d.Id(), err) - } - } - } - - _, err = WaitForWorkerAvailable(d, meta, targetEnv) - if err != nil { - return fmt.Errorf("[ERROR] Error waiting for workers of cluster (%s) to become ready: %s", d.Id(), err) - } - } - if d.HasChange("workers_info") { oldWorkers, newWorkers := d.GetChange("workers_info") oldWorker := oldWorkers.([]interface{}) diff --git a/ibm/service/kubernetes/resource_ibm_container_vpc_cluster.go b/ibm/service/kubernetes/resource_ibm_container_vpc_cluster.go index 944f8f074f..3371e491ab 100644 --- a/ibm/service/kubernetes/resource_ibm_container_vpc_cluster.go +++ b/ibm/service/kubernetes/resource_ibm_container_vpc_cluster.go @@ -1,4 +1,4 @@ -// Copyright IBM Corp. 2017, 2022 All Rights Reserved. +// Copyright IBM Corp. 2017, 2023 All Rights Reserved. 
// Licensed under the Mozilla Public License v2.0 package kubernetes @@ -53,10 +53,10 @@ func ResourceIBMContainerVpcCluster() *schema.Resource { Schema: map[string]*schema.Schema{ "flavor": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "Cluster nodes flavour", + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: flex.ApplyOnce, + Description: "Cluster nodes flavour", }, "name": { @@ -106,21 +106,24 @@ func ResourceIBMContainerVpcCluster() *schema.Resource { }, "zones": { - Type: schema.TypeSet, - Required: true, - Description: "Zone info", + Type: schema.TypeSet, + Required: true, + Description: "Zone info", + DiffSuppressFunc: flex.ApplyOnce, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { - Type: schema.TypeString, - Required: true, - Description: "Zone for the worker pool in a multizone cluster", + Type: schema.TypeString, + Required: true, + Description: "Zone for the worker pool in a multizone cluster", + DiffSuppressFunc: flex.ApplyOnce, }, "subnet_id": { - Type: schema.TypeString, - Required: true, - Description: "The VPC subnet to assign the cluster", + Type: schema.TypeString, + Required: true, + Description: "The VPC subnet to assign the cluster", + DiffSuppressFunc: flex.ApplyOnce, }, }, }, @@ -189,56 +192,62 @@ func ResourceIBMContainerVpcCluster() *schema.Resource { }, "worker_count": { - Type: schema.TypeInt, - Optional: true, - Default: 1, - Description: "Number of worker nodes in the cluster", + Type: schema.TypeInt, + Optional: true, + Default: 1, + DiffSuppressFunc: flex.ApplyOnce, + Description: "Number of worker nodes in the default worker pool", }, "worker_labels": { - Type: schema.TypeMap, - Optional: true, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Description: "Labels for default worker pool", + Type: schema.TypeMap, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + DiffSuppressFunc: flex.ApplyOnce, + Description: "Labels for default worker pool", }, "operating_system": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - Description: "The operating system of the workers in the default worker pool.", + Type: schema.TypeString, + Optional: true, + Computed: true, + DiffSuppressFunc: flex.ApplyOnce, + Description: "The operating system of the workers in the default worker pool.", }, "secondary_storage": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: "The secondary storage option for the default worker pool.", + Type: schema.TypeString, + Optional: true, + Computed: true, + DiffSuppressFunc: flex.ApplyOnce, + Description: "The secondary storage option for the default worker pool.", }, "taints": { - Type: schema.TypeSet, - Optional: true, - Description: "WorkerPool Taints", + Type: schema.TypeSet, + Optional: true, + DiffSuppressFunc: flex.ApplyOnce, + Description: "Taints for the default worker pool", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "key": { - Type: schema.TypeString, - Required: true, - Description: "Key for taint", + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: flex.ApplyOnce, + Description: "Key for taint", }, "value": { - Type: schema.TypeString, - Required: true, - Description: "Value for taint.", + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: flex.ApplyOnce, + Description: "Value for taint.", }, "effect": { - Type: schema.TypeString, - Required: true, - Description: "Effect for taint. 
Accepted values are NoSchedule, PreferNoSchedule and NoExecute.", + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: flex.ApplyOnce, + Description: "Effect for taint. Accepted values are NoSchedule, PreferNoSchedule and NoExecute.", ValidateFunc: validate.InvokeValidator( "ibm_container_vpc_cluster", "effect"), @@ -322,6 +331,15 @@ func ResourceIBMContainerVpcCluster() *schema.Resource { RequiredWith: []string{"kms_instance_id", "crk"}, }, + "security_groups": { + Type: schema.TypeSet, + Optional: true, + Description: "Allow user to set which security groups added to their workers", + Elem: &schema.Schema{Type: schema.TypeString}, + Set: flex.ResourceIBMVPCHash, + DiffSuppressFunc: flex.ApplyOnce, + }, + //Get Cluster info Request "state": { Type: schema.TypeString, @@ -421,10 +439,10 @@ func ResourceIBMContainerVpcCluster() *schema.Resource { }, "host_pool_id": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: "The ID of the cluster's associated host pool", + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: flex.ApplyOnce, + Description: "The ID of the default worker pool's associated host pool", }, flex.ResourceName: { @@ -587,6 +605,11 @@ func resourceIBMContainerVpcClusterCreate(d *schema.ResourceData, meta interface params.CosInstanceCRN = v.(string) } + if v, ok := d.GetOk("security_groups"); ok { + securityGroups := flex.FlattenSet(v.(*schema.Set)) + params.SecurityGroupIDs = securityGroups + } + targetEnv, err := getVpcClusterTargetHeader(d, meta) if err != nil { return err @@ -634,6 +657,14 @@ func resourceIBMContainerVpcClusterCreate(d *schema.ResourceData, meta interface } } + var taints []interface{} + if taintRes, ok := d.GetOk("taints"); ok { + taints = taintRes.(*schema.Set).List() + } + if err := updateWorkerpoolTaints(d, meta, cls.ID, "default", taints); err != nil { + return err + } + return resourceIBMContainerVpcClusterUpdate(d, meta) } @@ -671,7 +702,6 @@ func resourceIBMContainerVpcClusterUpdate(d *schema.ResourceData, meta interface kmsConfig.Cluster = clusterID targetEnv := v2.ClusterHeader{} if kms, ok := d.GetOk("kms_config"); ok { - kmsConfiglist := kms.([]interface{}) for _, l := range kmsConfiglist { @@ -824,106 +854,6 @@ func resourceIBMContainerVpcClusterUpdate(d *schema.ResourceData, meta interface } } - if d.HasChange("worker_labels") && !d.IsNewResource() { - labels := make(map[string]string) - if l, ok := d.GetOk("worker_labels"); ok { - for k, v := range l.(map[string]interface{}) { - labels[k] = v.(string) - } - } - - ClusterClient, err := meta.(conns.ClientSession).ContainerAPI() - if err != nil { - return err - } - Env := v1.ClusterTargetHeader{ResourceGroup: targetEnv.ResourceGroup} - - err = ClusterClient.WorkerPools().UpdateLabelsWorkerPool(clusterID, "default", labels, Env) - if err != nil { - return fmt.Errorf( - "[ERROR] Error updating the labels: %s", err) - } - } - - if d.HasChange("taints") { - var taints []interface{} - if taintRes, ok := d.GetOk("taints"); ok { - taints = taintRes.(*schema.Set).List() - } - if err := updateWorkerpoolTaints(d, meta, clusterID, "default", taints); err != nil { - return err - } - } - - if d.HasChange("worker_count") && !d.IsNewResource() { - count := d.Get("worker_count").(int) - ClusterClient, err := meta.(conns.ClientSession).ContainerAPI() - if err != nil { - return err - } - Env := v1.ClusterTargetHeader{ResourceGroup: targetEnv.ResourceGroup} - - err = ClusterClient.WorkerPools().ResizeWorkerPool(clusterID, "default", count, Env) - if err != nil { 
- return fmt.Errorf( - "[ERROR] Error updating the worker_count %d: %s", count, err) - } - } - - if d.HasChange("zones") && !d.IsNewResource() { - oldList, newList := d.GetChange("zones") - if oldList == nil { - oldList = new(schema.Set) - } - if newList == nil { - newList = new(schema.Set) - } - os := oldList.(*schema.Set) - ns := newList.(*schema.Set) - remove := os.Difference(ns).List() - add := ns.Difference(os).List() - if len(add) > 0 { - for _, zone := range add { - newZone := zone.(map[string]interface{}) - zoneParam := v2.WorkerPoolZone{ - Cluster: clusterID, - Id: newZone["name"].(string), - SubnetID: newZone["subnet_id"].(string), - WorkerPoolID: "default", - } - err = csClient.WorkerPools().CreateWorkerPoolZone(zoneParam, targetEnv) - if err != nil { - return fmt.Errorf("[ERROR] Error adding zone to conatiner vpc cluster: %s", err) - } - _, err = WaitForWorkerPoolAvailable(d, meta, clusterID, "default", d.Timeout(schema.TimeoutCreate), targetEnv) - if err != nil { - return fmt.Errorf( - "[ERROR] Error waiting for workerpool (%s) to become ready: %s", d.Id(), err) - } - - } - } - if len(remove) > 0 { - for _, zone := range remove { - oldZone := zone.(map[string]interface{}) - ClusterClient, err := meta.(conns.ClientSession).ContainerAPI() - if err != nil { - return err - } - Env := v1.ClusterTargetHeader{ResourceGroup: targetEnv.ResourceGroup} - err = ClusterClient.WorkerPools().RemoveZone(clusterID, oldZone["name"].(string), "default", Env) - if err != nil { - return fmt.Errorf("[ERROR] Error deleting zone to conatiner vpc cluster: %s", err) - } - _, err = WaitForV2WorkerZoneDeleted(clusterID, "default", oldZone["name"].(string), meta, d.Timeout(schema.TimeoutDelete), targetEnv) - if err != nil { - return fmt.Errorf( - "[ERROR] Error waiting for deleting workers of worker pool (%s) of cluster (%s): %s", "default", clusterID, err) - } - } - } - } - if d.HasChange("force_delete_storage") { var forceDeleteStorage bool if v, ok := d.GetOk("force_delete_storage"); ok { @@ -946,6 +876,7 @@ func resourceIBMContainerVpcClusterUpdate(d *schema.ResourceData, meta interface return resourceIBMContainerVpcClusterRead(d, meta) } + func WaitForV2WorkerZoneDeleted(clusterNameOrID, workerPoolNameOrID, zone string, meta interface{}, timeout time.Duration, target v2.ClusterTargetHeader) (interface{}, error) { csClient, err := meta.(conns.ClientSession).VpcContainerAPI() if err != nil { diff --git a/ibm/service/kubernetes/resource_ibm_container_vpc_cluster_test.go b/ibm/service/kubernetes/resource_ibm_container_vpc_cluster_test.go index ad067026ae..da70f4e1b3 100644 --- a/ibm/service/kubernetes/resource_ibm_container_vpc_cluster_test.go +++ b/ibm/service/kubernetes/resource_ibm_container_vpc_cluster_test.go @@ -152,6 +152,33 @@ func TestAccIBMContainerVpcClusterDedicatedHost(t *testing.T) { ) } +func TestAccIBMContainerVpcClusterSecurityGroups(t *testing.T) { + name := fmt.Sprintf("tf-vpc-cluster-%d", acctest.RandIntRange(10, 100)) + var conf *v2.ClusterInfo + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMContainerVpcClusterSecurityGroups(name), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMContainerVpcExists("ibm_container_vpc_cluster.cluster", conf), + resource.TestCheckResourceAttr( + "ibm_container_vpc_cluster.cluster", "name", name), + ), + }, + { + ResourceName: "ibm_container_vpc_cluster.cluster", + ImportState: true, + ImportStateVerify: true, + 
ImportStateVerifyIgnore: []string{ + "wait_till", "update_all_workers", "kms_config", "force_delete_storage", "wait_for_worker_update"}, + }, + }, + }) +} + func testAccCheckIBMContainerVpcClusterDestroy(s *terraform.State) error { csClient, err := acc.TestAccProvider.Meta().(conns.ClientSession).VpcContainerAPI() if err != nil { @@ -275,6 +302,68 @@ resource "ibm_container_vpc_cluster" "cluster" { }`, name) } +// previously you had to create security groups and use them instead +func testAccCheckIBMContainerVpcClusterSecurityGroups(name string) string { + return fmt.Sprintf(` + data "ibm_resource_group" "resource_group" { + is_default = "true" + //name = "Default" + } + resource "ibm_is_vpc" "vpc" { + name = "%[1]s" + } + resource "ibm_is_security_group" "security_group" { + name = "example-security-group" + vpc = ibm_is_vpc.vpc.id + } + resource "ibm_is_subnet" "subnet" { + name = "%[1]s" + vpc = ibm_is_vpc.vpc.id + zone = "us-south-1" + total_ipv4_address_count = 256 + } + resource "ibm_resource_instance" "kms_instance" { + name = "%[1]s" + service = "kms" + plan = "tiered-pricing" + location = "eu-de" + } + + resource "ibm_kms_key" "test" { + instance_id = ibm_resource_instance.kms_instance.guid + key_name = "%[1]s" + standard_key = false + force_delete = true + } + resource "ibm_container_vpc_cluster" "cluster" { + name = "%[1]s" + vpc_id = ibm_is_vpc.vpc.id + flavor = "cx2.2x4" + worker_count = 1 + wait_till = "OneWorkerNodeReady" + resource_group_id = data.ibm_resource_group.resource_group.id + zones { + subnet_id = ibm_is_subnet.subnet.id + name = "us-south-1" + } + kms_config { + instance_id = ibm_resource_instance.kms_instance.guid + crk_id = ibm_kms_key.test.key_id + private_endpoint = false + } + worker_labels = { + "test" = "test-default-pool" + "test1" = "test-default-pool1" + "test2" = "test-default-pool2" + } + + security_groups = [ + ibm_is_security_group.security_group.id, + "cluster", + ] + }`, name) +} + func testAccCheckIBMContainerVpcClusterUpdate(name string) string { return fmt.Sprintf(` provider "ibm" { @@ -470,6 +559,38 @@ func TestAccIBMContainerVpcClusterBaseEnvvar(t *testing.T) { }) } +func TestAccIBMContainerVpcClusterKMSEnvvar(t *testing.T) { + name := fmt.Sprintf("tf-vpc-cluster-%d", acctest.RandIntRange(10, 100)) + var conf *v2.ClusterInfo + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: testAccCheckIBMContainerVpcClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMContainerVpcClusterKMSEnvvar(name), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMContainerVpcExists("ibm_container_vpc_cluster.cluster", conf), + resource.TestCheckResourceAttr( + "ibm_container_vpc_cluster.cluster", "name", name), + resource.TestCheckResourceAttr( + "ibm_container_vpc_cluster.cluster", "worker_count", "1"), + resource.TestCheckResourceAttr( + "ibm_container_vpc_cluster.cluster", "kms_config.#", "1"), + ), + }, + { + ResourceName: "ibm_container_vpc_cluster.cluster", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "wait_till", "update_all_workers", "kms_config", "force_delete_storage", "wait_for_worker_update"}, + }, + }, + }) +} + // You need to set up env vars: // export IBM_CLUSTER_VPC_ID 
+// export IBM_CLUSTER_VPC_SUBNET_ID +// export IBM_CLUSTER_VPC_RESOURCE_GROUP_ID +// export IBM_KMS_INSTANCE_ID +// export IBM_CRK_ID +func testAccCheckIBMContainerVpcClusterKMSEnvvar(name string) string { + config := fmt.Sprintf(` + resource "ibm_container_vpc_cluster" "cluster" { + name = "%[1]s" + vpc_id = "%[2]s" + flavor = "bx2.4x16" + worker_count = 1 + resource_group_id = "%[3]s" + zones { + subnet_id = "%[4]s" + name = "us-south-1" + } + wait_till = "normal" + kms_config { + instance_id = "%[5]s" + crk_id = "%[6]s" + private_endpoint = false + } + } + `, name, acc.IksClusterVpcID, acc.IksClusterResourceGroupID, acc.IksClusterSubnetID, acc.KmsInstanceID, acc.CrkID) + fmt.Println(config) + return config +} diff --git a/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool.go b/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool.go index dfbfde4c3b..477994c5bc 100644 --- a/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool.go +++ b/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool.go @@ -100,6 +100,7 @@ func ResourceIBMContainerVpcWorkerPool() *schema.Resource { "taints": { Type: schema.TypeSet, Optional: true, + Computed: true, Description: "WorkerPool Taints", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -207,11 +208,27 @@ func ResourceIBMContainerVpcWorkerPool() *schema.Resource { RequiredWith: []string{"kms_instance_id", "crk"}, }, + "import_on_create": { + Type: schema.TypeBool, + Optional: true, + DiffSuppressFunc: flex.ApplyOnce, + Description: "Import an existing WorkerPool from the cluster, instead of creating a new", + }, + "autoscale_enabled": { Type: schema.TypeBool, Computed: true, Description: "Autoscaling is enabled on the workerpool", }, + + "security_groups": { + Type: schema.TypeSet, + Optional: true, + Description: "Allow user to set which security groups added to their workers", + Elem: &schema.Schema{Type: schema.TypeString}, + Set: flex.ResourceIBMVPCHash, + DiffSuppressFunc: flex.ApplyOnce, + }, }, } } @@ -249,12 +266,34 @@ func ResourceIBMContainerVPCWorkerPoolValidator() *validate.ResourceValidator { func resourceIBMContainerVpcWorkerPoolCreate(d *schema.ResourceData, meta interface{}) error { + clusterNameorID := d.Get("cluster").(string) + wpClient, err := meta.(conns.ClientSession).VpcContainerAPI() if err != nil { return err } - clusterNameorID := d.Get("cluster").(string) + if ioc, ok := d.GetOk("import_on_create"); ok && ioc.(bool) { + log.Printf("Importing workerpool from cluster %s", clusterNameorID) + + //read to get ID for default and d.Set! 
+ + targetEnv, err := getVpcClusterTargetHeader(d, meta) + if err != nil { + return err + } + + wp, err := wpClient.WorkerPools().GetWorkerPool(clusterNameorID, "default", targetEnv) + if err != nil { + return err + } + + d.SetId(fmt.Sprintf("%s/%s", clusterNameorID, wp.ID)) + + return resourceIBMContainerVpcWorkerPoolRead(d, meta) + + } + var zonei []interface{} zone := []v2.Zone{} @@ -283,6 +322,11 @@ func resourceIBMContainerVpcWorkerPoolCreate(d *schema.ResourceData, meta interf }, } + if v, ok := d.GetOk("security_groups"); ok { + securityGroups := flex.FlattenSet(v.(*schema.Set)) + params.SecurityGroupIDs = securityGroups + } + if kmsid, ok := d.GetOk("kms_instance_id"); ok { crk := d.Get("crk").(string) wve := v2.WorkerVolumeEncryption{ diff --git a/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool_test.go b/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool_test.go index 1b80e2efba..bb66dc98ef 100644 --- a/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool_test.go +++ b/ibm/service/kubernetes/resource_ibm_container_vpc_worker_pool_test.go @@ -85,6 +85,31 @@ func TestAccIBMContainerVpcClusterWorkerPoolDedicatedHost(t *testing.T) { }) } +func TestAccIBMContainerVpcClusterWorkerPoolSecurityGroups(t *testing.T) { + + name := fmt.Sprintf("tf-vpc-worker-pool-%d", acctest.RandIntRange(10, 100)) + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMVpcContainerWorkerPoolSecurityGroups(name), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "ibm_container_vpc_worker_pool.test_pool", "flavor", "cx2.2x4"), + resource.TestCheckResourceAttr( + "ibm_container_vpc_worker_pool.test_pool", "zones.#", "1"), + ), + }, + { + ResourceName: "ibm_container_vpc_worker_pool.test_pool", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func testAccCheckIBMVpcContainerWorkerPoolDestroy(s *terraform.State) error { wpClient, err := acc.TestAccProvider.Meta().(conns.ClientSession).VpcContainerAPI() @@ -173,6 +198,70 @@ func testAccCheckIBMVpcContainerWorkerPoolBasic(name string) string { `, name) } +func testAccCheckIBMVpcContainerWorkerPoolSecurityGroups(name string) string { + return fmt.Sprintf(` + data "ibm_resource_group" "resource_group" { + is_default=true + } + resource "ibm_is_vpc" "vpc" { + name = "%[1]s" + } + resource "ibm_is_security_group" "security_group1" { + name = "%[1]s-security-group-1" + vpc = ibm_is_vpc.vpc.id + } + resource "ibm_is_security_group" "security_group2" { + name = "%[1]s-security-group-2" + vpc = ibm_is_vpc.vpc.id + } + resource "ibm_is_subnet" "subnet1" { + name = "%[1]s-subnet-1" + vpc = ibm_is_vpc.vpc.id + zone = "us-south-1" + total_ipv4_address_count = 256 + } + resource "ibm_is_subnet" "subnet2" { + name = "%[1]s-subnet-2" + vpc = ibm_is_vpc.vpc.id + zone = "us-south-2" + total_ipv4_address_count = 256 + } + + resource "ibm_container_vpc_cluster" "cluster" { + name = "%[1]s" + vpc_id = ibm_is_vpc.vpc.id + flavor = "cx2.2x4" + worker_count = 1 + resource_group_id = data.ibm_resource_group.resource_group.id + wait_till = "MasterNodeReady" + zones { + subnet_id = ibm_is_subnet.subnet1.id + name = ibm_is_subnet.subnet1.zone + } + security_groups = [ + ibm_is_security_group.security_group1.id, + "cluster", + ] + } + resource "ibm_container_vpc_worker_pool" "test_pool" { + cluster = ibm_container_vpc_cluster.cluster.id + worker_pool_name = "%[1]s" + flavor = "cx2.2x4" + vpc_id 
= ibm_is_vpc.vpc.id + worker_count = 1 + resource_group_id = data.ibm_resource_group.resource_group.id + zones { + subnet_id = ibm_is_subnet.subnet2.id + name = ibm_is_subnet.subnet2.zone + } + security_groups = [ + ibm_is_security_group.security_group2.id, + ] + + } + `, name) +} + func testAccCheckIBMVpcContainerWorkerPoolUpdate(name string) string { return fmt.Sprintf(` provider "ibm" { @@ -455,3 +544,147 @@ func testAccCheckIBMOpcContainerWorkerPoolBasic(name, openshiftFlavour, openShif } `, name, acc.IksClusterVpcID, acc.IksClusterResourceGroupID, acc.IksClusterSubnetID, openshiftFlavour, openShiftworkerCount, operatingSystem) } + +func TestAccIBMContainerVpcClusterWorkerPoolImportOnCreateEnvvar(t *testing.T) { + + name := fmt.Sprintf("tf-vpc-worker-%d", acctest.RandIntRange(10, 100)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: testAccCheckIBMVpcContainerWorkerPoolDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMOpcContainerWorkerPoolImportOnCreate(name), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "ibm_container_vpc_worker_pool.test_pool", "worker_pool_name", "default"), + resource.TestCheckResourceAttr( + "ibm_container_vpc_worker_pool.test_pool", "labels.%", "1"), + resource.TestCheckResourceAttr( + "ibm_container_vpc_worker_pool.test_pool", "worker_count", "1"), + ), + }, + { + Config: testAccCheckIBMOpcContainerWorkerPoolImportOnCreateClusterUpdate(name), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "ibm_container_vpc_worker_pool.test_pool", "worker_pool_name", "default"), + resource.TestCheckResourceAttr( + "ibm_container_vpc_worker_pool.test_pool", "labels.%", "1"), + resource.TestCheckResourceAttr( + "ibm_container_vpc_worker_pool.test_pool", "worker_count", "1"), + ), + }, + { + Config: testAccCheckIBMOpcContainerWorkerPoolImportOnCreateWPUpdate(name), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "ibm_container_vpc_worker_pool.test_pool", "worker_pool_name", "default"), + resource.TestCheckResourceAttr( + "ibm_container_vpc_worker_pool.test_pool", "labels.%", "1"), + resource.TestCheckResourceAttr( + "ibm_container_vpc_worker_pool.test_pool", "worker_count", "3"), + ), + }, + }, + }) +} +func testAccCheckIBMOpcContainerWorkerPoolImportOnCreate(name string) string { + return fmt.Sprintf(` + resource "ibm_container_vpc_cluster" "cluster" { + name = "%[1]s" + vpc_id = "%[2]s" + flavor = "bx2.4x16" + worker_count = 1 + resource_group_id = "%[3]s" + zones { + subnet_id = "%[4]s" + name = "us-south-1" + } + wait_till = "normal" + worker_labels = { + "test" = "test-pool" + } + } + + resource "ibm_container_vpc_worker_pool" "test_pool" { + cluster = ibm_container_vpc_cluster.cluster.id + vpc_id = "%[2]s" + flavor = "bx2.4x16" + worker_count = 1 + worker_pool_name = "default" + zones { + subnet_id = "%[4]s" + name = "us-south-1" + } + import_on_create = "true" + } + `, name, acc.IksClusterVpcID, acc.IksClusterResourceGroupID, acc.IksClusterSubnetID) +} + +func testAccCheckIBMOpcContainerWorkerPoolImportOnCreateClusterUpdate(name string) string { + return fmt.Sprintf(` + resource "ibm_container_vpc_cluster" "cluster" { + name = "%[1]s" + vpc_id = "%[2]s" + flavor = "bx2.4x16" + worker_count = 3 + resource_group_id = "%[3]s" + zones { + subnet_id = "%[4]s" + name = "us-south-1" + } + wait_till = "normal" + worker_labels = { + "test" = "test-pool" + } + } + + resource 
"ibm_container_vpc_worker_pool" "test_pool" { + cluster = ibm_container_vpc_cluster.cluster.id + vpc_id = "%[2]s" + flavor = "bx2.4x16" + worker_count = 1 + worker_pool_name = "default" + zones { + subnet_id = "%[4]s" + name = "us-south-1" + } + import_on_create = "true" + } + `, name, acc.IksClusterVpcID, acc.IksClusterResourceGroupID, acc.IksClusterSubnetID) +} + +func testAccCheckIBMOpcContainerWorkerPoolImportOnCreateWPUpdate(name string) string { + return fmt.Sprintf(` + resource "ibm_container_vpc_cluster" "cluster" { + name = "%[1]s" + vpc_id = "%[2]s" + flavor = "bx2.4x16" + worker_count = 1 + resource_group_id = "%[3]s" + zones { + subnet_id = "%[4]s" + name = "us-south-1" + } + wait_till = "normal" + worker_labels = { + "test" = "test-pool" + } + } + + resource "ibm_container_vpc_worker_pool" "test_pool" { + cluster = ibm_container_vpc_cluster.cluster.id + vpc_id = "%[2]s" + flavor = "bx2.4x16" + worker_count = 3 + worker_pool_name = "default" + zones { + subnet_id = "%[4]s" + name = "us-south-1" + } + import_on_create = "true" + } + `, name, acc.IksClusterVpcID, acc.IksClusterResourceGroupID, acc.IksClusterSubnetID) +} diff --git a/ibm/service/kubernetes/resource_ibm_container_worker_pool.go b/ibm/service/kubernetes/resource_ibm_container_worker_pool.go index 3fbb6bafb3..962a748ed3 100644 --- a/ibm/service/kubernetes/resource_ibm_container_worker_pool.go +++ b/ibm/service/kubernetes/resource_ibm_container_worker_pool.go @@ -192,6 +192,13 @@ func ResourceIBMContainerWorkerPool() *schema.Resource { Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this cluster", }, + "import_on_create": { + Type: schema.TypeBool, + Optional: true, + DiffSuppressFunc: flex.ApplyOnce, + Description: "Import a workerpool from a cluster", + }, + "autoscale_enabled": { Type: schema.TypeBool, Computed: true, @@ -232,6 +239,25 @@ func resourceIBMContainerWorkerPoolCreate(d *schema.ResourceData, meta interface clusterNameorID := d.Get("cluster").(string) + if ioc, ok := d.GetOk("import_on_create"); ok && ioc.(bool) { + + workerPoolsAPI := csClient.WorkerPools() + targetEnv, err := getWorkerPoolTargetHeader(d, meta) + if err != nil { + return err + } + + res, err := workerPoolsAPI.GetWorkerPool(clusterNameorID, "default", targetEnv) + if err != nil { + return err + } + + d.SetId(fmt.Sprintf("%s/%s", clusterNameorID, res.ID)) + + return resourceIBMContainerWorkerPoolRead(d, meta) + + } + workerPoolConfig := v1.WorkerPoolConfig{ Name: d.Get("worker_pool_name").(string), Size: d.Get("size_per_zone").(int), diff --git a/ibm/service/kubernetes/resource_ibm_container_worker_pool_test.go b/ibm/service/kubernetes/resource_ibm_container_worker_pool_test.go index 8fa4f21db9..18122d0e29 100644 --- a/ibm/service/kubernetes/resource_ibm_container_worker_pool_test.go +++ b/ibm/service/kubernetes/resource_ibm_container_worker_pool_test.go @@ -206,3 +206,139 @@ resource "ibm_container_worker_pool" "test_pool" { } }`, workerPoolName, acc.MachineType, clusterName) } + +func TestAccIBMContainerWorkerPoolImportOnCreate(t *testing.T) { + + clusterName := fmt.Sprintf("tf-cluster-worker-%d", acctest.RandIntRange(10, 100)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: testAccCheckIBMContainerWorkerPoolDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMContainerWorkerPoolImportOnCreate(clusterName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + 
"ibm_container_worker_pool.test_pool", "worker_pool_name", "default"), + resource.TestCheckResourceAttr( + "ibm_container_worker_pool.test_pool", "size_per_zone", "1"), + resource.TestCheckResourceAttr( + "ibm_container_worker_pool.test_pool", "labels.%", "2"), + resource.TestCheckResourceAttr( + "ibm_container_worker_pool.test_pool", "hardware", "shared"), + ), + }, + { + Config: testAccCheckIBMContainerWorkerPoolImportOnCreateClusterUpdate(clusterName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "ibm_container_worker_pool.test_pool", "worker_pool_name", "default"), + resource.TestCheckResourceAttr( + "ibm_container_worker_pool.test_pool", "size_per_zone", "1"), + resource.TestCheckResourceAttr( + "ibm_container_worker_pool.test_pool", "labels.%", "2"), + resource.TestCheckResourceAttr( + "ibm_container_worker_pool.test_pool", "hardware", "shared"), + ), + }, + { + Config: testAccCheckIBMContainerWorkerPoolImportOnCreateWPUpdate(clusterName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "ibm_container_worker_pool.test_pool", "worker_pool_name", "default"), + resource.TestCheckResourceAttr( + "ibm_container_worker_pool.test_pool", "size_per_zone", "3"), + resource.TestCheckResourceAttr( + "ibm_container_worker_pool.test_pool", "labels.%", "2"), + resource.TestCheckResourceAttr( + "ibm_container_worker_pool.test_pool", "hardware", "shared"), + ), + }, + }, + }) +} + +func testAccCheckIBMContainerWorkerPoolImportOnCreate(clusterName string) string { + return fmt.Sprintf(` + +resource "ibm_container_cluster" "testacc_cluster" { + name = "%s" + datacenter = "%s" + machine_type = "%s" + hardware = "shared" + public_vlan_id = "%s" + private_vlan_id = "%s" + kube_version = "%s" + wait_till = "OneWorkerNodeReady" + default_pool_size = 1 + labels = { + "test" = "test-pool" + "test1" = "test-pool1" + } +} + +resource "ibm_container_worker_pool" "test_pool" { + worker_pool_name = "default" + machine_type = "%[3]s" + cluster = ibm_container_cluster.testacc_cluster.id + size_per_zone = 1 + import_on_create = "true" +}`, clusterName, acc.Datacenter, acc.MachineType, acc.PublicVlanID, acc.PrivateVlanID, acc.KubeVersion) +} + +func testAccCheckIBMContainerWorkerPoolImportOnCreateClusterUpdate(clusterName string) string { + return fmt.Sprintf(` + +resource "ibm_container_cluster" "testacc_cluster" { + name = "%s" + datacenter = "%s" + machine_type = "%s" + hardware = "shared" + public_vlan_id = "%s" + private_vlan_id = "%s" + kube_version = "%s" + wait_till = "OneWorkerNodeReady" + default_pool_size = 3 + labels = { + "test" = "test-pool" + "test1" = "test-pool1" + } +} + +resource "ibm_container_worker_pool" "test_pool" { + worker_pool_name = "default" + machine_type = "%[3]s" + cluster = ibm_container_cluster.testacc_cluster.id + size_per_zone = 1 + import_on_create = "true" +}`, clusterName, acc.Datacenter, acc.MachineType, acc.PublicVlanID, acc.PrivateVlanID, acc.KubeVersion) +} + +func testAccCheckIBMContainerWorkerPoolImportOnCreateWPUpdate(clusterName string) string { + return fmt.Sprintf(` + +resource "ibm_container_cluster" "testacc_cluster" { + name = "%s" + datacenter = "%s" + machine_type = "%s" + hardware = "shared" + public_vlan_id = "%s" + private_vlan_id = "%s" + kube_version = "%s" + wait_till = "OneWorkerNodeReady" + default_pool_size = 1 + labels = { + "test" = "test-pool" + "test1" = "test-pool1" + } +} + +resource "ibm_container_worker_pool" "test_pool" { + worker_pool_name = "default" + machine_type = "%[3]s" + cluster = 
ibm_container_cluster.testacc_cluster.id + size_per_zone = 3 + import_on_create = "true" +}`, clusterName, acc.Datacenter, acc.MachineType, acc.PublicVlanID, acc.PrivateVlanID, acc.KubeVersion) +} diff --git a/ibm/service/metricsrouter/resource_ibm_metrics_router_route_test.go b/ibm/service/metricsrouter/resource_ibm_metrics_router_route_test.go index d84f2404bc..6c1e2e8fe7 100644 --- a/ibm/service/metricsrouter/resource_ibm_metrics_router_route_test.go +++ b/ibm/service/metricsrouter/resource_ibm_metrics_router_route_test.go @@ -139,7 +139,7 @@ func TestAccIBMMetricsRouterRouteSendNoTarget(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccCheckIBMMetricsRouterRouteNoTarget(name, action), - ExpectError: regexp.MustCompile("Send rule action requires non-empty targets"), + ExpectError: regexp.MustCompile("You have a rule with empty targets."), }, }, }) diff --git a/ibm/service/mqcloud/data_source_ibm_mqcloud_application.go b/ibm/service/mqcloud/data_source_ibm_mqcloud_application.go index 9cef9c4831..8f2ea42810 100644 --- a/ibm/service/mqcloud/data_source_ibm_mqcloud_application.go +++ b/ibm/service/mqcloud/data_source_ibm_mqcloud_application.go @@ -9,11 +9,11 @@ import ( "time" - "github.com/IBM/mqcloud-go-sdk/mqcloudv1" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM/mqcloud-go-sdk/mqcloudv1" ) func DataSourceIbmMqcloudApplication() *schema.Resource { @@ -128,7 +128,7 @@ func dataSourceIbmMqcloudApplicationRead(context context.Context, d *schema.Reso if suppliedFilter { if len(allItems) == 0 { - return diag.FromErr(fmt.Errorf("No Applications found with name %s", name)) + return diag.FromErr(fmt.Errorf("No Application found with name: \"%s\"", name)) } d.SetId(name) } else { @@ -146,7 +146,7 @@ func dataSourceIbmMqcloudApplicationRead(context context.Context, d *schema.Reso } if err = d.Set("applications", mapSlice); err != nil { - return diag.FromErr(fmt.Errorf("Error setting applications %s", err)) + return diag.FromErr(fmt.Errorf("Error setting applications: %s", err)) } return nil diff --git a/ibm/service/mqcloud/data_source_ibm_mqcloud_application_test.go b/ibm/service/mqcloud/data_source_ibm_mqcloud_application_test.go index 1c9f1d7347..49eed96b15 100644 --- a/ibm/service/mqcloud/data_source_ibm_mqcloud_application_test.go +++ b/ibm/service/mqcloud/data_source_ibm_mqcloud_application_test.go @@ -13,6 +13,7 @@ import ( ) func TestAccIbmMqcloudApplicationDataSourceBasic(t *testing.T) { + t.Parallel() applicationDetailsServiceInstanceGuid := acc.MqcloudInstanceID applicationDetailsName := "appdsbasic" @@ -33,31 +34,6 @@ func TestAccIbmMqcloudApplicationDataSourceBasic(t *testing.T) { }) } -func TestAccIbmMqcloudApplicationDataSourceAllArgs(t *testing.T) { - applicationDetailsServiceInstanceGuid := acc.MqcloudInstanceID - applicationDetailsName := "appdsargs" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { acc.TestAccPreCheckMqcloud(t) }, - Providers: acc.TestAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckIbmMqcloudApplicationDataSourceConfig(applicationDetailsServiceInstanceGuid, applicationDetailsName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_application.mqcloud_application_instance", "id"), - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_application.mqcloud_application_instance", "service_instance_guid"), - 
resource.TestCheckResourceAttrSet("data.ibm_mqcloud_application.mqcloud_application_instance", "name"), - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_application.mqcloud_application_instance", "applications.#"), - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_application.mqcloud_application_instance", "applications.0.id"), - resource.TestCheckResourceAttr("data.ibm_mqcloud_application.mqcloud_application_instance", "applications.0.name", applicationDetailsName), - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_application.mqcloud_application_instance", "applications.0.create_api_key_uri"), - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_application.mqcloud_application_instance", "applications.0.href"), - ), - }, - }, - }) -} - func testAccCheckIbmMqcloudApplicationDataSourceConfigBasic(applicationDetailsServiceInstanceGuid string, applicationDetailsName string) string { return fmt.Sprintf(` resource "ibm_mqcloud_application" "mqcloud_application_instance" { @@ -71,17 +47,3 @@ func testAccCheckIbmMqcloudApplicationDataSourceConfigBasic(applicationDetailsSe } `, applicationDetailsServiceInstanceGuid, applicationDetailsName) } - -func testAccCheckIbmMqcloudApplicationDataSourceConfig(applicationDetailsServiceInstanceGuid string, applicationDetailsName string) string { - return fmt.Sprintf(` - resource "ibm_mqcloud_application" "mqcloud_application_instance" { - service_instance_guid = "%s" - name = "%s" - } - - data "ibm_mqcloud_application" "mqcloud_application_instance" { - service_instance_guid = ibm_mqcloud_application.mqcloud_application_instance.service_instance_guid - name = ibm_mqcloud_application.mqcloud_application_instance.name - } - `, applicationDetailsServiceInstanceGuid, applicationDetailsName) -} diff --git a/ibm/service/mqcloud/data_source_ibm_mqcloud_keystore_certificate.go b/ibm/service/mqcloud/data_source_ibm_mqcloud_keystore_certificate.go index 0f27f37fa3..0881b79156 100644 --- a/ibm/service/mqcloud/data_source_ibm_mqcloud_keystore_certificate.go +++ b/ibm/service/mqcloud/data_source_ibm_mqcloud_keystore_certificate.go @@ -169,7 +169,7 @@ func dataSourceIbmMqcloudKeystoreCertificateRead(context context.Context, d *sch if suppliedFilter { if len(keyStoreCertificateDetailsCollection.KeyStore) == 0 { - return diag.FromErr(fmt.Errorf("no KeyStore found with label %s", label)) + return diag.FromErr(fmt.Errorf("No Key Store Certificate found with label: \"%s\"", label)) } d.SetId(label) } else { @@ -192,7 +192,7 @@ func dataSourceIbmMqcloudKeystoreCertificateRead(context context.Context, d *sch } } if err = d.Set("key_store", keyStore); err != nil { - return diag.FromErr(fmt.Errorf("Error setting key_store %s", err)) + return diag.FromErr(fmt.Errorf("Error setting key_store: %s", err)) } return nil diff --git a/ibm/service/mqcloud/data_source_ibm_mqcloud_keystore_certificate_test.go b/ibm/service/mqcloud/data_source_ibm_mqcloud_keystore_certificate_test.go index 9f0b3218e6..984831aac7 100644 --- a/ibm/service/mqcloud/data_source_ibm_mqcloud_keystore_certificate_test.go +++ b/ibm/service/mqcloud/data_source_ibm_mqcloud_keystore_certificate_test.go @@ -14,6 +14,7 @@ import ( ) func TestAccIbmMqcloudKeystoreCertificateDataSourceBasic(t *testing.T) { + t.Parallel() keyStoreCertificateDetailsServiceInstanceGuid := acc.MqcloudInstanceID keyStoreCertificateDetailsQueueManagerID := acc.MqcloudQueueManagerID keyStoreCertificateDetailsLabel := fmt.Sprintf("tf_label_%d", acctest.RandIntRange(10, 100)) @@ -26,46 +27,9 @@ func 
TestAccIbmMqcloudKeystoreCertificateDataSourceBasic(t *testing.T) { { Config: testAccCheckIbmMqcloudKeystoreCertificateDataSourceConfigBasic(keyStoreCertificateDetailsServiceInstanceGuid, keyStoreCertificateDetailsQueueManagerID, keyStoreCertificateDetailsLabel, keyStoreCertificateDetailsCertificateFile), Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.ibm_mqcloud_keystore_certificate.mqcloud_keystore_certificate_instance", "id"), resource.TestCheckResourceAttrSet("data.ibm_mqcloud_keystore_certificate.mqcloud_keystore_certificate_instance", "service_instance_guid"), resource.TestCheckResourceAttrSet("data.ibm_mqcloud_keystore_certificate.mqcloud_keystore_certificate_instance", "queue_manager_id"), - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_keystore_certificate.mqcloud_keystore_certificate_instance", "key_store.#"), - resource.TestCheckResourceAttr("data.ibm_mqcloud_keystore_certificate.mqcloud_keystore_certificate_instance", "key_store.0.label", keyStoreCertificateDetailsLabel), - ), - }, - }, - }) -} - -func TestAccIbmMqcloudKeystoreCertificateDataSourceAllArgs(t *testing.T) { - keyStoreCertificateDetailsServiceInstanceGuid := acc.MqcloudInstanceID - keyStoreCertificateDetailsQueueManagerID := acc.MqcloudQueueManagerID - keyStoreCertificateDetailsLabel := fmt.Sprintf("tf_label_%d", acctest.RandIntRange(10, 100)) - keyStoreCertificateDetailsCertificateFile := acc.MqcloudKSCertFilePath - - resource.Test(t, resource.TestCase{ - PreCheck: func() { acc.TestAccPreCheckMqcloud(t) }, - Providers: acc.TestAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckIbmMqcloudKeystoreCertificateDataSourceConfig(keyStoreCertificateDetailsServiceInstanceGuid, keyStoreCertificateDetailsQueueManagerID, keyStoreCertificateDetailsLabel, keyStoreCertificateDetailsCertificateFile), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_keystore_certificate.mqcloud_keystore_certificate_instance", "service_instance_guid"), - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_keystore_certificate.mqcloud_keystore_certificate_instance", "queue_manager_id"), - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_keystore_certificate.mqcloud_keystore_certificate_instance", "label"), - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_keystore_certificate.mqcloud_keystore_certificate_instance", "total_count"), - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_keystore_certificate.mqcloud_keystore_certificate_instance", "key_store.#"), - resource.TestCheckResourceAttr("data.ibm_mqcloud_keystore_certificate.mqcloud_keystore_certificate_instance", "key_store.0.label", keyStoreCertificateDetailsLabel), - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_keystore_certificate.mqcloud_keystore_certificate_instance", "key_store.0.certificate_type"), - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_keystore_certificate.mqcloud_keystore_certificate_instance", "key_store.0.fingerprint_sha256"), - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_keystore_certificate.mqcloud_keystore_certificate_instance", "key_store.0.subject_dn"), - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_keystore_certificate.mqcloud_keystore_certificate_instance", "key_store.0.subject_cn"), - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_keystore_certificate.mqcloud_keystore_certificate_instance", "key_store.0.issuer_dn"), - 
resource.TestCheckResourceAttrSet("data.ibm_mqcloud_keystore_certificate.mqcloud_keystore_certificate_instance", "key_store.0.issuer_cn"), - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_keystore_certificate.mqcloud_keystore_certificate_instance", "key_store.0.issued"), - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_keystore_certificate.mqcloud_keystore_certificate_instance", "key_store.0.expiry"), - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_keystore_certificate.mqcloud_keystore_certificate_instance", "key_store.0.is_default"), - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_keystore_certificate.mqcloud_keystore_certificate_instance", "key_store.0.dns_names_total_count"), - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_keystore_certificate.mqcloud_keystore_certificate_instance", "key_store.0.href"), ), }, }, @@ -78,24 +42,7 @@ func testAccCheckIbmMqcloudKeystoreCertificateDataSourceConfigBasic(keyStoreCert service_instance_guid = "%s" queue_manager_id = "%s" label = "%s" - certificate_file = "%s" - } - - data "ibm_mqcloud_keystore_certificate" "mqcloud_keystore_certificate_instance" { - service_instance_guid = ibm_mqcloud_keystore_certificate.mqcloud_keystore_certificate_instance.service_instance_guid - queue_manager_id = ibm_mqcloud_keystore_certificate.mqcloud_keystore_certificate_instance.queue_manager_id - label = ibm_mqcloud_keystore_certificate.mqcloud_keystore_certificate_instance.label - } - `, keyStoreCertificateDetailsServiceInstanceGuid, keyStoreCertificateDetailsQueueManagerID, keyStoreCertificateDetailsLabel, keyStoreCertificateDetailsCertificateFile) -} - -func testAccCheckIbmMqcloudKeystoreCertificateDataSourceConfig(keyStoreCertificateDetailsServiceInstanceGuid string, keyStoreCertificateDetailsQueueManagerID string, keyStoreCertificateDetailsLabel string, keyStoreCertificateDetailsCertificateFile string) string { - return fmt.Sprintf(` - resource "ibm_mqcloud_keystore_certificate" "mqcloud_keystore_certificate_instance" { - service_instance_guid = "%s" - queue_manager_id = "%s" - label = "%s" - certificate_file = "%s" + certificate_file = filebase64("%s") } data "ibm_mqcloud_keystore_certificate" "mqcloud_keystore_certificate_instance" { diff --git a/ibm/service/mqcloud/data_source_ibm_mqcloud_queue_manager.go b/ibm/service/mqcloud/data_source_ibm_mqcloud_queue_manager.go index 42774e4bce..245cbf93b7 100644 --- a/ibm/service/mqcloud/data_source_ibm_mqcloud_queue_manager.go +++ b/ibm/service/mqcloud/data_source_ibm_mqcloud_queue_manager.go @@ -182,7 +182,7 @@ func dataSourceIbmMqcloudQueueManagerRead(context context.Context, d *schema.Res if suppliedFilter { if len(allItems) == 0 { - return diag.FromErr(fmt.Errorf("No Queue Managers found with name %s", name)) + return diag.FromErr(fmt.Errorf("No Queue Manager found with name: \"%s\"", name)) } d.SetId(name) } else { @@ -200,7 +200,7 @@ func dataSourceIbmMqcloudQueueManagerRead(context context.Context, d *schema.Res } if err = d.Set("queue_managers", mapSlice); err != nil { - return diag.FromErr(fmt.Errorf("Error setting queue_managers %s", err)) + return diag.FromErr(fmt.Errorf("Error setting queue_managers: %s", err)) } return nil diff --git a/ibm/service/mqcloud/data_source_ibm_mqcloud_queue_manager_test.go b/ibm/service/mqcloud/data_source_ibm_mqcloud_queue_manager_test.go index 69f9690370..1bf9dccef8 100644 --- a/ibm/service/mqcloud/data_source_ibm_mqcloud_queue_manager_test.go +++ b/ibm/service/mqcloud/data_source_ibm_mqcloud_queue_manager_test.go @@ -7,6 +7,7 @@ import ( "fmt" 
"testing" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" @@ -15,8 +16,8 @@ import ( func TestAccIbmMqcloudQueueManagerDataSourceBasic(t *testing.T) { t.Parallel() queueManagerDetailsServiceInstanceGuid := acc.MqcloudInstanceID - queueManagerDetailsName := "queue_manager_ds_basic" - queueManagerDetailsLocation := "ibmcloud_eu_de" + queueManagerDetailsName := fmt.Sprintf("tf_queue_manager_ds_basic%d", acctest.RandIntRange(10, 100)) + queueManagerDetailsLocation := acc.MqCloudQueueManagerLocation queueManagerDetailsSize := "small" resource.Test(t, resource.TestCase{ @@ -41,11 +42,11 @@ func TestAccIbmMqcloudQueueManagerDataSourceBasic(t *testing.T) { func TestAccIbmMqcloudQueueManagerDataSourceAllArgs(t *testing.T) { t.Parallel() queueManagerDetailsServiceInstanceGuid := acc.MqcloudInstanceID - queueManagerDetailsName := "queue_manager_ds_allargs" - queueManagerDetailsDisplayName := "queue_manager_ds_allargs" - queueManagerDetailsLocation := "ibmcloud_eu_de" + queueManagerDetailsName := fmt.Sprintf("tf_queue_manager_ds_allargs%d", acctest.RandIntRange(10, 100)) + queueManagerDetailsDisplayName := queueManagerDetailsName + queueManagerDetailsLocation := acc.MqCloudQueueManagerLocation queueManagerDetailsSize := "small" - queueManagerDetailsVersion := "9.3.3_3" + queueManagerDetailsVersion := acc.MqCloudQueueManagerVersion resource.Test(t, resource.TestCase{ PreCheck: func() { acc.TestAccPreCheckMqcloud(t) }, diff --git a/ibm/service/mqcloud/data_source_ibm_mqcloud_truststore_certificate.go b/ibm/service/mqcloud/data_source_ibm_mqcloud_truststore_certificate.go index 265c2cb8b1..c8d0604ab0 100644 --- a/ibm/service/mqcloud/data_source_ibm_mqcloud_truststore_certificate.go +++ b/ibm/service/mqcloud/data_source_ibm_mqcloud_truststore_certificate.go @@ -156,7 +156,7 @@ func dataSourceIbmMqcloudTruststoreCertificateRead(context context.Context, d *s if suppliedFilter { if len(trustStoreCertificateDetailsCollection.TrustStore) == 0 { - return diag.FromErr(fmt.Errorf("no TrustStore found with label %s", label)) + return diag.FromErr(fmt.Errorf("No Trust Store Certificate found with label: \"%s\"", label)) } d.SetId(label) } else { @@ -179,7 +179,7 @@ func dataSourceIbmMqcloudTruststoreCertificateRead(context context.Context, d *s } } if err = d.Set("trust_store", trustStore); err != nil { - return diag.FromErr(fmt.Errorf("Error setting trust_store %s", err)) + return diag.FromErr(fmt.Errorf("Error setting trust_store: %s", err)) } return nil diff --git a/ibm/service/mqcloud/data_source_ibm_mqcloud_truststore_certificate_test.go b/ibm/service/mqcloud/data_source_ibm_mqcloud_truststore_certificate_test.go index a4c50e196f..75552460a4 100644 --- a/ibm/service/mqcloud/data_source_ibm_mqcloud_truststore_certificate_test.go +++ b/ibm/service/mqcloud/data_source_ibm_mqcloud_truststore_certificate_test.go @@ -14,6 +14,7 @@ import ( ) func TestAccIbmMqcloudTruststoreCertificateDataSourceBasic(t *testing.T) { + t.Parallel() trustStoreCertificateDetailsServiceInstanceGuid := acc.MqcloudInstanceID trustStoreCertificateDetailsQueueManagerID := acc.MqcloudQueueManagerID trustStoreCertificateDetailsLabel := fmt.Sprintf("tf_label_%d", acctest.RandIntRange(10, 100)) @@ -26,45 +27,9 @@ func TestAccIbmMqcloudTruststoreCertificateDataSourceBasic(t *testing.T) { { Config: 
testAccCheckIbmMqcloudTruststoreCertificateDataSourceConfigBasic(trustStoreCertificateDetailsServiceInstanceGuid, trustStoreCertificateDetailsQueueManagerID, trustStoreCertificateDetailsLabel, trustStoreCertificateDetailsCertificateFile), Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.ibm_mqcloud_truststore_certificate.mqcloud_truststore_certificate_instance", "id"), resource.TestCheckResourceAttrSet("data.ibm_mqcloud_truststore_certificate.mqcloud_truststore_certificate_instance", "service_instance_guid"), resource.TestCheckResourceAttrSet("data.ibm_mqcloud_truststore_certificate.mqcloud_truststore_certificate_instance", "queue_manager_id"), - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_truststore_certificate.mqcloud_truststore_certificate_instance", "trust_store.#"), - resource.TestCheckResourceAttr("data.ibm_mqcloud_truststore_certificate.mqcloud_truststore_certificate_instance", "trust_store.0.label", trustStoreCertificateDetailsLabel), - ), - }, - }, - }) -} - -func TestAccIbmMqcloudTruststoreCertificateDataSourceAllArgs(t *testing.T) { - trustStoreCertificateDetailsServiceInstanceGuid := acc.MqcloudInstanceID - trustStoreCertificateDetailsQueueManagerID := acc.MqcloudQueueManagerID - trustStoreCertificateDetailsLabel := fmt.Sprintf("tf_label_%d", acctest.RandIntRange(10, 100)) - trustStoreCertificateDetailsCertificateFile := acc.MqcloudTSCertFilePath - - resource.Test(t, resource.TestCase{ - PreCheck: func() { acc.TestAccPreCheckMqcloud(t) }, - Providers: acc.TestAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckIbmMqcloudTruststoreCertificateDataSourceConfig(trustStoreCertificateDetailsServiceInstanceGuid, trustStoreCertificateDetailsQueueManagerID, trustStoreCertificateDetailsLabel, trustStoreCertificateDetailsCertificateFile), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_truststore_certificate.mqcloud_truststore_certificate_instance", "service_instance_guid"), - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_truststore_certificate.mqcloud_truststore_certificate_instance", "queue_manager_id"), - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_truststore_certificate.mqcloud_truststore_certificate_instance", "label"), - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_truststore_certificate.mqcloud_truststore_certificate_instance", "total_count"), - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_truststore_certificate.mqcloud_truststore_certificate_instance", "trust_store.#"), - resource.TestCheckResourceAttr("data.ibm_mqcloud_truststore_certificate.mqcloud_truststore_certificate_instance", "trust_store.0.label", trustStoreCertificateDetailsLabel), - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_truststore_certificate.mqcloud_truststore_certificate_instance", "trust_store.0.certificate_type"), - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_truststore_certificate.mqcloud_truststore_certificate_instance", "trust_store.0.fingerprint_sha256"), - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_truststore_certificate.mqcloud_truststore_certificate_instance", "trust_store.0.subject_dn"), - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_truststore_certificate.mqcloud_truststore_certificate_instance", "trust_store.0.subject_cn"), - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_truststore_certificate.mqcloud_truststore_certificate_instance", "trust_store.0.issuer_dn"), - 
resource.TestCheckResourceAttrSet("data.ibm_mqcloud_truststore_certificate.mqcloud_truststore_certificate_instance", "trust_store.0.issuer_cn"), - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_truststore_certificate.mqcloud_truststore_certificate_instance", "trust_store.0.issued"), - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_truststore_certificate.mqcloud_truststore_certificate_instance", "trust_store.0.expiry"), - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_truststore_certificate.mqcloud_truststore_certificate_instance", "trust_store.0.trusted"), - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_truststore_certificate.mqcloud_truststore_certificate_instance", "trust_store.0.href"), ), }, }, @@ -77,24 +42,7 @@ func testAccCheckIbmMqcloudTruststoreCertificateDataSourceConfigBasic(trustStore service_instance_guid = "%s" queue_manager_id = "%s" label = "%s" - certificate_file = "%s" - } - - data "ibm_mqcloud_truststore_certificate" "mqcloud_truststore_certificate_instance" { - service_instance_guid = ibm_mqcloud_truststore_certificate.mqcloud_truststore_certificate_instance.service_instance_guid - queue_manager_id = ibm_mqcloud_truststore_certificate.mqcloud_truststore_certificate_instance.queue_manager_id - label = ibm_mqcloud_truststore_certificate.mqcloud_truststore_certificate_instance.label - } - `, trustStoreCertificateDetailsServiceInstanceGuid, trustStoreCertificateDetailsQueueManagerID, trustStoreCertificateDetailsLabel, trustStoreCertificateDetailsCertificateFile) -} - -func testAccCheckIbmMqcloudTruststoreCertificateDataSourceConfig(trustStoreCertificateDetailsServiceInstanceGuid string, trustStoreCertificateDetailsQueueManagerID string, trustStoreCertificateDetailsLabel string, trustStoreCertificateDetailsCertificateFile string) string { - return fmt.Sprintf(` - resource "ibm_mqcloud_truststore_certificate" "mqcloud_truststore_certificate_instance" { - service_instance_guid = "%s" - queue_manager_id = "%s" - label = "%s" - certificate_file = "%s" + certificate_file = filebase64("%s") } data "ibm_mqcloud_truststore_certificate" "mqcloud_truststore_certificate_instance" { diff --git a/ibm/service/mqcloud/data_source_ibm_mqcloud_user.go b/ibm/service/mqcloud/data_source_ibm_mqcloud_user.go index 259aefa17d..c89a9e0f9e 100644 --- a/ibm/service/mqcloud/data_source_ibm_mqcloud_user.go +++ b/ibm/service/mqcloud/data_source_ibm_mqcloud_user.go @@ -127,7 +127,7 @@ func dataSourceIbmMqcloudUserRead(context context.Context, d *schema.ResourceDat if suppliedFilter { if len(allItems) == 0 { - return diag.FromErr(fmt.Errorf("No Users found with name %s", name)) + return diag.FromErr(fmt.Errorf("No User found with name: \"%s\"", name)) } d.SetId(name) } else { @@ -145,7 +145,7 @@ func dataSourceIbmMqcloudUserRead(context context.Context, d *schema.ResourceDat } if err = d.Set("users", mapSlice); err != nil { - return diag.FromErr(fmt.Errorf("Error setting users %s", err)) + return diag.FromErr(fmt.Errorf("Error setting users: %s", err)) } return nil diff --git a/ibm/service/mqcloud/data_source_ibm_mqcloud_user_test.go b/ibm/service/mqcloud/data_source_ibm_mqcloud_user_test.go index eb3491817e..8823e06d76 100644 --- a/ibm/service/mqcloud/data_source_ibm_mqcloud_user_test.go +++ b/ibm/service/mqcloud/data_source_ibm_mqcloud_user_test.go @@ -14,6 +14,7 @@ import ( ) func TestAccIbmMqcloudUserDataSourceBasic(t *testing.T) { + t.Parallel() userDetailsServiceInstanceGuid := acc.MqcloudInstanceID userDetailsName := fmt.Sprintf("tfname%d", acctest.RandIntRange(10, 100)) 
userDetailsEmail := fmt.Sprintf("tfemail%d@ibm.com", acctest.RandIntRange(10, 100)) @@ -36,32 +37,6 @@ func TestAccIbmMqcloudUserDataSourceBasic(t *testing.T) { }) } -func TestAccIbmMqcloudUserDataSourceAllArgs(t *testing.T) { - userDetailsServiceInstanceGuid := acc.MqcloudInstanceID - userDetailsName := fmt.Sprintf("tfname%d", acctest.RandIntRange(10, 100)) - userDetailsEmail := fmt.Sprintf("tfemail%d@ibm.com", acctest.RandIntRange(10, 100)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { acc.TestAccPreCheckMqcloud(t) }, - Providers: acc.TestAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccCheckIbmMqcloudUserDataSourceConfig(userDetailsServiceInstanceGuid, userDetailsName, userDetailsEmail), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_user.mqcloud_user_instance", "id"), - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_user.mqcloud_user_instance", "service_instance_guid"), - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_user.mqcloud_user_instance", "name"), - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_user.mqcloud_user_instance", "users.#"), - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_user.mqcloud_user_instance", "users.0.id"), - resource.TestCheckResourceAttr("data.ibm_mqcloud_user.mqcloud_user_instance", "users.0.name", userDetailsName), - resource.TestCheckResourceAttr("data.ibm_mqcloud_user.mqcloud_user_instance", "users.0.email", userDetailsEmail), - resource.TestCheckResourceAttrSet("data.ibm_mqcloud_user.mqcloud_user_instance", "users.0.href"), - ), - }, - }, - }) -} - func testAccCheckIbmMqcloudUserDataSourceConfigBasic(userDetailsServiceInstanceGuid string, userDetailsName string, userDetailsEmail string) string { return fmt.Sprintf(` resource "ibm_mqcloud_user" "mqcloud_user_instance" { @@ -76,18 +51,3 @@ func testAccCheckIbmMqcloudUserDataSourceConfigBasic(userDetailsServiceInstanceG } `, userDetailsServiceInstanceGuid, userDetailsName, userDetailsEmail) } - -func testAccCheckIbmMqcloudUserDataSourceConfig(userDetailsServiceInstanceGuid string, userDetailsName string, userDetailsEmail string) string { - return fmt.Sprintf(` - resource "ibm_mqcloud_user" "mqcloud_user_instance" { - service_instance_guid = "%s" - name = "%s" - email = "%s" - } - - data "ibm_mqcloud_user" "mqcloud_user_instance" { - service_instance_guid = ibm_mqcloud_user.mqcloud_user_instance.service_instance_guid - name = ibm_mqcloud_user.mqcloud_user_instance.name - } - `, userDetailsServiceInstanceGuid, userDetailsName, userDetailsEmail) -} diff --git a/ibm/service/mqcloud/resource_ibm_mqcloud_application_test.go b/ibm/service/mqcloud/resource_ibm_mqcloud_application_test.go index b780d789d1..fec357e6ed 100644 --- a/ibm/service/mqcloud/resource_ibm_mqcloud_application_test.go +++ b/ibm/service/mqcloud/resource_ibm_mqcloud_application_test.go @@ -17,6 +17,7 @@ import ( ) func TestAccIbmMqcloudApplicationBasic(t *testing.T) { + t.Parallel() var conf mqcloudv1.ApplicationDetails serviceInstanceGuid := acc.MqcloudInstanceID name := "appbasic" @@ -34,28 +35,6 @@ func TestAccIbmMqcloudApplicationBasic(t *testing.T) { resource.TestCheckResourceAttr("ibm_mqcloud_application.mqcloud_application_instance", "name", name), ), }, - }, - }) -} - -func TestAccIbmMqcloudApplicationAllArgs(t *testing.T) { - var conf mqcloudv1.ApplicationDetails - serviceInstanceGuid := acc.MqcloudInstanceID - name := "appallargs" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { acc.TestAccPreCheckMqcloud(t) }, - Providers: 
acc.TestAccProviders, - CheckDestroy: testAccCheckIbmMqcloudApplicationDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckIbmMqcloudApplicationConfig(serviceInstanceGuid, name), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckIbmMqcloudApplicationExists("ibm_mqcloud_application.mqcloud_application_instance", conf), - resource.TestCheckResourceAttr("ibm_mqcloud_application.mqcloud_application_instance", "service_instance_guid", serviceInstanceGuid), - resource.TestCheckResourceAttr("ibm_mqcloud_application.mqcloud_application_instance", "name", name), - ), - }, { ResourceName: "ibm_mqcloud_application.mqcloud_application_instance", ImportState: true, @@ -74,16 +53,6 @@ func testAccCheckIbmMqcloudApplicationConfigBasic(serviceInstanceGuid string, na `, serviceInstanceGuid, name) } -func testAccCheckIbmMqcloudApplicationConfig(serviceInstanceGuid string, name string) string { - return fmt.Sprintf(` - - resource "ibm_mqcloud_application" "mqcloud_application_instance" { - service_instance_guid = "%s" - name = "%s" - } - `, serviceInstanceGuid, name) -} - func testAccCheckIbmMqcloudApplicationExists(n string, obj mqcloudv1.ApplicationDetails) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] diff --git a/ibm/service/mqcloud/resource_ibm_mqcloud_keystore_certificate.go b/ibm/service/mqcloud/resource_ibm_mqcloud_keystore_certificate.go index 859d0aa5cc..9cc0f6be06 100644 --- a/ibm/service/mqcloud/resource_ibm_mqcloud_keystore_certificate.go +++ b/ibm/service/mqcloud/resource_ibm_mqcloud_keystore_certificate.go @@ -4,12 +4,12 @@ package mqcloud import ( + "bytes" "context" + "encoding/base64" "fmt" "io" "log" - "os" - "strings" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -47,7 +47,13 @@ func ResourceIbmMqcloudKeystoreCertificate() *schema.Resource { Required: true, ForceNew: true, ValidateFunc: validate.InvokeValidator("ibm_mqcloud_keystore_certificate", "label"), - Description: "Certificate label in queue manager store.", + Description: "The label to use for the certificate to be uploaded.", + }, + "certificate_file": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The filename and path of the certificate to be uploaded.", }, "certificate_type": { Type: schema.TypeString, @@ -115,12 +121,6 @@ func ResourceIbmMqcloudKeystoreCertificate() *schema.Resource { Computed: true, Description: "ID of the certificate.", }, - "certificate_file": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "The filename and path of the certificate to be uploaded.", - }, }, } } @@ -175,18 +175,11 @@ func resourceIbmMqcloudKeystoreCertificateCreate(context context.Context, d *sch createKeyStorePemCertificateOptions.SetServiceInstanceGuid(d.Get("service_instance_guid").(string)) createKeyStorePemCertificateOptions.SetQueueManagerID(d.Get("queue_manager_id").(string)) createKeyStorePemCertificateOptions.SetLabel(d.Get("label").(string)) - //Custom code to read certs and pass to SDK - certBytes, err := os.ReadFile(d.Get("certificate_file").(string)) // just pass the file name + certificateFileBytes, err := base64.StdEncoding.DecodeString(d.Get("certificate_file").(string)) if err != nil { - fmt.Print(err) + return diag.FromErr(err) } - certString := string(certBytes) // convert content to a 'string' - rc := io.NopCloser(strings.NewReader(certString)) - // certificateFileModel, err := 
resourceIbmMqcloudKeystoreCertificateMapToio.ReadCloser(d.Get("certificate_file.0").(map[string]interface{})) - // if err != nil { - // return diag.FromErr(err) - // } - createKeyStorePemCertificateOptions.SetCertificateFile(rc) + createKeyStorePemCertificateOptions.SetCertificateFile(io.NopCloser(bytes.NewReader(certificateFileBytes))) keyStoreCertificateDetails, response, err := mqcloudClient.CreateKeyStorePemCertificateWithContext(context, createKeyStorePemCertificateOptions) if err != nil { @@ -225,16 +218,13 @@ func resourceIbmMqcloudKeystoreCertificateRead(context context.Context, d *schem log.Printf("[DEBUG] GetKeyStoreCertificateWithContext failed %s\n%s", err, response) return diag.FromErr(fmt.Errorf("GetKeyStoreCertificateWithContext failed %s\n%s", err, response)) } + if err = d.Set("service_instance_guid", parts[0]); err != nil { return diag.FromErr(fmt.Errorf("Error setting service_instance_guid: %s", err)) } if err = d.Set("queue_manager_id", parts[1]); err != nil { return diag.FromErr(fmt.Errorf("Error setting queue_manager_id: %s", err)) } - downloadCertificatePath := "./certificates/keystore/" + *keyStoreCertificateDetails.Label + ".pem" - if err = d.Set("certificate_file", downloadCertificatePath); err != nil { - return diag.FromErr(fmt.Errorf("Error setting certificate_file: %s", err)) - } if err = d.Set("label", keyStoreCertificateDetails.Label); err != nil { return diag.FromErr(fmt.Errorf("Error setting label: %s", err)) } diff --git a/ibm/service/mqcloud/resource_ibm_mqcloud_keystore_certificate_test.go b/ibm/service/mqcloud/resource_ibm_mqcloud_keystore_certificate_test.go index 5041c8ed77..83461fc0a2 100644 --- a/ibm/service/mqcloud/resource_ibm_mqcloud_keystore_certificate_test.go +++ b/ibm/service/mqcloud/resource_ibm_mqcloud_keystore_certificate_test.go @@ -18,6 +18,7 @@ import ( ) func TestAccIbmMqcloudKeystoreCertificateBasic(t *testing.T) { + t.Parallel() var conf mqcloudv1.KeyStoreCertificateDetails serviceInstanceGuid := acc.MqcloudInstanceID queueManagerID := acc.MqcloudQueueManagerID @@ -38,35 +39,11 @@ func TestAccIbmMqcloudKeystoreCertificateBasic(t *testing.T) { resource.TestCheckResourceAttr("ibm_mqcloud_keystore_certificate.mqcloud_keystore_certificate_instance", "label", label), ), }, - }, - }) -} - -func TestAccIbmMqcloudKeystoreCertificateAllArgs(t *testing.T) { - var conf mqcloudv1.KeyStoreCertificateDetails - serviceInstanceGuid := acc.MqcloudInstanceID - queueManagerID := acc.MqcloudQueueManagerID - label := fmt.Sprintf("tf_label_%d", acctest.RandIntRange(10, 100)) - certificateFile := acc.MqcloudKSCertFilePath - - resource.Test(t, resource.TestCase{ - PreCheck: func() { acc.TestAccPreCheckMqcloud(t) }, - Providers: acc.TestAccProviders, - CheckDestroy: testAccCheckIbmMqcloudKeystoreCertificateDestroy, - Steps: []resource.TestStep{ { - Config: testAccCheckIbmMqcloudKeystoreCertificateConfig(serviceInstanceGuid, queueManagerID, label, certificateFile), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckIbmMqcloudKeystoreCertificateExists("ibm_mqcloud_keystore_certificate.mqcloud_keystore_certificate_instance", conf), - resource.TestCheckResourceAttr("ibm_mqcloud_keystore_certificate.mqcloud_keystore_certificate_instance", "service_instance_guid", serviceInstanceGuid), - resource.TestCheckResourceAttr("ibm_mqcloud_keystore_certificate.mqcloud_keystore_certificate_instance", "queue_manager_id", queueManagerID), - resource.TestCheckResourceAttr("ibm_mqcloud_keystore_certificate.mqcloud_keystore_certificate_instance", "label", label), - 
), - }, - { - ResourceName: "ibm_mqcloud_keystore_certificate.mqcloud_keystore_certificate_instance", - ImportState: true, - ImportStateVerify: true, + ResourceName: "ibm_mqcloud_keystore_certificate.mqcloud_keystore_certificate_instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"certificate_file"}, }, }, }) @@ -78,19 +55,7 @@ func testAccCheckIbmMqcloudKeystoreCertificateConfigBasic(serviceInstanceGuid st service_instance_guid = "%s" queue_manager_id = "%s" label = "%s" - certificate_file = "%s" - } - `, serviceInstanceGuid, queueManagerID, label, certificateFile) -} - -func testAccCheckIbmMqcloudKeystoreCertificateConfig(serviceInstanceGuid string, queueManagerID string, label string, certificateFile string) string { - return fmt.Sprintf(` - - resource "ibm_mqcloud_keystore_certificate" "mqcloud_keystore_certificate_instance" { - service_instance_guid = "%s" - queue_manager_id = "%s" - label = "%s" - certificate_file = "%s" + certificate_file = filebase64("%s") } `, serviceInstanceGuid, queueManagerID, label, certificateFile) } diff --git a/ibm/service/mqcloud/resource_ibm_mqcloud_queue_manager.go b/ibm/service/mqcloud/resource_ibm_mqcloud_queue_manager.go index a94f6fa5c9..252837593f 100644 --- a/ibm/service/mqcloud/resource_ibm_mqcloud_queue_manager.go +++ b/ibm/service/mqcloud/resource_ibm_mqcloud_queue_manager.go @@ -252,14 +252,15 @@ func resourceIbmMqcloudQueueManagerRead(context context.Context, d *schema.Resou var queueManagerDetails *mqcloudv1.QueueManagerDetails var response *core.DetailedResponse - err = resource.RetryContext(context, 10*time.Second, func() *resource.RetryError { + err = resource.RetryContext(context, 150*time.Second, func() *resource.RetryError { queueManagerDetails, response, err = mqcloudClient.GetQueueManagerWithContext(context, getQueueManagerOptions) - if err != nil || response == nil { - if response.StatusCode == 404 { - return resource.RetryableError(err) + if err != nil || queueManagerDetails == nil { + if response != nil && response.StatusCode == 404 { + return resource.RetryableError(fmt.Errorf("Queue Manager not found, retrying")) } return resource.NonRetryableError(err) } + return nil }) diff --git a/ibm/service/mqcloud/resource_ibm_mqcloud_queue_manager_test.go b/ibm/service/mqcloud/resource_ibm_mqcloud_queue_manager_test.go index a6ccbd7feb..d010549e4f 100644 --- a/ibm/service/mqcloud/resource_ibm_mqcloud_queue_manager_test.go +++ b/ibm/service/mqcloud/resource_ibm_mqcloud_queue_manager_test.go @@ -14,14 +14,15 @@ import ( "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" "github.com/IBM/mqcloud-go-sdk/mqcloudv1" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" ) func TestAccIbmMqcloudQueueManagerBasic(t *testing.T) { t.Parallel() var conf mqcloudv1.QueueManagerDetails serviceInstanceGuid := acc.MqcloudInstanceID - name := "queue_manager_basic" - location := "ibmcloud_eu_de" + name := fmt.Sprintf("tf_queue_manager_basic%d", acctest.RandIntRange(10, 100)) + location := acc.MqCloudQueueManagerLocation size := "small" resource.Test(t, resource.TestCase{ @@ -47,12 +48,12 @@ func TestAccIbmMqcloudQueueManagerAllArgs(t *testing.T) { t.Parallel() var conf mqcloudv1.QueueManagerDetails serviceInstanceGuid := acc.MqcloudInstanceID - name := "queue_manager_allargs" - displayName := "queue_manager_allargs" - location := "ibmcloud_eu_de" + name := fmt.Sprintf("tf_queue_manager_allargs%d", acctest.RandIntRange(10, 100)) + displayName := 
name + location := acc.MqCloudQueueManagerLocation size := "small" - version := "9.3.3_3" - versionUpdate := "9.3.4_1" + version := acc.MqCloudQueueManagerVersion + versionUpdate := acc.MqCloudQueueManagerVersionUpdate resource.Test(t, resource.TestCase{ PreCheck: func() { acc.TestAccPreCheckMqcloud(t) }, diff --git a/ibm/service/mqcloud/resource_ibm_mqcloud_truststore_certificate.go b/ibm/service/mqcloud/resource_ibm_mqcloud_truststore_certificate.go index 63a35c247e..ed12be2ddf 100644 --- a/ibm/service/mqcloud/resource_ibm_mqcloud_truststore_certificate.go +++ b/ibm/service/mqcloud/resource_ibm_mqcloud_truststore_certificate.go @@ -4,12 +4,12 @@ package mqcloud import ( + "bytes" "context" + "encoding/base64" "fmt" "io" "log" - "os" - "strings" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -47,7 +47,13 @@ func ResourceIbmMqcloudTruststoreCertificate() *schema.Resource { Required: true, ForceNew: true, ValidateFunc: validate.InvokeValidator("ibm_mqcloud_truststore_certificate", "label"), - Description: "Certificate label in queue manager store.", + Description: "The label to use for the certificate to be uploaded.", + }, + "certificate_file": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The filename and path of the certificate to be uploaded.", }, "certificate_type": { Type: schema.TypeString, @@ -104,12 +110,6 @@ func ResourceIbmMqcloudTruststoreCertificate() *schema.Resource { Computed: true, Description: "Id of the certificate.", }, - "certificate_file": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "The filename and path of the certificate to be uploaded.", - }, }, } } @@ -165,18 +165,11 @@ func resourceIbmMqcloudTruststoreCertificateCreate(context context.Context, d *s createTrustStorePemCertificateOptions.SetServiceInstanceGuid(d.Get("service_instance_guid").(string)) createTrustStorePemCertificateOptions.SetQueueManagerID(d.Get("queue_manager_id").(string)) createTrustStorePemCertificateOptions.SetLabel(d.Get("label").(string)) - //Custom code to read certs and pass to SDK - certBytes, err := os.ReadFile(d.Get("certificate_file").(string)) // just pass the file name + certificateFileBytes, err := base64.StdEncoding.DecodeString(d.Get("certificate_file").(string)) if err != nil { - fmt.Print(err) + return diag.FromErr(err) } - certString := string(certBytes) // convert content to a 'string' - rc := io.NopCloser(strings.NewReader(certString)) - // certificateFileModel, err := resourceIbmMqcloudTruststoreCertificateMapToio.ReadCloser(d.Get("certificate_file.0").(map[string]interface{})) - // if err != nil { - // return diag.FromErr(err) - // } - createTrustStorePemCertificateOptions.SetCertificateFile(rc) + createTrustStorePemCertificateOptions.SetCertificateFile(io.NopCloser(bytes.NewReader(certificateFileBytes))) trustStoreCertificateDetails, response, err := mqcloudClient.CreateTrustStorePemCertificateWithContext(context, createTrustStorePemCertificateOptions) if err != nil { @@ -215,16 +208,13 @@ func resourceIbmMqcloudTruststoreCertificateRead(context context.Context, d *sch log.Printf("[DEBUG] GetTrustStoreCertificateWithContext failed %s\n%s", err, response) return diag.FromErr(fmt.Errorf("GetTrustStoreCertificateWithContext failed %s\n%s", err, response)) } + if err = d.Set("service_instance_guid", parts[0]); err != nil { return diag.FromErr(fmt.Errorf("Error setting service_instance_guid: %s", err)) } if err = d.Set("queue_manager_id", 
parts[1]); err != nil { return diag.FromErr(fmt.Errorf("Error setting queue_manager_id: %s", err)) } - downloadCertificatePath := "./certificates/truststore/" + *trustStoreCertificateDetails.Label + ".pem" - if err = d.Set("certificate_file", downloadCertificatePath); err != nil { - return diag.FromErr(fmt.Errorf("Error setting certificate_file: %s", err)) - } if err = d.Set("label", trustStoreCertificateDetails.Label); err != nil { return diag.FromErr(fmt.Errorf("Error setting label: %s", err)) } diff --git a/ibm/service/mqcloud/resource_ibm_mqcloud_truststore_certificate_test.go b/ibm/service/mqcloud/resource_ibm_mqcloud_truststore_certificate_test.go index 80acc0e4a1..bfea0ce68e 100644 --- a/ibm/service/mqcloud/resource_ibm_mqcloud_truststore_certificate_test.go +++ b/ibm/service/mqcloud/resource_ibm_mqcloud_truststore_certificate_test.go @@ -18,6 +18,7 @@ import ( ) func TestAccIbmMqcloudTruststoreCertificateBasic(t *testing.T) { + t.Parallel() var conf mqcloudv1.TrustStoreCertificateDetails serviceInstanceGuid := acc.MqcloudInstanceID queueManagerID := acc.MqcloudQueueManagerID @@ -38,35 +39,11 @@ func TestAccIbmMqcloudTruststoreCertificateBasic(t *testing.T) { resource.TestCheckResourceAttr("ibm_mqcloud_truststore_certificate.mqcloud_truststore_certificate_instance", "label", label), ), }, - }, - }) -} - -func TestAccIbmMqcloudTruststoreCertificateAllArgs(t *testing.T) { - var conf mqcloudv1.TrustStoreCertificateDetails - serviceInstanceGuid := acc.MqcloudInstanceID - queueManagerID := acc.MqcloudQueueManagerID - label := fmt.Sprintf("tf_label_%d", acctest.RandIntRange(10, 100)) - certificateFile := acc.MqcloudTSCertFilePath - - resource.Test(t, resource.TestCase{ - PreCheck: func() { acc.TestAccPreCheckMqcloud(t) }, - Providers: acc.TestAccProviders, - CheckDestroy: testAccCheckIbmMqcloudTruststoreCertificateDestroy, - Steps: []resource.TestStep{ { - Config: testAccCheckIbmMqcloudTruststoreCertificateConfig(serviceInstanceGuid, queueManagerID, label, certificateFile), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckIbmMqcloudTruststoreCertificateExists("ibm_mqcloud_truststore_certificate.mqcloud_truststore_certificate_instance", conf), - resource.TestCheckResourceAttr("ibm_mqcloud_truststore_certificate.mqcloud_truststore_certificate_instance", "service_instance_guid", serviceInstanceGuid), - resource.TestCheckResourceAttr("ibm_mqcloud_truststore_certificate.mqcloud_truststore_certificate_instance", "queue_manager_id", queueManagerID), - resource.TestCheckResourceAttr("ibm_mqcloud_truststore_certificate.mqcloud_truststore_certificate_instance", "label", label), - ), - }, - { - ResourceName: "ibm_mqcloud_truststore_certificate.mqcloud_truststore_certificate_instance", - ImportState: true, - ImportStateVerify: true, + ResourceName: "ibm_mqcloud_truststore_certificate.mqcloud_truststore_certificate_instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"certificate_file"}, }, }, }) @@ -78,19 +55,7 @@ func testAccCheckIbmMqcloudTruststoreCertificateConfigBasic(serviceInstanceGuid service_instance_guid = "%s" queue_manager_id = "%s" label = "%s" - certificate_file = "%s" - } - `, serviceInstanceGuid, queueManagerID, label, certificateFile) -} - -func testAccCheckIbmMqcloudTruststoreCertificateConfig(serviceInstanceGuid string, queueManagerID string, label string, certificateFile string) string { - return fmt.Sprintf(` - - resource "ibm_mqcloud_truststore_certificate" "mqcloud_truststore_certificate_instance" { - service_instance_guid 
= "%s" - queue_manager_id = "%s" - label = "%s" - certificate_file = "%s" + certificate_file = filebase64("%s") } `, serviceInstanceGuid, queueManagerID, label, certificateFile) } diff --git a/ibm/service/mqcloud/resource_ibm_mqcloud_user_test.go b/ibm/service/mqcloud/resource_ibm_mqcloud_user_test.go index 551827f583..fd7ad738af 100644 --- a/ibm/service/mqcloud/resource_ibm_mqcloud_user_test.go +++ b/ibm/service/mqcloud/resource_ibm_mqcloud_user_test.go @@ -18,6 +18,7 @@ import ( ) func TestAccIbmMqcloudUserBasic(t *testing.T) { + t.Parallel() var conf mqcloudv1.UserDetails serviceInstanceGuid := acc.MqcloudInstanceID name := fmt.Sprintf("tfname%d", acctest.RandIntRange(10, 100)) @@ -37,30 +38,6 @@ func TestAccIbmMqcloudUserBasic(t *testing.T) { resource.TestCheckResourceAttr("ibm_mqcloud_user.mqcloud_user_instance", "email", email), ), }, - }, - }) -} - -func TestAccIbmMqcloudUserAllArgs(t *testing.T) { - var conf mqcloudv1.UserDetails - serviceInstanceGuid := acc.MqcloudInstanceID - name := fmt.Sprintf("tfname%d", acctest.RandIntRange(10, 100)) - email := fmt.Sprintf("tfemail%d@ibm.com", acctest.RandIntRange(10, 100)) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { acc.TestAccPreCheckMqcloud(t) }, - Providers: acc.TestAccProviders, - CheckDestroy: testAccCheckIbmMqcloudUserDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCheckIbmMqcloudUserConfig(serviceInstanceGuid, name, email), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckIbmMqcloudUserExists("ibm_mqcloud_user.mqcloud_user_instance", conf), - resource.TestCheckResourceAttr("ibm_mqcloud_user.mqcloud_user_instance", "service_instance_guid", serviceInstanceGuid), - resource.TestCheckResourceAttr("ibm_mqcloud_user.mqcloud_user_instance", "name", name), - resource.TestCheckResourceAttr("ibm_mqcloud_user.mqcloud_user_instance", "email", email), - ), - }, { ResourceName: "ibm_mqcloud_user.mqcloud_user_instance", ImportState: true, @@ -80,17 +57,6 @@ func testAccCheckIbmMqcloudUserConfigBasic(serviceInstanceGuid string, name stri `, serviceInstanceGuid, name, email) } -func testAccCheckIbmMqcloudUserConfig(serviceInstanceGuid string, name string, email string) string { - return fmt.Sprintf(` - - resource "ibm_mqcloud_user" "mqcloud_user_instance" { - service_instance_guid = "%s" - name = "%s" - email = "%s" - } - `, serviceInstanceGuid, name, email) -} - func testAccCheckIbmMqcloudUserExists(n string, obj mqcloudv1.UserDetails) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] diff --git a/ibm/service/mqcloud/utils.go b/ibm/service/mqcloud/utils.go index 877b5ed79b..58cd7c50eb 100644 --- a/ibm/service/mqcloud/utils.go +++ b/ibm/service/mqcloud/utils.go @@ -33,7 +33,7 @@ const isQueueManagerDeleteDone = "true" const reservedDeploymentPlan = "reserved-deployment" const enforceReservedDeploymentPlan = true -// waitForQmStatusUpdate Waits for QM to be in running state +// waitForQmStatusUpdate waits for Queue Manager to be in running state func waitForQmStatusUpdate(context context.Context, d *schema.ResourceData, meta interface{}) (interface{}, error) { mqcloudClient, err := meta.(conns.ClientSession).MqcloudV1() if err != nil { @@ -58,7 +58,6 @@ func waitForQmStatusUpdate(context context.Context, d *schema.ResourceData, meta if queueManagerStatus == nil || queueManagerStatus.Status == nil { return nil, "", fmt.Errorf("queueManagerStatus or queueManagerStatus.Status is nil") } - fmt.Println("The queue manager is currently in the " + 
*queueManagerStatus.Status + " state ....") if *queueManagerStatus.Status == "running" { return queueManagerStatus, qmStatus, nil @@ -135,7 +134,7 @@ func checkSIPlan(d *schema.ResourceData, meta interface{}) error { } instance, response, err := rsConClient.GetResourceInstance(&rsInst) if err != nil { - return fmt.Errorf("[ERROR] Failed to retrieve resource instance: %s, Response: %s", err, response) + return fmt.Errorf("[ERROR] Failed to retrieve Resource Instance: %s, Response: %s", err, response) } // Creating a Resource Catalog Client @@ -148,7 +147,7 @@ func checkSIPlan(d *schema.ResourceData, meta interface{}) error { // Checking the service plan plan, err := rsCatRepo.GetServicePlanName(*instance.ResourcePlanID) if err != nil { - return fmt.Errorf("[ERROR] Failed to retrieve service plan: %s", err) + return fmt.Errorf("[ERROR] Failed to retrieve Service Plan: %s", err) } // Update cache diff --git a/ibm/service/power/data_source_ibm_pi_cloud_connection.go b/ibm/service/power/data_source_ibm_pi_cloud_connection.go index 8e5bc59f83..f877aac73b 100644 --- a/ibm/service/power/data_source_ibm_pi_cloud_connection.go +++ b/ibm/service/power/data_source_ibm_pi_cloud_connection.go @@ -7,116 +7,104 @@ import ( "context" "log" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/IBM-Cloud/power-go-client/clients/instance" - "github.com/IBM-Cloud/power-go-client/helpers" "github.com/IBM-Cloud/power-go-client/power/models" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" -) - -const ( - PICloudConnectionId = "cloud_connection_id" - PICloudConnectionName = "name" - PICloudConnectionSpeed = "speed" - PICloudConnectionGlobalRouting = "global_routing" - PICloudConnectionMetered = "metered" - PICloudConnectionStatus = "status" - PICloudConnectionClassicEnabled = "classic_enabled" - PICloudConnectionUserIPAddress = "user_ip_address" - PICloudConnectionIBMIPAddress = "ibm_ip_address" - PICloudConnectionPort = "port" - PICloudConnectionNetworks = "networks" - PICloudConnectionClassicGreDest = "gre_destination_address" - PICloudConnectionClassicGreSource = "gre_source_address" - PICloudConnectionVPCEnabled = "vpc_enabled" - PICloudConnectionVPCCRNs = "vpc_crns" - PICloudConnectionConnectionMode = "connection_mode" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func DataSourceIBMPICloudConnection() *schema.Resource { return &schema.Resource{ ReadContext: dataSourceIBMPICloudConnectionRead, Schema: map[string]*schema.Schema{ - helpers.PICloudInstanceId: { - Type: schema.TypeString, + // Arguments + Arg_CloudInstanceID: { + Description: "The GUID of the service instance associated with an account.", Required: true, + Type: schema.TypeString, ValidateFunc: validation.NoZeroValues, }, - helpers.PICloudConnectionName: { - Type: schema.TypeString, + Arg_CloudConnectionName: { + Description: "The cloud connection name to be used.", Required: true, - Description: "Cloud Connection Name to be used", + Type: schema.TypeString, ValidateFunc: validation.NoZeroValues, }, - // Computed Attributes - PICloudConnectionSpeed: { - Type: schema.TypeInt, - Computed: true, + // Attributes + Attr_ClassicEnabled: { + Computed: true, + Description: "Enable classic endpoint destination.", + Type: schema.TypeBool, }, - 
PICloudConnectionGlobalRouting: { - Type: schema.TypeBool, - Computed: true, + Attr_ConnectionMode: { + Computed: true, + Description: "Type of service the gateway is attached to.", + Type: schema.TypeString, }, - PICloudConnectionMetered: { - Type: schema.TypeBool, - Computed: true, + Attr_GlobalRouting: { + Computed: true, + Description: "Enable global routing for this cloud connection.", + Type: schema.TypeBool, }, - PICloudConnectionStatus: { - Type: schema.TypeString, - Computed: true, + Attr_GreDestinationAddress: { + Computed: true, + Description: "GRE destination IP address.", + Type: schema.TypeString, }, - PICloudConnectionIBMIPAddress: { - Type: schema.TypeString, - Computed: true, + Attr_GreSourceAddress: { + Computed: true, + Description: "GRE auto-assigned source IP address.", + Type: schema.TypeString, }, - PICloudConnectionUserIPAddress: { - Type: schema.TypeString, - Computed: true, + Attr_IBMIPAddress: { + Computed: true, + Description: "The IBM IP address.", + Type: schema.TypeString, }, - PICloudConnectionPort: { - Type: schema.TypeString, - Computed: true, + Attr_Metered: { + Computed: true, + Description: "Enable metering for this cloud connection.", + Type: schema.TypeBool, }, - PICloudConnectionNetworks: { - Type: schema.TypeSet, + Attr_Networks: { Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, Description: "Set of Networks attached to this cloud connection", + Elem: &schema.Schema{Type: schema.TypeString}, + Type: schema.TypeSet, }, - PICloudConnectionClassicEnabled: { - Type: schema.TypeBool, + Attr_Port: { Computed: true, - Description: "Enable classic endpoint destination", - }, - PICloudConnectionClassicGreDest: { + Description: "Port.", Type: schema.TypeString, + }, + Attr_Speed: { Computed: true, - Description: "GRE destination IP address", + Description: "Speed of the cloud connection (speed in megabits per second)", + Type: schema.TypeInt, }, - PICloudConnectionClassicGreSource: { - Type: schema.TypeString, + Attr_Status: { Computed: true, - Description: "GRE auto-assigned source IP address", + Description: "Link status.", + Type: schema.TypeString, }, - PICloudConnectionVPCEnabled: { - Type: schema.TypeBool, + Attr_UserIPAddress: { Computed: true, - Description: "Enable VPC for this cloud connection", + Description: "User IP address.", + Type: schema.TypeString, }, - PICloudConnectionVPCCRNs: { - Type: schema.TypeSet, + Attr_VPCCRNs: { Computed: true, + Description: "Set of VPCs attached to this cloud connection.", Elem: &schema.Schema{Type: schema.TypeString}, - Description: "Set of VPCs attached to this cloud connection", + Type: schema.TypeSet, }, - PICloudConnectionConnectionMode: { - Type: schema.TypeString, + Attr_VPCEnabled: { Computed: true, - Description: "Type of service the gateway is attached to", + Description: "Enable VPC for this cloud connection.", + Type: schema.TypeBool, }, }, } @@ -128,8 +116,8 @@ func dataSourceIBMPICloudConnectionRead(ctx context.Context, d *schema.ResourceD return diag.FromErr(err) } - cloudInstanceID := d.Get(helpers.PICloudInstanceId).(string) - cloudConnectionName := d.Get(helpers.PICloudConnectionName).(string) + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) + cloudConnectionName := d.Get(Arg_CloudConnectionName).(string) client := instance.NewIBMPICloudConnectionClient(ctx, sess, cloudInstanceID) // Get API does not work with name for Cloud Connection hence using GetAll (max 2) @@ -165,16 +153,18 @@ func dataSourceIBMPICloudConnectionRead(ctx context.Context, d *schema.ResourceD } 
d.SetId(*cloudConnection.CloudConnectionID) - d.Set(helpers.PICloudConnectionName, cloudConnection.Name) - d.Set(PICloudConnectionGlobalRouting, cloudConnection.GlobalRouting) - d.Set(PICloudConnectionMetered, cloudConnection.Metered) - d.Set(PICloudConnectionIBMIPAddress, cloudConnection.IbmIPAddress) - d.Set(PICloudConnectionUserIPAddress, cloudConnection.UserIPAddress) - d.Set(PICloudConnectionStatus, cloudConnection.LinkStatus) - d.Set(PICloudConnectionPort, cloudConnection.Port) - d.Set(PICloudConnectionSpeed, cloudConnection.Speed) - d.Set(helpers.PICloudInstanceId, cloudInstanceID) - d.Set(PICloudConnectionConnectionMode, cloudConnection.ConnectionMode) + + d.Set(Arg_CloudInstanceID, cloudInstanceID) + d.Set(Arg_CloudConnectionName, cloudConnection.Name) + + d.Set(Attr_GlobalRouting, cloudConnection.GlobalRouting) + d.Set(Attr_Metered, cloudConnection.Metered) + d.Set(Attr_IBMIPAddress, cloudConnection.IbmIPAddress) + d.Set(Attr_UserIPAddress, cloudConnection.UserIPAddress) + d.Set(Attr_Status, cloudConnection.LinkStatus) + d.Set(Attr_Port, cloudConnection.Port) + d.Set(Attr_Speed, cloudConnection.Speed) + d.Set(Attr_ConnectionMode, cloudConnection.ConnectionMode) if cloudConnection.Networks != nil { networks := make([]string, len(cloudConnection.Networks)) for i, ccNetwork := range cloudConnection.Networks { @@ -182,24 +172,25 @@ func dataSourceIBMPICloudConnectionRead(ctx context.Context, d *schema.ResourceD networks[i] = *ccNetwork.NetworkID } } - d.Set(PICloudConnectionNetworks, networks) + d.Set(Attr_Networks, networks) } if cloudConnection.Classic != nil { - d.Set(PICloudConnectionClassicEnabled, cloudConnection.Classic.Enabled) + d.Set(Attr_ClassicEnabled, cloudConnection.Classic.Enabled) if cloudConnection.Classic.Gre != nil { - d.Set(PICloudConnectionClassicGreDest, cloudConnection.Classic.Gre.DestIPAddress) - d.Set(PICloudConnectionClassicGreSource, cloudConnection.Classic.Gre.SourceIPAddress) + d.Set(Attr_GreDestinationAddress, cloudConnection.Classic.Gre.DestIPAddress) + d.Set(Attr_GreSourceAddress, cloudConnection.Classic.Gre.SourceIPAddress) } } if cloudConnection.Vpc != nil { - d.Set(PICloudConnectionVPCEnabled, cloudConnection.Vpc.Enabled) + d.Set(Attr_VPCEnabled, cloudConnection.Vpc.Enabled) if cloudConnection.Vpc.Vpcs != nil && len(cloudConnection.Vpc.Vpcs) > 0 { vpcCRNs := make([]string, len(cloudConnection.Vpc.Vpcs)) for i, vpc := range cloudConnection.Vpc.Vpcs { vpcCRNs[i] = *vpc.VpcID } - d.Set(PICloudConnectionVPCCRNs, vpcCRNs) + d.Set(Attr_VPCCRNs, vpcCRNs) } } + return nil } diff --git a/ibm/service/power/data_source_ibm_pi_cloud_connection_test.go b/ibm/service/power/data_source_ibm_pi_cloud_connection_test.go index 732b10ad1f..60eda0cb08 100644 --- a/ibm/service/power/data_source_ibm_pi_cloud_connection_test.go +++ b/ibm/service/power/data_source_ibm_pi_cloud_connection_test.go @@ -30,9 +30,8 @@ func TestAccIBMPICloudConnectionDataSource_basic(t *testing.T) { func testAccCheckIBMPICloudConnectionDataSourceConfig() string { return fmt.Sprintf(` - data "ibm_pi_cloud_connection" "example" { - pi_cloud_connection_name = "%s" - pi_cloud_instance_id = "%s" - } - `, acc.PiCloudConnectionName, acc.Pi_cloud_instance_id) + data "ibm_pi_cloud_connection" "example" { + pi_cloud_connection_name = "%s" + pi_cloud_instance_id = "%s" + }`, acc.PiCloudConnectionName, acc.Pi_cloud_instance_id) } diff --git a/ibm/service/power/data_source_ibm_pi_cloud_connections.go b/ibm/service/power/data_source_ibm_pi_cloud_connections.go index ad5a35e434..2e99b1ea97 100644 --- 
a/ibm/service/power/data_source_ibm_pi_cloud_connections.go +++ b/ibm/service/power/data_source_ibm_pi_cloud_connections.go @@ -7,113 +7,118 @@ import ( "context" "log" - st "github.com/IBM-Cloud/power-go-client/clients/instance" - "github.com/IBM-Cloud/power-go-client/helpers" + "github.com/IBM-Cloud/power-go-client/clients/instance" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" "github.com/hashicorp/go-uuid" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) -/* -Datasource to get the list of Cloud Connections in a power instance -*/ - -const PICloudConnections = "connections" - +// Datasource to list Cloud Connections in a power instance func DataSourceIBMPICloudConnections() *schema.Resource { - return &schema.Resource{ ReadContext: dataSourceIBMPICloudConnectionsRead, Schema: map[string]*schema.Schema{ - helpers.PICloudInstanceId: { - Type: schema.TypeString, + // Arguments + Arg_CloudInstanceID: { + Description: "The GUID of the service instance associated with an account.", Required: true, + Type: schema.TypeString, ValidateFunc: validation.NoZeroValues, }, - // Computed Attributes - PICloudConnections: { - Type: schema.TypeList, - Computed: true, + + // Attributes + Attr_Connections: { + Computed: true, + Description: "List of all the Cloud Connections.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - PICloudConnectionId: { - Type: schema.TypeString, - Computed: true, + Attr_ClassicEnabled: { + Computed: true, + Description: "Enable classic endpoint destination.", + Type: schema.TypeBool, }, - PICloudConnectionName: { - Type: schema.TypeString, - Computed: true, + Attr_CloudConnectionID: { + Computed: true, + Description: "The unique identifier of the cloud connection.", + Type: schema.TypeString, }, - PICloudConnectionSpeed: { - Type: schema.TypeInt, - Computed: true, + Attr_ConnectionMode: { + Computed: true, + Description: "Type of service the gateway is attached to.", + Type: schema.TypeString, }, - PICloudConnectionGlobalRouting: { - Type: schema.TypeBool, - Computed: true, + Attr_GlobalRouting: { + Computed: true, + Description: "Enable global routing for this cloud connection.", + Type: schema.TypeBool, }, - PICloudConnectionMetered: { - Type: schema.TypeBool, - Computed: true, + Attr_GreDestinationAddress: { + Computed: true, + Description: "GRE destination IP address.", + Type: schema.TypeString, }, - PICloudConnectionStatus: { - Type: schema.TypeString, - Computed: true, + Attr_GreSourceAddress: { + Computed: true, + Description: "GRE auto-assigned source IP address.", + Type: schema.TypeString, }, - PICloudConnectionIBMIPAddress: { - Type: schema.TypeString, - Computed: true, + Attr_IBMIPAddress: { + Computed: true, + Description: "IBM IP address.", + Type: schema.TypeString, }, - PICloudConnectionUserIPAddress: { - Type: schema.TypeString, - Computed: true, + Attr_Metered: { + Computed: true, + Description: "Enable metering for this cloud connection.", + Type: schema.TypeBool, }, - PICloudConnectionPort: { - Type: schema.TypeString, - Computed: true, + Attr_Name: { + Computed: true, + Description: "Name of the cloud connection.", + Type: schema.TypeString, }, - PICloudConnectionNetworks: { - Type: schema.TypeSet, + Attr_Networks: { Computed: true, + Description: "Set of Networks attached to this cloud connection.", Elem: &schema.Schema{Type: schema.TypeString}, - Description: "Set of Networks attached to this cloud 
connection", + Type: schema.TypeSet, }, - PICloudConnectionClassicEnabled: { - Type: schema.TypeBool, + Attr_Port: { Computed: true, - Description: "Enable classic endpoint destination", - }, - PICloudConnectionClassicGreDest: { + Description: "Port.", Type: schema.TypeString, + }, + Attr_Speed: { Computed: true, - Description: "GRE destination IP address", + Description: "Speed of the cloud connection (speed in megabits per second).", + Type: schema.TypeInt, }, - PICloudConnectionClassicGreSource: { - Type: schema.TypeString, + Attr_Status: { Computed: true, - Description: "GRE auto-assigned source IP address", + Description: "Link status.", + Type: schema.TypeString, }, - PICloudConnectionVPCEnabled: { - Type: schema.TypeBool, + Attr_UserIPAddress: { Computed: true, - Description: "Enable VPC for this cloud connection", + Description: "User IP address.", + Type: schema.TypeString, }, - PICloudConnectionVPCCRNs: { - Type: schema.TypeSet, + Attr_VPCCRNs: { Computed: true, + Description: "Set of VPCs attached to this cloud connection.", Elem: &schema.Schema{Type: schema.TypeString}, - Description: "Set of VPCs attached to this cloud connection", + Type: schema.TypeSet, }, - PICloudConnectionConnectionMode: { - Type: schema.TypeString, + Attr_VPCEnabled: { Computed: true, - Description: "Type of service the gateway is attached to", + Description: "Enable VPC for this cloud connection.", + Type: schema.TypeBool, }, }, }, + Type: schema.TypeList, }, }, } @@ -125,8 +130,8 @@ func dataSourceIBMPICloudConnectionsRead(ctx context.Context, d *schema.Resource return diag.FromErr(err) } - cloudInstanceID := d.Get(helpers.PICloudInstanceId).(string) - client := st.NewIBMPICloudConnectionClient(ctx, sess, cloudInstanceID) + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) + client := instance.NewIBMPICloudConnectionClient(ctx, sess, cloudInstanceID) cloudConnections, err := client.GetAll() if err != nil { @@ -137,16 +142,16 @@ func dataSourceIBMPICloudConnectionsRead(ctx context.Context, d *schema.Resource result := make([]map[string]interface{}, 0, len(cloudConnections.CloudConnections)) for _, cloudConnection := range cloudConnections.CloudConnections { cc := map[string]interface{}{ - PICloudConnectionId: *cloudConnection.CloudConnectionID, - PICloudConnectionName: *cloudConnection.Name, - PICloudConnectionGlobalRouting: *cloudConnection.GlobalRouting, - PICloudConnectionMetered: *cloudConnection.Metered, - PICloudConnectionIBMIPAddress: *cloudConnection.IbmIPAddress, - PICloudConnectionUserIPAddress: *cloudConnection.UserIPAddress, - PICloudConnectionStatus: *cloudConnection.LinkStatus, - PICloudConnectionPort: *cloudConnection.Port, - PICloudConnectionSpeed: *cloudConnection.Speed, - PICloudConnectionConnectionMode: cloudConnection.ConnectionMode, + Attr_CloudConnectionID: *cloudConnection.CloudConnectionID, + Attr_ConnectionMode: cloudConnection.ConnectionMode, + Attr_GlobalRouting: *cloudConnection.GlobalRouting, + Attr_IBMIPAddress: *cloudConnection.IbmIPAddress, + Attr_Metered: *cloudConnection.Metered, + Attr_Name: *cloudConnection.Name, + Attr_Port: *cloudConnection.Port, + Attr_Speed: *cloudConnection.Speed, + Attr_Status: *cloudConnection.LinkStatus, + Attr_UserIPAddress: *cloudConnection.UserIPAddress, } if cloudConnection.Networks != nil { @@ -156,23 +161,23 @@ func dataSourceIBMPICloudConnectionsRead(ctx context.Context, d *schema.Resource networks[i] = *ccNetwork.NetworkID } } - cc[PICloudConnectionNetworks] = networks + cc[Attr_Networks] = networks } if cloudConnection.Classic != 
nil { - cc[PICloudConnectionClassicEnabled] = cloudConnection.Classic.Enabled + cc[Attr_ClassicEnabled] = cloudConnection.Classic.Enabled if cloudConnection.Classic.Gre != nil { - cc[PICloudConnectionClassicGreDest] = cloudConnection.Classic.Gre.DestIPAddress - cc[PICloudConnectionClassicGreSource] = cloudConnection.Classic.Gre.SourceIPAddress + cc[Attr_GreDestinationAddress] = cloudConnection.Classic.Gre.DestIPAddress + cc[Attr_GreSourceAddress] = cloudConnection.Classic.Gre.SourceIPAddress } } if cloudConnection.Vpc != nil { - cc[PICloudConnectionVPCEnabled] = cloudConnection.Vpc.Enabled + cc[Attr_VPCEnabled] = cloudConnection.Vpc.Enabled if cloudConnection.Vpc.Vpcs != nil && len(cloudConnection.Vpc.Vpcs) > 0 { vpcCRNs := make([]string, len(cloudConnection.Vpc.Vpcs)) for i, vpc := range cloudConnection.Vpc.Vpcs { vpcCRNs[i] = *vpc.VpcID } - cc[PICloudConnectionVPCCRNs] = vpcCRNs + cc[Attr_VPCCRNs] = vpcCRNs } } @@ -181,7 +186,7 @@ func dataSourceIBMPICloudConnectionsRead(ctx context.Context, d *schema.Resource var genID, _ = uuid.GenerateUUID() d.SetId(genID) - d.Set(PICloudConnections, result) + d.Set(Attr_Connections, result) return nil } diff --git a/ibm/service/power/data_source_ibm_pi_cloud_instance.go b/ibm/service/power/data_source_ibm_pi_cloud_instance.go index f9fa86dcd5..4a09bf2768 100644 --- a/ibm/service/power/data_source_ibm_pi_cloud_instance.go +++ b/ibm/service/power/data_source_ibm_pi_cloud_instance.go @@ -6,96 +6,111 @@ package power import ( "context" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/IBM-Cloud/power-go-client/clients/instance" - "github.com/IBM-Cloud/power-go-client/helpers" "github.com/IBM-Cloud/power-go-client/power/models" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func DataSourceIBMPICloudInstance() *schema.Resource { - return &schema.Resource{ ReadContext: dataSourceIBMPICloudInstanceRead, Schema: map[string]*schema.Schema{ - helpers.PICloudInstanceId: { - Type: schema.TypeString, + // Arguments + Arg_CloudInstanceID: { + Description: "The GUID of the service instance associated with an account.", Required: true, + Type: schema.TypeString, ValidateFunc: validation.NoZeroValues, }, - // Start of Computed Attributes - "enabled": { - Type: schema.TypeBool, - Computed: true, - }, - "tenant_id": { - Type: schema.TypeString, - Computed: true, - }, - "region": { - Type: schema.TypeString, - Computed: true, - }, - "capabilities": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "total_processors_consumed": { - Type: schema.TypeFloat, - Computed: true, + // Attributes + Attr_Capabilities: { + Computed: true, + Description: "Lists the capabilities for this cloud instance.", + Elem: &schema.Schema{Type: schema.TypeString}, + Type: schema.TypeList, }, - "total_instances": { - Type: schema.TypeFloat, - Computed: true, + Attr_Enabled: { + Computed: true, + Description: "Indicates whether the tenant is enabled.", + Type: schema.TypeBool, }, - "total_memory_consumed": { - Type: schema.TypeFloat, - Computed: true, - }, - "total_ssd_storage_consumed": { - Type: schema.TypeFloat, - Computed: true, - }, - "total_standard_storage_consumed": { - Type: schema.TypeFloat, - 
Computed: true, - }, - "pvm_instances": { - Type: schema.TypeList, - Computed: true, + Attr_PVMInstances: { + Computed: true, + Description: "PVM instances owned by the Cloud Instance.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Computed: true, + Attr_CreationDate: { + Computed: true, + Description: "Date of PVM instance creation.", + Type: schema.TypeString, }, - "name": { - Type: schema.TypeString, - Computed: true, + Attr_Href: { + Computed: true, + Description: "Link to Cloud Instance resource.", + Type: schema.TypeString, }, - "href": { - Type: schema.TypeString, - Computed: true, + Attr_ID: { + Computed: true, + Description: "PVM Instance ID.", + Type: schema.TypeString, }, - "status": { - Type: schema.TypeString, - Computed: true, + Attr_Name: { + Computed: true, + Description: "Name of the server.", + Type: schema.TypeString, }, - "systype": { - Type: schema.TypeString, - Computed: true, + Attr_Status: { + Computed: true, + Description: "The status of the instance.", + Type: schema.TypeString, }, - "creation_date": { - Type: schema.TypeString, - Computed: true, + Attr_SysType: { + Computed: true, + Description: "System type used to host the instance.", + Type: schema.TypeString, }, }, }, + Type: schema.TypeList, + }, + Attr_Region: { + Computed: true, + Description: "The region the cloud instance lives in.", + Type: schema.TypeString, + }, + Attr_TenantID: { + Computed: true, + Description: "The tenant ID that owns this cloud instance.", + Type: schema.TypeString, + }, + Attr_TotalInstances: { + Computed: true, + Description: "The count of lpars that belong to this specific cloud instance.", + Type: schema.TypeFloat, + }, + Attr_TotalMemoryConsumed: { + Computed: true, + Description: "The total memory consumed by this service instance.", + Type: schema.TypeFloat, + }, + Attr_TotalProcessorsConsumed: { + Computed: true, + Description: "The total processors consumed by this service instance.", + Type: schema.TypeFloat, + }, + Attr_TotalSSDStorageConsumed: { + Computed: true, + Description: "The total SSD Storage consumed by this service instance.", + Type: schema.TypeFloat, + }, + Attr_TotalStandardStorageConsumed: { + Computed: true, + Description: "The total Standard Storage consumed by this service instance.", + Type: schema.TypeFloat, }, }, } @@ -107,7 +122,7 @@ func dataSourceIBMPICloudInstanceRead(ctx context.Context, d *schema.ResourceDat return diag.FromErr(err) } - cloudInstanceID := d.Get(helpers.PICloudInstanceId).(string) + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) cloud_instance := instance.NewIBMPICloudInstanceClient(ctx, sess, cloudInstanceID) cloud_instance_data, err := cloud_instance.Get(cloudInstanceID) @@ -116,35 +131,33 @@ func dataSourceIBMPICloudInstanceRead(ctx context.Context, d *schema.ResourceDat } d.SetId(*cloud_instance_data.CloudInstanceID) - d.Set("tenant_id", (cloud_instance_data.TenantID)) - d.Set("enabled", cloud_instance_data.Enabled) - d.Set("region", cloud_instance_data.Region) - d.Set("capabilities", cloud_instance_data.Capabilities) - d.Set("pvm_instances", flattenpvminstances(cloud_instance_data.PvmInstances)) - d.Set("total_ssd_storage_consumed", cloud_instance_data.Usage.StorageSSD) - d.Set("total_instances", cloud_instance_data.Usage.Instances) - d.Set("total_standard_storage_consumed", cloud_instance_data.Usage.StorageStandard) - d.Set("total_processors_consumed", cloud_instance_data.Usage.Processors) - d.Set("total_memory_consumed", cloud_instance_data.Usage.Memory) - return
nil + d.Set(Attr_Capabilities, cloud_instance_data.Capabilities) + d.Set(Attr_Enabled, cloud_instance_data.Enabled) + d.Set(Attr_PVMInstances, flattenpvminstances(cloud_instance_data.PvmInstances)) + d.Set(Attr_Region, cloud_instance_data.Region) + d.Set(Attr_TenantID, (cloud_instance_data.TenantID)) + d.Set(Attr_TotalInstances, cloud_instance_data.Usage.Instances) + d.Set(Attr_TotalMemoryConsumed, cloud_instance_data.Usage.Memory) + d.Set(Attr_TotalProcessorsConsumed, cloud_instance_data.Usage.Processors) + d.Set(Attr_TotalSSDStorageConsumed, cloud_instance_data.Usage.StorageSSD) + d.Set(Attr_TotalStandardStorageConsumed, cloud_instance_data.Usage.StorageStandard) + return nil } func flattenpvminstances(list []*models.PVMInstanceReference) []map[string]interface{} { pvms := make([]map[string]interface{}, 0) for _, lpars := range list { - l := map[string]interface{}{ - "id": *lpars.PvmInstanceID, - "name": *lpars.ServerName, - "href": *lpars.Href, - "status": *lpars.Status, - "systype": lpars.SysType, - "creation_date": lpars.CreationDate.String(), + Attr_CreationDate: lpars.CreationDate.String(), + Attr_ID: *lpars.PvmInstanceID, + Attr_Href: *lpars.Href, + Attr_Name: *lpars.ServerName, + Attr_Status: *lpars.Status, + Attr_SysType: lpars.SysType, } pvms = append(pvms, l) - } return pvms } diff --git a/ibm/service/power/data_source_ibm_pi_cloud_instance_test.go b/ibm/service/power/data_source_ibm_pi_cloud_instance_test.go index 1d80d59337..d2f0b69db9 100644 --- a/ibm/service/power/data_source_ibm_pi_cloud_instance_test.go +++ b/ibm/service/power/data_source_ibm_pi_cloud_instance_test.go @@ -13,7 +13,6 @@ import ( ) func TestAccIBMPICloudInstanceDataSource_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ PreCheck: func() { acc.TestAccPreCheck(t) }, Providers: acc.TestAccProviders, @@ -29,10 +28,8 @@ func TestAccIBMPICloudInstanceDataSource_basic(t *testing.T) { } func testAccCheckIBMPICloudInstanceDataSourceConfig() string { - return fmt.Sprintf(` - -data "ibm_pi_cloud_instance" "testacc_ds_cloud_instance" { - pi_cloud_instance_id = "%s" -}`, acc.Pi_cloud_instance_id) - + return fmt.Sprintf(` + data "ibm_pi_cloud_instance" "testacc_ds_cloud_instance" { + pi_cloud_instance_id = "%s" + }`, acc.Pi_cloud_instance_id) } diff --git a/ibm/service/power/data_source_ibm_pi_instance_console_languages.go b/ibm/service/power/data_source_ibm_pi_instance_console_languages.go index e5cb5b344d..d34dad28fb 100644 --- a/ibm/service/power/data_source_ibm_pi_instance_console_languages.go +++ b/ibm/service/power/data_source_ibm_pi_instance_console_languages.go @@ -6,59 +6,52 @@ package power import ( "context" - "github.com/IBM-Cloud/power-go-client/helpers" + "github.com/IBM-Cloud/power-go-client/clients/instance" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" "github.com/hashicorp/go-uuid" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - "github.com/IBM-Cloud/power-go-client/clients/instance" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) -const ( - ConsoleLanguages = "console_languages" - ConsoleLanguageCode = "code" - ConsoleLanguageDesc = "language" -) - -/* -Datasource to get the list of available console languages for an instance -*/ +// Datasource to list available console languages for an instance func DataSourceIBMPIInstanceConsoleLanguages() *schema.Resource { return &schema.Resource{ ReadContext: dataSourceIBMPIInstanceConsoleLanguagesRead, Schema: map[string]*schema.Schema{ - 
helpers.PICloudInstanceId: { - Type: schema.TypeString, + // Arguments + Arg_CloudInstanceID: { + Description: "The GUID of the service instance associated with an account.", Required: true, + Type: schema.TypeString, ValidateFunc: validation.NoZeroValues, }, - helpers.PIInstanceName: { - Type: schema.TypeString, + Arg_InstanceName: { + Description: "The unique identifier or name of the instance.", Required: true, - Description: "The unique identifier or name of the instance", + Type: schema.TypeString, ValidateFunc: validation.NoZeroValues, }, - // Computed Attributes - ConsoleLanguages: { - Type: schema.TypeList, - Computed: true, + // Attributes + Attr_ConsoleLanguages: { + Computed: true, + Description: "List of all the Console Languages.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - ConsoleLanguageCode: { - Type: schema.TypeString, + Attr_Code: { Computed: true, - Description: "language code", - }, - ConsoleLanguageDesc: { + Description: "Language code.", Type: schema.TypeString, + }, + Attr_Language: { Computed: true, - Description: "language description", + Description: "Language description.", + Type: schema.TypeString, }, }, }, + Type: schema.TypeList, }, }, } @@ -70,8 +63,8 @@ func dataSourceIBMPIInstanceConsoleLanguagesRead(ctx context.Context, d *schema. return diag.FromErr(err) } - cloudInstanceID := d.Get(helpers.PICloudInstanceId).(string) - instanceName := d.Get(helpers.PIInstanceName).(string) + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) + instanceName := d.Get(Arg_InstanceName).(string) client := instance.NewIBMPIInstanceClient(ctx, sess, cloudInstanceID) languages, err := client.GetConsoleLanguages(instanceName) @@ -86,12 +79,12 @@ func dataSourceIBMPIInstanceConsoleLanguagesRead(ctx context.Context, d *schema. 
result := make([]map[string]interface{}, 0, len(languages.ConsoleLanguages)) for _, language := range languages.ConsoleLanguages { l := map[string]interface{}{ - ConsoleLanguageCode: *language.Code, - ConsoleLanguageDesc: language.Language, + Attr_Code: *language.Code, + Attr_Language: language.Language, } result = append(result, l) } - d.Set(ConsoleLanguages, result) + d.Set(Attr_ConsoleLanguages, result) } return nil diff --git a/ibm/service/power/data_source_ibm_pi_instance_console_languages_test.go b/ibm/service/power/data_source_ibm_pi_instance_console_languages_test.go index acadb056e0..9d52aa963f 100644 --- a/ibm/service/power/data_source_ibm_pi_instance_console_languages_test.go +++ b/ibm/service/power/data_source_ibm_pi_instance_console_languages_test.go @@ -30,8 +30,8 @@ func TestAccIBMPIInstanceConsoleLanguages(t *testing.T) { func testAccCheckIBMPIInstanceConsoleLanguagesConfig() string { return fmt.Sprintf(` - data "ibm_pi_console_languages" "example" { - pi_cloud_instance_id = "%s" - pi_instance_name = "%s" - }`, acc.Pi_cloud_instance_id, acc.Pi_instance_name) + data "ibm_pi_console_languages" "example" { + pi_cloud_instance_id = "%s" + pi_instance_name = "%s" + }`, acc.Pi_cloud_instance_id, acc.Pi_instance_name) } diff --git a/ibm/service/power/data_source_ibm_pi_instance_ip.go b/ibm/service/power/data_source_ibm_pi_instance_ip.go index 4d57021d36..60cac39d42 100644 --- a/ibm/service/power/data_source_ibm_pi_instance_ip.go +++ b/ibm/service/power/data_source_ibm_pi_instance_ip.go @@ -10,7 +10,6 @@ import ( "strconv" "github.com/IBM-Cloud/power-go-client/clients/instance" - "github.com/IBM-Cloud/power-go-client/helpers" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -18,51 +17,59 @@ import ( ) func DataSourceIBMPIInstanceIP() *schema.Resource { - return &schema.Resource{ ReadContext: dataSourceIBMPIInstancesIPRead, Schema: map[string]*schema.Schema{ - helpers.PIInstanceName: { - Type: schema.TypeString, + // Arguments + Arg_CloudInstanceID: { + Description: "The GUID of the service instance associated with an account.", Required: true, - Description: "Server Name to be used for pvminstances", + Type: schema.TypeString, ValidateFunc: validation.NoZeroValues, }, - helpers.PICloudInstanceId: { - Type: schema.TypeString, + Arg_InstanceName: { + Description: "The unique identifier or name of the instance.", Required: true, + Type: schema.TypeString, ValidateFunc: validation.NoZeroValues, }, - helpers.PINetworkName: { + Arg_NetworkName: { + Description: "The subnet that the instance belongs to.", Type: schema.TypeString, Required: true, ValidateFunc: validation.NoZeroValues, }, - // Computed attributes - "ip": { - Type: schema.TypeString, - Computed: true, + // Attributes + Attr_ExternalIP: { + Computed: true, + Description: "The external IP of the network that is attached to this instance.", + Type: schema.TypeString, }, - "ipoctet": { - Type: schema.TypeString, - Computed: true, + Attr_IP: { + Computed: true, + Description: "The IP address that is attached to this instance from the subnet.", + Type: schema.TypeString, }, - "macaddress": { - Type: schema.TypeString, - Computed: true, + Attr_IPOctet: { + Computed: true, + Description: "The IP octet of the network that is attached to this instance.", + Type: schema.TypeString, }, - "network_id": { - Type: schema.TypeString, - Computed: true, + Attr_MacAddress: { + Computed: true, + Description: "The MAC 
address of the network that is attached to this instance.", + Type: schema.TypeString, }, - "type": { - Type: schema.TypeString, - Computed: true, + Attr_NetworkID: { + Computed: true, + Description: "ID of the network.", + Type: schema.TypeString, }, - "external_ip": { - Type: schema.TypeString, - Computed: true, + Attr_Type: { + Computed: true, + Description: "The type of the network that is attached to this instance.", + Type: schema.TypeString, }, }, } @@ -74,11 +81,11 @@ func dataSourceIBMPIInstancesIPRead(ctx context.Context, d *schema.ResourceData, return diag.FromErr(err) } - cloudInstanceID := d.Get(helpers.PICloudInstanceId).(string) - networkName := d.Get(helpers.PINetworkName).(string) + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) + networkName := d.Get(Arg_NetworkName).(string) powerC := instance.NewIBMPIInstanceClient(ctx, sess, cloudInstanceID) - powervmdata, err := powerC.Get(d.Get(helpers.PIInstanceName).(string)) + powervmdata, err := powerC.Get(d.Get(Arg_InstanceName).(string)) if err != nil { return diag.FromErr(err) } @@ -87,17 +94,16 @@ func dataSourceIBMPIInstancesIPRead(ctx context.Context, d *schema.ResourceData, if network.NetworkName == networkName { log.Printf("Printing the ip %s", network.IPAddress) d.SetId(network.NetworkID) - d.Set("ip", network.IPAddress) - d.Set("network_id", network.NetworkID) - d.Set("macaddress", network.MacAddress) - d.Set("external_ip", network.ExternalIP) - d.Set("type", network.Type) + d.Set(Attr_ExternalIP, network.ExternalIP) + d.Set(Attr_IP, network.IPAddress) + d.Set(Attr_MacAddress, network.MacAddress) + d.Set(Attr_NetworkID, network.NetworkID) + d.Set(Attr_Type, network.Type) IPObject := net.ParseIP(network.IPAddress).To4() if len(IPObject) > 0 { - d.Set("ipoctet", strconv.Itoa(int(IPObject[3]))) + d.Set(Attr_IPOctet, strconv.Itoa(int(IPObject[3]))) } - return nil } } diff --git a/ibm/service/power/data_source_ibm_pi_instance_ip_test.go b/ibm/service/power/data_source_ibm_pi_instance_ip_test.go index 36362776a8..b8963ee639 100644 --- a/ibm/service/power/data_source_ibm_pi_instance_ip_test.go +++ b/ibm/service/power/data_source_ibm_pi_instance_ip_test.go @@ -29,10 +29,9 @@ func TestAccIBMPIInstanceIPDataSource_basic(t *testing.T) { func testAccCheckIBMPIInstanceIPDataSourceConfig() string { return fmt.Sprintf(` - data "ibm_pi_instance_ip" "testacc_ds_instance_ip" { - pi_network_name = "%[1]s" - pi_instance_name = "%[2]s" - pi_cloud_instance_id = "%[3]s" - } - `, acc.Pi_network_name, acc.Pi_instance_name, acc.Pi_cloud_instance_id) + data "ibm_pi_instance_ip" "testacc_ds_instance_ip" { + pi_network_name = "%[1]s" + pi_instance_name = "%[2]s" + pi_cloud_instance_id = "%[3]s" + }`, acc.Pi_network_name, acc.Pi_instance_name, acc.Pi_cloud_instance_id) } diff --git a/ibm/service/power/data_source_ibm_pi_instance_volumes.go b/ibm/service/power/data_source_ibm_pi_instance_volumes.go index 611ef5cf62..0bd9b2a09e 100644 --- a/ibm/service/power/data_source_ibm_pi_instance_volumes.go +++ b/ibm/service/power/data_source_ibm_pi_instance_volumes.go @@ -6,82 +6,92 @@ package power import ( "context" - "github.com/IBM-Cloud/power-go-client/helpers" + "github.com/IBM-Cloud/power-go-client/clients/instance" "github.com/IBM-Cloud/power-go-client/power/models" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" "github.com/hashicorp/go-uuid" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - "github.com/IBM-Cloud/power-go-client/clients/instance" 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func DataSourceIBMPIInstanceVolumes() *schema.Resource { - return &schema.Resource{ ReadContext: dataSourceIBMPIInstanceVolumesRead, Schema: map[string]*schema.Schema{ - helpers.PIInstanceName: { - Type: schema.TypeString, + // Arguments + Arg_CloudInstanceID: { + Description: "The GUID of the service instance associated with an account.", Required: true, - Description: "Instance Name to be used for pvminstances", + Type: schema.TypeString, ValidateFunc: validation.NoZeroValues, }, - helpers.PICloudInstanceId: { - Type: schema.TypeString, + Arg_InstanceName: { + Description: "The unique identifier or name of the instance.", Required: true, + Type: schema.TypeString, ValidateFunc: validation.NoZeroValues, }, - //Computed Attributes - "boot_volume_id": { - Type: schema.TypeString, - Computed: true, + // Attribute + Attr_BootVolumeID: { + Computed: true, + Description: "The unique identifier of the boot volume.", + Type: schema.TypeString, }, - "instance_volumes": { - Type: schema.TypeList, - Computed: true, + Attr_InstanceVolumes: { + Computed: true, + Description: "List of volumes attached to instance.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Computed: true, + Attr_Bootable: { + Computed: true, + Description: "Indicates if the volume is boot capable.", + Type: schema.TypeBool, }, - "size": { - Type: schema.TypeFloat, - Computed: true, + Attr_Href: { + Computed: true, + Description: "The hyper link of the volume.", + Type: schema.TypeString, }, - "href": { - Type: schema.TypeString, - Computed: true, + Attr_ID: { + Computed: true, + Description: "The unique identifier of the volume.", + Type: schema.TypeString, }, - "name": { - Type: schema.TypeString, - Computed: true, + Attr_Name: { + Computed: true, + Description: "The name of the volume.", + Type: schema.TypeString, }, - "state": { - Type: schema.TypeString, - Computed: true, + Attr_Pool: { + Computed: true, + Description: "Volume pool, name of storage pool where the volume is located.", + Type: schema.TypeString, }, - "type": { - Type: schema.TypeString, - Computed: true, + Attr_Shareable: { + Computed: true, + Description: "Indicates if the volume is shareable between VMs.", + Type: schema.TypeBool, }, - "pool": { - Type: schema.TypeString, - Computed: true, + Attr_Size: { + Computed: true, + Description: "The size of this volume in gigabytes.", + Type: schema.TypeFloat, }, - "shareable": { - Type: schema.TypeBool, - Computed: true, + Attr_State: { + Computed: true, + Description: "The state of the volume.", + Type: schema.TypeString, }, - "bootable": { - Type: schema.TypeBool, - Computed: true, + Attr_Type: { + Computed: true, + Description: "The disk type that is used for this volume.", + Type: schema.TypeString, }, }, }, + Type: schema.TypeList, }, }, } @@ -93,38 +103,36 @@ func dataSourceIBMPIInstanceVolumesRead(ctx context.Context, d *schema.ResourceD return diag.FromErr(err) } - cloudInstanceID := d.Get(helpers.PICloudInstanceId).(string) + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) volumeC := instance.NewIBMPIVolumeClient(ctx, sess, cloudInstanceID) - volumedata, err := volumeC.GetAllInstanceVolumes(d.Get(helpers.PIInstanceName).(string)) + volumedata, err := volumeC.GetAllInstanceVolumes(d.Get(Arg_InstanceName).(string)) if err != nil { return diag.FromErr(err) } var clientgenU, _ = uuid.GenerateUUID() d.SetId(clientgenU) - d.Set("boot_volume_id", *volumedata.Volumes[0].VolumeID) - 
d.Set("instance_volumes", flattenVolumesInstances(volumedata.Volumes)) + d.Set(Attr_BootVolumeID, *volumedata.Volumes[0].VolumeID) + d.Set(Attr_InstanceVolumes, flattenVolumesInstances(volumedata.Volumes)) return nil - } func flattenVolumesInstances(list []*models.VolumeReference) []map[string]interface{} { result := make([]map[string]interface{}, 0, len(list)) for _, i := range list { l := map[string]interface{}{ - "id": *i.VolumeID, - "state": *i.State, - "href": *i.Href, - "name": *i.Name, - "size": *i.Size, - "type": *i.DiskType, - "pool": i.VolumePool, - "shareable": *i.Shareable, - "bootable": *i.Bootable, + Attr_Bootable: *i.Bootable, + Attr_Href: *i.Href, + Attr_ID: *i.VolumeID, + Attr_Name: *i.Name, + Attr_Pool: i.VolumePool, + Attr_Shareable: *i.Shareable, + Attr_Size: *i.Size, + Attr_State: *i.State, + Attr_Type: *i.DiskType, } - result = append(result, l) } return result diff --git a/ibm/service/power/data_source_ibm_pi_instance_volumes_test.go b/ibm/service/power/data_source_ibm_pi_instance_volumes_test.go index 211c17b9e7..10543c5a66 100644 --- a/ibm/service/power/data_source_ibm_pi_instance_volumes_test.go +++ b/ibm/service/power/data_source_ibm_pi_instance_volumes_test.go @@ -31,9 +31,8 @@ func TestAccIBMPIVolumesDataSource_basic(t *testing.T) { func testAccCheckIBMPIVolumesDataSourceConfig(name string) string { return fmt.Sprintf(` -data "ibm_pi_instance_volumes" "testacc_ds_volumes" { - pi_instance_name = "%s" - pi_cloud_instance_id = "%s" -}`, acc.Pi_instance_name, acc.Pi_cloud_instance_id) - + data "ibm_pi_instance_volumes" "testacc_ds_volumes" { + pi_instance_name = "%s" + pi_cloud_instance_id = "%s" + }`, acc.Pi_instance_name, acc.Pi_cloud_instance_id) } diff --git a/ibm/service/power/data_source_ibm_pi_key.go b/ibm/service/power/data_source_ibm_pi_key.go index 70ab96ba7c..b7f928828f 100644 --- a/ibm/service/power/data_source_ibm_pi_key.go +++ b/ibm/service/power/data_source_ibm_pi_key.go @@ -6,79 +6,65 @@ package power import ( "context" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/IBM-Cloud/power-go-client/clients/instance" "github.com/IBM-Cloud/power-go-client/helpers" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func DataSourceIBMPIKey() *schema.Resource { - return &schema.Resource{ ReadContext: dataSourceIBMPIKeyRead, Schema: map[string]*schema.Schema{ - // Arguments - Arg_KeyName: { - Type: schema.TypeString, + Arg_CloudInstanceID: { + Description: "The GUID of the service instance associated with an account.", Required: true, - Description: "SSH key name for a pcloud tenant", + Type: schema.TypeString, ValidateFunc: validation.NoZeroValues, }, - Arg_CloudInstanceID: { - Type: schema.TypeString, + Arg_KeyName: { Required: true, + Description: "User defined name for the SSH key.", ValidateFunc: validation.NoZeroValues, + Type: schema.TypeString, }, // Attributes - Attr_KeyCreationDate: { - Type: schema.TypeString, + Attr_CreationDate: { Computed: true, - Description: "Date of sshkey creation", - }, - Attr_Key: { + Description: "Date of SSH Key creation.", Type: schema.TypeString, - Sensitive: true, - Computed: true, - Description: "SSH RSA key", }, - "sshkey": { - Type: schema.TypeString, - Sensitive: true, - Computed: 
true, - Deprecated: "This field is deprecated, use ssh_key instead", + Attr_SSHKey: { + Computed: true, + Description: "SSH RSA key.", + Sensitive: true, + Type: schema.TypeString, }, }, } } func dataSourceIBMPIKeyRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - - // session sess, err := meta.(conns.ClientSession).IBMPISession() if err != nil { return diag.FromErr(err) } - // arguments - cloudInstanceID := d.Get(helpers.PICloudInstanceId).(string) + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) - // get key sshkeyC := instance.NewIBMPIKeyClient(ctx, sess, cloudInstanceID) sshkeydata, err := sshkeyC.Get(d.Get(helpers.PIKeyName).(string)) if err != nil { return diag.FromErr(err) } - // set attributes d.SetId(*sshkeydata.Name) - d.Set(Attr_KeyCreationDate, sshkeydata.CreationDate.String()) - d.Set(Attr_Key, sshkeydata.SSHKey) - d.Set("sshkey", sshkeydata.SSHKey) // TODO: deprecated, to remove + d.Set(Attr_CreationDate, sshkeydata.CreationDate.String()) + d.Set(Attr_SSHKey, sshkeydata.SSHKey) return nil } diff --git a/ibm/service/power/data_source_ibm_pi_key_test.go b/ibm/service/power/data_source_ibm_pi_key_test.go index 184b74a0ec..a4fb373339 100644 --- a/ibm/service/power/data_source_ibm_pi_key_test.go +++ b/ibm/service/power/data_source_ibm_pi_key_test.go @@ -13,7 +13,6 @@ import ( ) func TestAccIBMPIKeyDataSource_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ PreCheck: func() { acc.TestAccPreCheck(t) }, Providers: acc.TestAccProviders, @@ -30,9 +29,8 @@ func TestAccIBMPIKeyDataSource_basic(t *testing.T) { func testAccCheckIBMPIKeyDataSourceConfig() string { return fmt.Sprintf(` -data "ibm_pi_key" "testacc_ds_key" { - pi_key_name = "%s" - pi_cloud_instance_id = "%s" -}`, acc.Pi_key_name, acc.Pi_cloud_instance_id) - + data "ibm_pi_key" "testacc_ds_key" { + pi_key_name = "%s" + pi_cloud_instance_id = "%s" + }`, acc.Pi_key_name, acc.Pi_cloud_instance_id) } diff --git a/ibm/service/power/data_source_ibm_pi_keys.go b/ibm/service/power/data_source_ibm_pi_keys.go index 0f78a9ab4e..91bf669e1f 100644 --- a/ibm/service/power/data_source_ibm_pi_keys.go +++ b/ibm/service/power/data_source_ibm_pi_keys.go @@ -13,7 +13,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" st "github.com/IBM-Cloud/power-go-client/clients/instance" - "github.com/IBM-Cloud/power-go-client/helpers" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" ) @@ -21,56 +20,51 @@ func DataSourceIBMPIKeys() *schema.Resource { return &schema.Resource{ ReadContext: dataSourceIBMPIKeysRead, Schema: map[string]*schema.Schema{ - // Arguments Arg_CloudInstanceID: { - Type: schema.TypeString, + Description: "The GUID of the service instance associated with an account.", Required: true, - Description: "PI cloud instance ID", + Type: schema.TypeString, ValidateFunc: validation.NoZeroValues, }, // Attributes Attr_Keys: { - Type: schema.TypeList, Computed: true, - Description: "SSH Keys", + Description: "List of all the SSH keys.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - Attr_KeyName: { - Type: schema.TypeString, + Attr_CreationDate: { Computed: true, - Description: "User defined name for the SSH key", - }, - Attr_Key: { + Description: "Date of SSH key creation.", Type: schema.TypeString, - Computed: true, - Description: "SSH RSA key", }, - Attr_KeyCreationDate: { + Attr_Name: { + Computed: true, + Description: "User defined name for the SSH key.", Type: schema.TypeString, + }, + Attr_SSHKey: { Computed: true, - Description: "Date of 
SSH key creation", + Description: "SSH RSA key.", + Type: schema.TypeString, }, }, }, + Type: schema.TypeList, }, }, } } func dataSourceIBMPIKeysRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - - // session sess, err := meta.(conns.ClientSession).IBMPISession() if err != nil { return diag.FromErr(err) } - // arguments - cloudInstanceID := d.Get(helpers.PICloudInstanceId).(string) + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) - // get keys client := st.NewIBMPIKeyClient(ctx, sess, cloudInstanceID) sshKeys, err := client.GetAll() if err != nil { @@ -78,13 +72,12 @@ func dataSourceIBMPIKeysRead(ctx context.Context, d *schema.ResourceData, meta i return diag.FromErr(err) } - // set attributes result := make([]map[string]interface{}, 0, len(sshKeys.SSHKeys)) for _, sshKey := range sshKeys.SSHKeys { key := map[string]interface{}{ - Attr_KeyName: sshKey.Name, - Attr_Key: sshKey.SSHKey, - Attr_KeyCreationDate: sshKey.CreationDate.String(), + Attr_CreationDate: sshKey.CreationDate.String(), + Attr_Name: sshKey.Name, + Attr_SSHKey: sshKey.SSHKey, } result = append(result, key) } diff --git a/ibm/service/power/data_source_ibm_pi_keys_test.go b/ibm/service/power/data_source_ibm_pi_keys_test.go index f45dfc7fce..66bb6c4596 100644 --- a/ibm/service/power/data_source_ibm_pi_keys_test.go +++ b/ibm/service/power/data_source_ibm_pi_keys_test.go @@ -31,6 +31,5 @@ func testAccCheckIBMPIKeysDataSourceConfig() string { return fmt.Sprintf(` data "ibm_pi_keys" "test" { pi_cloud_instance_id = "%s" - } - `, acc.Pi_cloud_instance_id) + }`, acc.Pi_cloud_instance_id) } diff --git a/ibm/service/power/data_source_ibm_pi_network.go b/ibm/service/power/data_source_ibm_pi_network.go index 3a04453ca7..2835514f90 100644 --- a/ibm/service/power/data_source_ibm_pi_network.go +++ b/ibm/service/power/data_source_ibm_pi_network.go @@ -4,86 +4,97 @@ package power import ( - //"fmt" - "context" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/IBM-Cloud/power-go-client/clients/instance" "github.com/IBM-Cloud/power-go-client/helpers" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func DataSourceIBMPINetwork() *schema.Resource { - return &schema.Resource{ ReadContext: dataSourceIBMPINetworkRead, Schema: map[string]*schema.Schema{ - helpers.PINetworkName: { - Type: schema.TypeString, + // Arguments + Arg_CloudInstanceID: { + Description: "The GUID of the service instance associated with an account.", Required: true, - Description: "Network Name to be used for pvminstances", + Type: schema.TypeString, ValidateFunc: validation.NoZeroValues, }, - helpers.PICloudInstanceId: { - Type: schema.TypeString, + Arg_NetworkName: { + Description: "The unique identifier or name of a network.", Required: true, + Type: schema.TypeString, ValidateFunc: validation.NoZeroValues, }, - // Computed Attributes - "cidr": { - Type: schema.TypeString, - Computed: true, + // Attributes + Attr_AccessConfig: { + Computed: true, + Description: "The network communication configuration option of the network (for satellite locations only).", + Type: schema.TypeString, }, - "type": { - Type: schema.TypeString, - Computed: true, + Attr_AvailableIPCount: { + Computed: true, + 
Description: "The total number of IP addresses that you have in your network.", + Type: schema.TypeFloat, }, - "vlan_id": { - Type: schema.TypeInt, - Computed: true, + Attr_CIDR: { + Computed: true, + Description: "The CIDR of the network.", + Type: schema.TypeString, }, - "gateway": { - Type: schema.TypeString, - Computed: true, + Attr_DNS: { + Computed: true, + Description: "The DNS Servers for the network.", + Elem: &schema.Schema{Type: schema.TypeString}, + Type: schema.TypeSet, }, - "available_ip_count": { - Type: schema.TypeFloat, - Computed: true, + Attr_Gateway: { + Computed: true, + Description: "The network gateway that is attached to your network.", + Type: schema.TypeString, }, - "used_ip_count": { - Type: schema.TypeFloat, - Computed: true, + Attr_Jumbo: { + Computed: true, + Deprecated: "This field is deprecated, use mtu instead.", + Description: "MTU Jumbo option of the network (for multi-zone locations only).", + Type: schema.TypeBool, }, - "used_ip_percent": { - Type: schema.TypeFloat, - Computed: true, + Attr_MTU: { + Computed: true, + Description: "Maximum Transmission Unit option of the network.", + Type: schema.TypeInt, }, - "name": { - Type: schema.TypeString, - Computed: true, - Deprecated: "This value is deprecated in favor of" + helpers.PINetworkName, + Attr_Name: { + Computed: true, + Deprecated: "This field is deprecated, use pi_network_name instead.", + Description: "The unique identifier or name of a network.", + Type: schema.TypeString, }, - "dns": { - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, + Attr_Type: { + Computed: true, + Description: "The type of network.", + Type: schema.TypeString, }, - "jumbo": { - Type: schema.TypeBool, - Computed: true, + Attr_UsedIPCount: { + Computed: true, + Description: "The number of used IP addresses.", + Type: schema.TypeFloat, }, - "mtu": { - Type: schema.TypeInt, - Computed: true, + Attr_UsedIPPercent: { + Computed: true, + Description: "The percentage of IP addresses used.", + Type: schema.TypeFloat, }, - "access_config": { - Type: schema.TypeString, - Computed: true, + Attr_VLanID: { + Computed: true, + Description: "The VLAN ID that the network is connected to.", + Type: schema.TypeInt, }, }, } @@ -95,7 +106,7 @@ func dataSourceIBMPINetworkRead(ctx context.Context, d *schema.ResourceData, met return diag.FromErr(err) } - cloudInstanceID := d.Get(helpers.PICloudInstanceId).(string) + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) networkC := instance.NewIBMPINetworkClient(ctx, sess, cloudInstanceID) networkdata, err := networkC.Get(d.Get(helpers.PINetworkName).(string)) @@ -104,35 +115,34 @@ func dataSourceIBMPINetworkRead(ctx context.Context, d *schema.ResourceData, met } d.SetId(*networkdata.NetworkID) + d.Set(Attr_AccessConfig, networkdata.AccessConfig) + if networkdata.IPAddressMetrics.Available != nil { + d.Set(Attr_AvailableIPCount, networkdata.IPAddressMetrics.Available) + } if networkdata.Cidr != nil { - d.Set("cidr", networkdata.Cidr) + d.Set(Attr_CIDR, networkdata.Cidr) } - if networkdata.Type != nil { - d.Set("type", networkdata.Type) + if len(networkdata.DNSServers) > 0 { + d.Set(Attr_DNS, networkdata.DNSServers) } - d.Set("gateway", networkdata.Gateway) - if networkdata.VlanID != nil { - d.Set("vlan_id", networkdata.VlanID) + d.Set(Attr_Gateway, networkdata.Gateway) + d.Set(Attr_Jumbo, networkdata.Jumbo) + d.Set(Attr_MTU, networkdata.Mtu) + if networkdata.Name != nil { + d.Set(Attr_Name, networkdata.Name) } - if networkdata.IPAddressMetrics.Available != 
nil { - d.Set("available_ip_count", networkdata.IPAddressMetrics.Available) + if networkdata.Type != nil { + d.Set(Attr_Type, networkdata.Type) } if networkdata.IPAddressMetrics.Used != nil { - d.Set("used_ip_count", networkdata.IPAddressMetrics.Used) + d.Set(Attr_UsedIPCount, networkdata.IPAddressMetrics.Used) } if networkdata.IPAddressMetrics.Utilization != nil { - d.Set("used_ip_percent", networkdata.IPAddressMetrics.Utilization) - } - if networkdata.Name != nil { - d.Set("name", networkdata.Name) + d.Set(Attr_UsedIPPercent, networkdata.IPAddressMetrics.Utilization) } - if len(networkdata.DNSServers) > 0 { - d.Set("dns", networkdata.DNSServers) + if networkdata.VlanID != nil { + d.Set(Attr_VLanID, networkdata.VlanID) } - d.Set("jumbo", networkdata.Jumbo) - d.Set("mtu", networkdata.Mtu) - d.Set("access_config", networkdata.AccessConfig) return nil - } diff --git a/ibm/service/power/data_source_ibm_pi_network_port.go b/ibm/service/power/data_source_ibm_pi_network_port.go index e98a9c3b1b..5fef77e7af 100644 --- a/ibm/service/power/data_source_ibm_pi_network_port.go +++ b/ibm/service/power/data_source_ibm_pi_network_port.go @@ -7,69 +7,74 @@ import ( "context" "log" + "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/power-go-client/helpers" "github.com/IBM-Cloud/power-go-client/power/models" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" "github.com/hashicorp/go-uuid" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - //"fmt" - "github.com/IBM-Cloud/power-go-client/clients/instance" - "github.com/IBM-Cloud/power-go-client/helpers" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func DataSourceIBMPINetworkPort() *schema.Resource { - return &schema.Resource{ ReadContext: dataSourceIBMPINetworkPortsRead, Schema: map[string]*schema.Schema{ - helpers.PINetworkName: { - Type: schema.TypeString, + // Arguments + Arg_CloudInstanceID: { + Description: "The GUID of the service instance associated with an account.", Required: true, - Description: "Network Name to be used for pvminstances", + Type: schema.TypeString, ValidateFunc: validation.NoZeroValues, }, - helpers.PICloudInstanceId: { - Type: schema.TypeString, + Arg_NetworkName: { + Description: "The unique identifier or name of a network.", Required: true, + Type: schema.TypeString, ValidateFunc: validation.NoZeroValues, }, - // Computed Attributes - "network_ports": { + // Attributes + Attr_NetworkPorts: { Type: schema.TypeList, Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "ipaddress": { - Type: schema.TypeString, - Optional: true, - Computed: true, + Attr_Description: { + Computed: true, + Description: "The description for the network port.", + Type: schema.TypeString, }, - "macaddress": { - Type: schema.TypeString, - Computed: true, + Attr_Href: { + Computed: true, + Description: "Network port href.", + Type: schema.TypeString, }, - "portid": { - Type: schema.TypeString, - Computed: true, + Attr_IPAddress: { + Computed: true, + Description: "The IP address of the port.", + Type: schema.TypeString, }, - "status": { - Type: schema.TypeString, - Computed: true, + Attr_MacAddress: { + Computed: true, + Description: "The MAC address of the port.", + Type: schema.TypeString, }, - "href": { - Type: schema.TypeString, - Computed: true, + Attr_PortID: { + Computed: true, + Description: "The ID of the port.", + Type: schema.TypeString, }, - "description": { - Type: schema.TypeString, - Required: 
true, + Attr_PublicIP: { + Computed: true, + Description: "The public IP associated with the port.", + Type: schema.TypeString, }, - "public_ip": { - Type: schema.TypeString, - Computed: true, + Attr_Status: { + Computed: true, + Description: "The status of the port.", + Type: schema.TypeString, }, }, }, @@ -84,7 +89,8 @@ func dataSourceIBMPINetworkPortsRead(ctx context.Context, d *schema.ResourceData return diag.FromErr(err) } - cloudInstanceID := d.Get(helpers.PICloudInstanceId).(string) + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) + networkportC := instance.NewIBMPINetworkClient(ctx, sess, cloudInstanceID) networkportdata, err := networkportC.GetAllPorts(d.Get(helpers.PINetworkName).(string)) if err != nil { @@ -93,10 +99,9 @@ func dataSourceIBMPINetworkPortsRead(ctx context.Context, d *schema.ResourceData var clientgenU, _ = uuid.GenerateUUID() d.SetId(clientgenU) - d.Set("network_ports", flattenNetworkPorts(networkportdata.Ports)) + d.Set(Attr_NetworkPorts, flattenNetworkPorts(networkportdata.Ports)) return nil - } func flattenNetworkPorts(networkPorts []*models.NetworkPort) interface{} { @@ -104,14 +109,14 @@ func flattenNetworkPorts(networkPorts []*models.NetworkPort) interface{} { log.Printf("the number of ports is %d", len(networkPorts)) for _, i := range networkPorts { l := map[string]interface{}{ - "portid": *i.PortID, - "status": *i.Status, - "href": i.Href, - "ipaddress": *i.IPAddress, - "macaddress": *i.MacAddress, - "public_ip": i.ExternalIP, + Attr_Description: i.Description, + Attr_Href: i.Href, + Attr_IPAddress: *i.IPAddress, + Attr_MacAddress: *i.MacAddress, + Attr_PortID: *i.PortID, + Attr_PublicIP: i.ExternalIP, + Attr_Status: *i.Status, } - result = append(result, l) } return result diff --git a/ibm/service/power/data_source_ibm_pi_network_port_test.go b/ibm/service/power/data_source_ibm_pi_network_port_test.go new file mode 100644 index 0000000000..24a386af2d --- /dev/null +++ b/ibm/service/power/data_source_ibm_pi_network_port_test.go @@ -0,0 +1,36 @@ +// Copyright IBM Corp. 2017, 2021 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package power_test + +import ( + "fmt" + "testing" + + acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccIBMPINetworkPortDataSource_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMPINetworkPortDataSourceConfig(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.ibm_pi_network_port.testacc_ds_network_port", "id"), + ), + }, + }, + }) +} + +func testAccCheckIBMPINetworkPortDataSourceConfig() string { + return fmt.Sprintf(` + data "ibm_pi_network_port" "testacc_ds_network_port" { + pi_network_name = "%s" + pi_cloud_instance_id = "%s" + }`, acc.Pi_network_name, acc.Pi_cloud_instance_id) +} diff --git a/ibm/service/power/data_source_ibm_pi_network_test.go b/ibm/service/power/data_source_ibm_pi_network_test.go index 873681869a..c4c09fa58c 100644 --- a/ibm/service/power/data_source_ibm_pi_network_test.go +++ b/ibm/service/power/data_source_ibm_pi_network_test.go @@ -29,9 +29,8 @@ func TestAccIBMPINetworkDataSource_basic(t *testing.T) { func testAccCheckIBMPINetworkDataSourceConfig() string { return fmt.Sprintf(` -data "ibm_pi_network" "testacc_ds_network" { - pi_network_name = "%s" - pi_cloud_instance_id = "%s" -}`, acc.Pi_network_name, acc.Pi_cloud_instance_id) - + data "ibm_pi_network" "testacc_ds_network" { + pi_network_name = "%s" + pi_cloud_instance_id = "%s" + }`, acc.Pi_network_name, acc.Pi_cloud_instance_id) } diff --git a/ibm/service/power/data_source_ibm_pi_placement_group.go b/ibm/service/power/data_source_ibm_pi_placement_group.go index 9792aa331d..6b90a44f98 100644 --- a/ibm/service/power/data_source_ibm_pi_placement_group.go +++ b/ibm/service/power/data_source_ibm_pi_placement_group.go @@ -7,55 +7,56 @@ import ( "context" "log" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - st "github.com/IBM-Cloud/power-go-client/clients/instance" - "github.com/IBM-Cloud/power-go-client/helpers" + "github.com/IBM-Cloud/power-go-client/clients/instance" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func DataSourceIBMPIPlacementGroup() *schema.Resource { - return &schema.Resource{ ReadContext: dataSourceIBMPIPlacementGroupRead, Schema: map[string]*schema.Schema{ - helpers.PIPlacementGroupName: { - Type: schema.TypeString, - Required: true, - }, - - "policy": { - Type: schema.TypeString, - Computed: true, - }, - - helpers.PICloudInstanceId: { + // Arguments + Arg_CloudInstanceID: { + Description: "The GUID of the service instance associated with an account.", + Required: true, Type: schema.TypeString, + ValidateFunc: validation.NoZeroValues, + }, + Arg_PlacementGroupName: { + Description: "The name of the placement group.", Required: true, + Type: schema.TypeString, ValidateFunc: validation.NoZeroValues, }, - PIPlacementGroupMembers: { - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, - Computed: true, + // Attribute + Attr_Members: { + Computed: true, + Description: "List of server instances IDs that are members of the placement group.", + Elem: &schema.Schema{Type: schema.TypeString}, + Type: schema.TypeList, + }, + Attr_Policy: { + 
Computed: true, + Description: "The value of the group's affinity policy. Valid values are affinity and anti-affinity.", + Type: schema.TypeString, }, }, } } func dataSourceIBMPIPlacementGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - sess, err := meta.(conns.ClientSession).IBMPISession() if err != nil { return diag.FromErr(err) } - cloudInstanceID := d.Get(helpers.PICloudInstanceId).(string) - placementGroupName := d.Get(helpers.PIPlacementGroupName).(string) - client := st.NewIBMPIPlacementGroupClient(ctx, sess, cloudInstanceID) + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) + placementGroupName := d.Get(Arg_PlacementGroupName).(string) + client := instance.NewIBMPIPlacementGroupClient(ctx, sess, cloudInstanceID) response, err := client.Get(placementGroupName) if err != nil { @@ -64,8 +65,8 @@ func dataSourceIBMPIPlacementGroupRead(ctx context.Context, d *schema.ResourceDa } d.SetId(*response.ID) - d.Set("policy", response.Policy) - d.Set(PIPlacementGroupMembers, response.Members) + d.Set(Attr_Members, response.Members) + d.Set(Attr_Policy, response.Policy) return nil } diff --git a/ibm/service/power/data_source_ibm_pi_placement_group_test.go b/ibm/service/power/data_source_ibm_pi_placement_group_test.go index 0738266a98..05478cc72a 100644 --- a/ibm/service/power/data_source_ibm_pi_placement_group_test.go +++ b/ibm/service/power/data_source_ibm_pi_placement_group_test.go @@ -28,9 +28,8 @@ func TestAccIBMPIPlacementGroupDataSource_basic(t *testing.T) { func testAccCheckIBMPIPlacementGroupDataSourceConfig() string { return fmt.Sprintf(` -data "ibm_pi_placement_group" "testacc_ds_placement_group" { - pi_placement_group_name = "%s" - pi_cloud_instance_id = "%s" -}`, acc.Pi_placement_group_name, acc.Pi_cloud_instance_id) - + data "ibm_pi_placement_group" "testacc_ds_placement_group" { + pi_placement_group_name = "%s" + pi_cloud_instance_id = "%s" + }`, acc.Pi_placement_group_name, acc.Pi_cloud_instance_id) } diff --git a/ibm/service/power/data_source_ibm_pi_placement_groups.go b/ibm/service/power/data_source_ibm_pi_placement_groups.go index 345f2bf05c..6b25ed99ba 100644 --- a/ibm/service/power/data_source_ibm_pi_placement_groups.go +++ b/ibm/service/power/data_source_ibm_pi_placement_groups.go @@ -7,52 +7,52 @@ import ( "context" "log" + "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" "github.com/hashicorp/go-uuid" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - - st "github.com/IBM-Cloud/power-go-client/clients/instance" - "github.com/IBM-Cloud/power-go-client/helpers" - "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" -) - -const ( - PIPlacementGroups = "placement_groups" ) func DataSourceIBMPIPlacementGroups() *schema.Resource { return &schema.Resource{ ReadContext: dataSourceIBMPIPlacementGroupsRead, Schema: map[string]*schema.Schema{ - helpers.PICloudInstanceId: { - Type: schema.TypeString, + // Arguments + Arg_CloudInstanceID: { + Description: "The GUID of the service instance associated with an account.", Required: true, - Description: "PI cloud instance ID", + Type: schema.TypeString, ValidateFunc: validation.NoZeroValues, }, - // Computed Attributes - PIPlacementGroups: { + + // Attributes + Attr_PlacementGroups: { Type: schema.TypeList, Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "id": { - 
Type: schema.TypeString, - Computed: true, + Attr_ID: { + Computed: true, + Description: "The ID of the placement group.", + Type: schema.TypeString, }, - "name": { - Type: schema.TypeString, - Computed: true, + Attr_Members: { + Computed: true, + Description: "List of server instances IDs that are members of the placement group.", + Elem: &schema.Schema{Type: schema.TypeString}, + Type: schema.TypeList, }, - PIPlacementGroupMembers: { - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, - Computed: true, + Attr_Name: { + Computed: true, + Description: "User defined name for the placement group.", + Type: schema.TypeString, }, - "policy": { - Type: schema.TypeString, - Computed: true, + Attr_Policy: { + Computed: true, + Description: "The value of the group's affinity policy. Valid values are affinity and anti-affinity.", + Type: schema.TypeString, }, }, }, @@ -67,9 +67,9 @@ func dataSourceIBMPIPlacementGroupsRead(ctx context.Context, d *schema.ResourceD return diag.FromErr(err) } - cloudInstanceID := d.Get(helpers.PICloudInstanceId).(string) + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) - client := st.NewIBMPIPlacementGroupClient(ctx, sess, cloudInstanceID) + client := instance.NewIBMPIPlacementGroupClient(ctx, sess, cloudInstanceID) groups, err := client.GetAll() if err != nil { log.Printf("[ERROR] get all placement groups failed %v", err) @@ -79,17 +79,17 @@ func dataSourceIBMPIPlacementGroupsRead(ctx context.Context, d *schema.ResourceD result := make([]map[string]interface{}, 0, len(groups.PlacementGroups)) for _, placementGroup := range groups.PlacementGroups { key := map[string]interface{}{ - "id": placementGroup.ID, - "name": placementGroup.Name, - PIPlacementGroupMembers: placementGroup.Members, - "policy": placementGroup.Policy, + Attr_ID: placementGroup.ID, + Attr_Members: placementGroup.Members, + Attr_Name: placementGroup.Name, + Attr_Policy: placementGroup.Policy, } result = append(result, key) } var genID, _ = uuid.GenerateUUID() d.SetId(genID) - d.Set(PIPlacementGroups, result) + d.Set(Attr_PlacementGroups, result) return nil } diff --git a/ibm/service/power/data_source_ibm_pi_placement_groups_test.go b/ibm/service/power/data_source_ibm_pi_placement_groups_test.go index 2f0e8131c8..d520f3f1e0 100644 --- a/ibm/service/power/data_source_ibm_pi_placement_groups_test.go +++ b/ibm/service/power/data_source_ibm_pi_placement_groups_test.go @@ -30,6 +30,5 @@ func testAccCheckIBMPIPlacementGrousDataSourceConfig() string { return fmt.Sprintf(` data "ibm_pi_placement_groups" "test" { pi_cloud_instance_id = "%s" - } - `, acc.Pi_cloud_instance_id) + }`, acc.Pi_cloud_instance_id) } diff --git a/ibm/service/power/data_source_ibm_pi_public_network.go b/ibm/service/power/data_source_ibm_pi_public_network.go index ca5cc380e2..ef8195c964 100644 --- a/ibm/service/power/data_source_ibm_pi_public_network.go +++ b/ibm/service/power/data_source_ibm_pi_public_network.go @@ -4,41 +4,42 @@ package power import ( - //"fmt" "context" + "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - - "github.com/IBM-Cloud/power-go-client/clients/instance" - "github.com/IBM-Cloud/power-go-client/helpers" - "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" ) func DataSourceIBMPIPublicNetwork() *schema.Resource { - return &schema.Resource{ 
ReadContext: dataSourceIBMPIPublicNetworkRead, Schema: map[string]*schema.Schema{ - helpers.PICloudInstanceId: { - Type: schema.TypeString, + // Arguments + Arg_CloudInstanceID: { + Description: "The GUID of the service instance associated with an account.", Required: true, + Type: schema.TypeString, ValidateFunc: validation.NoZeroValues, }, - // Computed Attributes - "name": { - Type: schema.TypeString, - Computed: true, + // Attributes + Attr_Name: { + Computed: true, + Description: "The name of the network.", + Type: schema.TypeString, }, - "type": { - Type: schema.TypeString, - Computed: true, + Attr_Type: { + Computed: true, + Description: "The type of VLAN that the network is connected to.", + Type: schema.TypeString, }, - "vlan_id": { - Type: schema.TypeInt, - Computed: true, + Attr_VLanID: { + Computed: true, + Description: "The ID of the VLAN that the network is connected to.", + Type: schema.TypeInt, }, }, } @@ -50,7 +51,7 @@ func dataSourceIBMPIPublicNetworkRead(ctx context.Context, d *schema.ResourceDat return diag.FromErr(err) } - cloudInstanceID := d.Get(helpers.PICloudInstanceId).(string) + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) networkC := instance.NewIBMPINetworkClient(ctx, sess, cloudInstanceID) networkdata, err := networkC.GetAllPublic() @@ -62,14 +63,14 @@ func dataSourceIBMPIPublicNetworkRead(ctx context.Context, d *schema.ResourceDat } d.SetId(*networkdata.Networks[0].NetworkID) - if networkdata.Networks[0].Type != nil { - d.Set("type", networkdata.Networks[0].Type) - } if networkdata.Networks[0].Name != nil { - d.Set("name", networkdata.Networks[0].Name) + d.Set(Attr_Name, networkdata.Networks[0].Name) + } + if networkdata.Networks[0].Type != nil { + d.Set(Attr_Type, networkdata.Networks[0].Type) } if networkdata.Networks[0].VlanID != nil { - d.Set("vlan_id", networkdata.Networks[0].VlanID) + d.Set(Attr_VLanID, networkdata.Networks[0].VlanID) } return nil diff --git a/ibm/service/power/data_source_ibm_pi_public_network_test.go b/ibm/service/power/data_source_ibm_pi_public_network_test.go index f8c3eb1c5b..3ac39c13a6 100644 --- a/ibm/service/power/data_source_ibm_pi_public_network_test.go +++ b/ibm/service/power/data_source_ibm_pi_public_network_test.go @@ -29,8 +29,7 @@ func TestAccIBMPIPublicNetworkDataSource_basic(t *testing.T) { func testAccCheckIBMPIPublicNetworkDataSourceConfig() string { return fmt.Sprintf(` -data "ibm_pi_public_network" "testacc_ds_public_network" { - pi_cloud_instance_id = "%s" -}`, acc.Pi_cloud_instance_id) - + data "ibm_pi_public_network" "testacc_ds_public_network" { + pi_cloud_instance_id = "%s" + }`, acc.Pi_cloud_instance_id) } diff --git a/ibm/service/power/data_source_ibm_pi_sap_profile.go b/ibm/service/power/data_source_ibm_pi_sap_profile.go index 43510987d1..1f53d1ad72 100644 --- a/ibm/service/power/data_source_ibm_pi_sap_profile.go +++ b/ibm/service/power/data_source_ibm_pi_sap_profile.go @@ -7,49 +7,51 @@ import ( "context" "log" + "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - - "github.com/IBM-Cloud/power-go-client/clients/instance" - "github.com/IBM-Cloud/power-go-client/helpers" - "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" ) func DataSourceIBMPISAPProfile() *schema.Resource { return &schema.Resource{ ReadContext: dataSourceIBMPISAPProfileRead, Schema: 
map[string]*schema.Schema{ - helpers.PICloudInstanceId: { - Type: schema.TypeString, + // Arguments + Arg_CloudInstanceID: { + Description: "The GUID of the service instance associated with an account.", Required: true, + Type: schema.TypeString, ValidateFunc: validation.NoZeroValues, }, - PISAPInstanceProfileID: { - Type: schema.TypeString, - Required: true, - Description: "SAP Profile ID", + Arg_SAPProfileID: { + Description: "SAP Profile ID", + Required: true, + Type: schema.TypeString, + ValidateFunc: validation.NoZeroValues, }, - // Computed Attributes - PISAPProfileCertified: { - Type: schema.TypeBool, + + // Attributes + Attr_Certified: { Computed: true, - Description: "Has certification been performed on profile", + Description: "Has certification been performed on profile.", + Type: schema.TypeBool, }, - PISAPProfileCores: { - Type: schema.TypeInt, + Attr_Cores: { Computed: true, - Description: "Amount of cores", - }, - PISAPProfileMemory: { + Description: "Amount of cores.", Type: schema.TypeInt, + }, + Attr_Memory: { Computed: true, - Description: "Amount of memory (in GB)", + Description: "Amount of memory (in GB).", + Type: schema.TypeInt, }, - PISAPProfileType: { - Type: schema.TypeString, + Attr_Type: { Computed: true, - Description: "Type of profile", + Description: "Type of profile.", + Type: schema.TypeString, }, }, } @@ -61,8 +63,8 @@ func dataSourceIBMPISAPProfileRead(ctx context.Context, d *schema.ResourceData, return diag.FromErr(err) } - cloudInstanceID := d.Get(helpers.PICloudInstanceId).(string) - profileID := d.Get(PISAPInstanceProfileID).(string) + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) + profileID := d.Get(Arg_SAPProfileID).(string) client := instance.NewIBMPISAPInstanceClient(ctx, sess, cloudInstanceID) sapProfile, err := client.GetSAPProfile(profileID) @@ -72,10 +74,10 @@ func dataSourceIBMPISAPProfileRead(ctx context.Context, d *schema.ResourceData, } d.SetId(*sapProfile.ProfileID) - d.Set(PISAPProfileCertified, *sapProfile.Certified) - d.Set(PISAPProfileCores, *sapProfile.Cores) - d.Set(PISAPProfileMemory, *sapProfile.Memory) - d.Set(PISAPProfileType, *sapProfile.Type) + d.Set(Attr_Certified, *sapProfile.Certified) + d.Set(Attr_Cores, *sapProfile.Cores) + d.Set(Attr_Memory, *sapProfile.Memory) + d.Set(Attr_Type, *sapProfile.Type) return nil } diff --git a/ibm/service/power/data_source_ibm_pi_sap_profiles.go b/ibm/service/power/data_source_ibm_pi_sap_profiles.go index 0b314791ca..a3c3c5f5a6 100644 --- a/ibm/service/power/data_source_ibm_pi_sap_profiles.go +++ b/ibm/service/power/data_source_ibm_pi_sap_profiles.go @@ -7,58 +7,60 @@ import ( "context" "log" + "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" "github.com/hashicorp/go-uuid" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - - "github.com/IBM-Cloud/power-go-client/clients/instance" - "github.com/IBM-Cloud/power-go-client/helpers" - "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" ) func DataSourceIBMPISAPProfiles() *schema.Resource { return &schema.Resource{ ReadContext: dataSourceIBMPISAPProfilesRead, Schema: map[string]*schema.Schema{ - helpers.PICloudInstanceId: { - Type: schema.TypeString, + // Arguments + Arg_CloudInstanceID: { + Description: "The GUID of the service instance associated with an account.", Required: true, + Type: schema.TypeString, ValidateFunc: 
validation.NoZeroValues, }, - // Computed Attributes - PISAPProfiles: { - Type: schema.TypeList, - Computed: true, + + // Attributes + Attr_Profiles: { + Computed: true, + Description: "List of all the SAP Profiles.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - PISAPProfileCertified: { - Type: schema.TypeBool, + Attr_Certified: { Computed: true, - Description: "Has certification been performed on profile", + Description: "Has certification been performed on profile.", + Type: schema.TypeBool, }, - PISAPProfileCores: { - Type: schema.TypeInt, + Attr_Cores: { Computed: true, - Description: "Amount of cores", - }, - PISAPProfileMemory: { + Description: "Amount of cores.", Type: schema.TypeInt, - Computed: true, - Description: "Amount of memory (in GB)", }, - PISAPProfileID: { - Type: schema.TypeString, + Attr_Memory: { Computed: true, - Description: "SAP Profile ID", + Description: "Amount of memory (in GB).", + Type: schema.TypeInt, }, - PISAPProfileType: { + Attr_ProfileID: { + Computed: true, + Description: "SAP Profile ID.", Type: schema.TypeString, + }, + Attr_Type: { Computed: true, - Description: "Type of profile", + Description: "Type of profile.", + Type: schema.TypeString, }, }, }, + Type: schema.TypeList, }, }, } @@ -70,7 +72,7 @@ func dataSourceIBMPISAPProfilesRead(ctx context.Context, d *schema.ResourceData, return diag.FromErr(err) } - cloudInstanceID := d.Get(helpers.PICloudInstanceId).(string) + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) client := instance.NewIBMPISAPInstanceClient(ctx, sess, cloudInstanceID) sapProfiles, err := client.GetAllSAPProfiles(cloudInstanceID) @@ -82,18 +84,18 @@ func dataSourceIBMPISAPProfilesRead(ctx context.Context, d *schema.ResourceData, result := make([]map[string]interface{}, 0, len(sapProfiles.Profiles)) for _, sapProfile := range sapProfiles.Profiles { profile := map[string]interface{}{ - PISAPProfileCertified: *sapProfile.Certified, - PISAPProfileCores: *sapProfile.Cores, - PISAPProfileMemory: *sapProfile.Memory, - PISAPProfileID: *sapProfile.ProfileID, - PISAPProfileType: *sapProfile.Type, + Attr_Certified: *sapProfile.Certified, + Attr_Cores: *sapProfile.Cores, + Attr_Memory: *sapProfile.Memory, + Attr_ProfileID: *sapProfile.ProfileID, + Attr_Type: *sapProfile.Type, } result = append(result, profile) } var genID, _ = uuid.GenerateUUID() d.SetId(genID) - d.Set(PISAPProfiles, result) + d.Set(Attr_Profiles, result) return nil } diff --git a/ibm/service/power/data_source_ibm_pi_shared_processor_pool.go b/ibm/service/power/data_source_ibm_pi_shared_processor_pool.go index e3fcc1be34..b6c04c4f58 100644 --- a/ibm/service/power/data_source_ibm_pi_shared_processor_pool.go +++ b/ibm/service/power/data_source_ibm_pi_shared_processor_pool.go @@ -6,143 +6,137 @@ package power import ( "context" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - st "github.com/IBM-Cloud/power-go-client/clients/instance" - "github.com/IBM-Cloud/power-go-client/helpers" + "github.com/IBM-Cloud/power-go-client/clients/instance" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func DataSourceIBMPISharedProcessorPool() *schema.Resource { - return &schema.Resource{ ReadContext: dataSourceIBMPISharedProcessorPoolRead, Schema: map[string]*schema.Schema{ - Arg_SharedProcessorPoolID: { - Type: schema.TypeString, - Required: true, - }, - + // 
Arguments Arg_CloudInstanceID: { - Type: schema.TypeString, - Required: true, - Description: "PI cloud instance ID", + Description: "The GUID of the service instance associated with an account.", + Required: true, + Type: schema.TypeString, + ValidateFunc: validation.NoZeroValues, }, - - Attr_SharedProcessorPoolName: { - Type: schema.TypeString, - Computed: true, - Description: "Name of the shared processor pool", - }, - - Attr_SharedProcessorPoolHostID: { - Type: schema.TypeInt, - Computed: true, - Description: "The host ID where the shared processor pool resides", - }, - - Attr_SharedProcessorPoolReservedCores: { - Type: schema.TypeInt, - Computed: true, - Description: "The amount of reserved cores for the shared processor pool", + Arg_SharedProcessorPoolID: { + Description: "The ID of the shared processor pool.", + Required: true, + Type: schema.TypeString, + ValidateFunc: validation.NoZeroValues, }, - Attr_SharedProcessorPoolAvailableCores: { - Type: schema.TypeFloat, + // Attributes + Attr_AllocatedCores: { Computed: true, - Description: "Shared processor pool available cores", - }, - - Attr_SharedProcessorPoolAllocatedCores: { + Description: "The allocated cores in the shared processor pool.", Type: schema.TypeFloat, - Computed: true, - Description: "Shared processor pool allocated cores", }, - - Attr_SharedProcessorPoolStatus: { - Type: schema.TypeString, + Attr_AvailableCores: { Computed: true, - Description: "The status of the shared processor pool", + Description: "The available cores in the shared processor pool.", + Type: schema.TypeFloat, }, - - Attr_SharedProcessorPoolStatusDetail: { - Type: schema.TypeString, + Attr_HostID: { Computed: true, - Description: "The status details of the shared processor pool", + Description: "The host ID where the shared processor pool resides.", + Type: schema.TypeInt, }, - - Attr_SharedProcessorPoolInstances: { - Type: schema.TypeList, + Attr_Instances: { Computed: true, - Description: "List of server instances deployed in the shared processor pool", + Description: "List of server instances deployed in the shared processor pool.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - Attr_SharedProcessorPoolInstanceCpus: { - Type: schema.TypeInt, - Optional: true, + Attr_AvailabilityZone: { Computed: true, - Description: "The amount of cpus for the server instance", - }, - Attr_SharedProcessorPoolInstanceUncapped: { - Type: schema.TypeBool, + Description: "Availability zone for the server instances.", Optional: true, - Computed: true, - Description: "Identifies if uncapped or not", - }, - Attr_SharedProcessorPoolInstanceAvailabilityZone: { Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "Availability zone for the server instances", }, - Attr_SharedProcessorPoolInstanceId: { - Type: schema.TypeString, - Optional: true, + Attr_CPUs: { Computed: true, - Description: "The server instance ID", - }, - Attr_SharedProcessorPoolInstanceMemory: { - Type: schema.TypeInt, + Description: "The amount of cpus for the server instance.", Optional: true, - Computed: true, - Description: "The amount of memory for the server instance", + Type: schema.TypeInt, }, - Attr_SharedProcessorPoolInstanceName: { - Type: schema.TypeString, + Attr_ID: { + Computed: true, + Description: "The server instance ID.", Optional: true, + Type: schema.TypeString, + }, + Attr_Memory: { Computed: true, - Description: "The server instance name", + Description: "The amount of memory for the server instance.", + Optional: true, + Type: schema.TypeInt, 
}, - Attr_SharedProcessorPoolInstanceStatus: { - Type: schema.TypeString, + Attr_Name: { + Computed: true, + Description: "The server instance name.", Optional: true, + Type: schema.TypeString, + }, + Attr_Status: { Computed: true, - Description: "Status of the server", + Description: "Status of the instance.", + Optional: true, + Type: schema.TypeString, }, - Attr_SharedProcessorPoolInstanceVcpus: { - Type: schema.TypeFloat, + Attr_Uncapped: { + Computed: true, + Description: "Identifies if uncapped or not.", Optional: true, + Type: schema.TypeBool, + }, + Attr_VCPUs: { Computed: true, - Description: "The amout of vcpus for the server instance", + Description: "The amount of vcpus for the server instance.", + Optional: true, + Type: schema.TypeFloat, }, }, }, + Type: schema.TypeList, + }, + Attr_Name: { + Computed: true, + Description: "The name of the shared processor pool.", + Type: schema.TypeString, + }, + Attr_ReservedCores: { + Computed: true, + Description: "The amount of reserved cores for the shared processor pool.", + Type: schema.TypeInt, + }, + Attr_Status: { + Computed: true, + Description: "The status of the shared processor pool.", + Type: schema.TypeString, + }, + Attr_StatusDetail: { + Computed: true, + Description: "The status details of the shared processor pool.", + Type: schema.TypeString, + }, }, } } func dataSourceIBMPISharedProcessorPoolRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - sess, err := meta.(conns.ClientSession).IBMPISession() if err != nil { return diag.FromErr(err) } - cloudInstanceID := d.Get(helpers.PICloudInstanceId).(string) + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) poolID := d.Get(Arg_SharedProcessorPoolID).(string) - client := st.NewIBMPISharedProcessorPoolClient(ctx, sess, cloudInstanceID) + client := instance.NewIBMPISharedProcessorPoolClient(ctx, sess, cloudInstanceID) response, err := client.Get(poolID) if err != nil || response == nil { @@ -150,33 +144,33 @@ func dataSourceIBMPISharedProcessorPoolRead(ctx context.Context, d *schema.Resou } d.SetId(*response.SharedProcessorPool.ID) - d.Set(Attr_SharedProcessorPoolName, response.SharedProcessorPool.Name) - d.Set(Attr_SharedProcessorPoolReservedCores, response.SharedProcessorPool.ReservedCores) - d.Set(Attr_SharedProcessorPoolAllocatedCores, response.SharedProcessorPool.AllocatedCores) - d.Set(Attr_SharedProcessorPoolAvailableCores, response.SharedProcessorPool.AvailableCores) - d.Set(Attr_SharedProcessorPoolHostID, response.SharedProcessorPool.HostID) - d.Set(Attr_SharedProcessorPoolStatus, response.SharedProcessorPool.Status) - d.Set(Attr_SharedProcessorPoolStatusDetail, response.SharedProcessorPool.StatusDetail) + d.Set(Attr_AllocatedCores, response.SharedProcessorPool.AllocatedCores) + d.Set(Attr_AvailableCores, response.SharedProcessorPool.AvailableCores) + d.Set(Attr_HostID, response.SharedProcessorPool.HostID) + d.Set(Attr_Name, response.SharedProcessorPool.Name) + d.Set(Attr_ReservedCores, response.SharedProcessorPool.ReservedCores) + d.Set(Attr_Status, response.SharedProcessorPool.Status) + d.Set(Attr_StatusDetail, response.SharedProcessorPool.StatusDetail) serversMap := []map[string]interface{}{} if response.Servers != nil { for _, s := range response.Servers { if s != nil { v := map[string]interface{}{ - Attr_SharedProcessorPoolInstanceCpus: s.Cpus, - Attr_SharedProcessorPoolInstanceUncapped: s.Uncapped, - Attr_SharedProcessorPoolInstanceAvailabilityZone: s.AvailabilityZone, - Attr_SharedProcessorPoolInstanceId: s.ID, -
Attr_SharedProcessorPoolInstanceMemory: s.Memory, - Attr_SharedProcessorPoolInstanceName: s.Name, - Attr_SharedProcessorPoolInstanceStatus: s.Status, - Attr_SharedProcessorPoolInstanceVcpus: s.Vcpus, + Attr_AvailabilityZone: s.AvailabilityZone, + Attr_CPUs: s.Cpus, + Attr_ID: s.ID, + Attr_Memory: s.Memory, + Attr_Name: s.Name, + Attr_Status: s.Status, + Attr_Uncapped: s.Uncapped, + Attr_VCPUs: s.Vcpus, } serversMap = append(serversMap, v) } } } - d.Set(Attr_SharedProcessorPoolInstances, serversMap) + d.Set(Attr_Instances, serversMap) return nil } diff --git a/ibm/service/power/data_source_ibm_pi_shared_processor_pool_test.go b/ibm/service/power/data_source_ibm_pi_shared_processor_pool_test.go index a39f509265..17df24c366 100644 --- a/ibm/service/power/data_source_ibm_pi_shared_processor_pool_test.go +++ b/ibm/service/power/data_source_ibm_pi_shared_processor_pool_test.go @@ -28,9 +28,8 @@ func TestAccIBMPIPISharedProcessorPoolDataSource_basic(t *testing.T) { func testAccCheckIBMPIPISharedProcessorPoolDataSourceConfig() string { return fmt.Sprintf(` -data "ibm_pi_shared_processor_pool" "test_pool" { - pi_shared_processor_pool_id = "%s" - pi_cloud_instance_id = "%s" -}`, acc.Pi_shared_processor_pool_id, acc.Pi_cloud_instance_id) - + data "ibm_pi_shared_processor_pool" "test_pool" { + pi_shared_processor_pool_id = "%s" + pi_cloud_instance_id = "%s" + }`, acc.Pi_shared_processor_pool_id, acc.Pi_cloud_instance_id) } diff --git a/ibm/service/power/data_source_ibm_pi_shared_processor_pools.go b/ibm/service/power/data_source_ibm_pi_shared_processor_pools.go index e14611d878..74ccf37d22 100644 --- a/ibm/service/power/data_source_ibm_pi_shared_processor_pools.go +++ b/ibm/service/power/data_source_ibm_pi_shared_processor_pools.go @@ -6,70 +6,75 @@ package power import ( "context" + "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" "github.com/hashicorp/go-uuid" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - - st "github.com/IBM-Cloud/power-go-client/clients/instance" - "github.com/IBM-Cloud/power-go-client/helpers" - "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" -) - -const ( - PISharedProcessorPools = "shared_processor_pools" ) func DataSourceIBMPISharedProcessorPools() *schema.Resource { return &schema.Resource{ ReadContext: dataSourceIBMPISharedProcessorPoolsRead, Schema: map[string]*schema.Schema{ - helpers.PICloudInstanceId: { - Type: schema.TypeString, + // Arguments + Arg_CloudInstanceID: { + Description: "The GUID of the service instance associated with an account.", Required: true, - Description: "PI cloud instance ID", + Type: schema.TypeString, ValidateFunc: validation.NoZeroValues, }, - // Computed Attributes - PISharedProcessorPools: { - Type: schema.TypeList, - Computed: true, + + // Attributes + Attr_SharedProcessorPools: { + Computed: true, + Description: "List of all the shared processor pools.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - Attr_SharedProcessorPoolID: { - Type: schema.TypeString, - Computed: true, + Attr_AllocatedCores: { + Computed: true, + Description: "The allocated cores in the shared processor pool.", + Type: schema.TypeFloat, }, - Attr_SharedProcessorPoolAllocatedCores: { - Type: schema.TypeFloat, - Computed: true, + Attr_AvailableCores: { + Computed: true, + Description: "The available cores in the shared processor pool.", + Type: 
schema.TypeInt, }, - Attr_SharedProcessorPoolAvailableCores: { - Type: schema.TypeInt, - Computed: true, + Attr_HostID: { + Computed: true, + Description: "The host ID where the shared processor pool resides.", + Type: schema.TypeInt, }, - Attr_SharedProcessorPoolName: { - Type: schema.TypeString, - Computed: true, + Attr_Name: { + Computed: true, + Description: "The name of the shared processor pool.", + Type: schema.TypeString, }, - Attr_SharedProcessorPoolReservedCores: { - Type: schema.TypeInt, - Computed: true, + Attr_ReservedCores: { + Computed: true, + Description: "The amount of reserved cores for the shared processor pool.", + Type: schema.TypeInt, }, - Attr_SharedProcessorPoolHostID: { - Type: schema.TypeInt, - Computed: true, + Attr_SharedProcessorPoolID: { + Computed: true, + Description: "The shared processor pool's unique ID.", + Type: schema.TypeString, }, - Attr_SharedProcessorPoolStatus: { - Type: schema.TypeString, - Computed: true, + Attr_Status: { + Computed: true, + Description: "The status of the shared processor pool.", + Type: schema.TypeString, }, - Attr_SharedProcessorPoolStatusDetail: { - Type: schema.TypeString, - Computed: true, + Attr_StatusDetail: { + Computed: true, + Description: "The status details of the shared processor pool.", + Type: schema.TypeString, }, }, }, + Type: schema.TypeList, }, }, } @@ -81,9 +86,9 @@ func dataSourceIBMPISharedProcessorPoolsRead(ctx context.Context, d *schema.Reso return diag.FromErr(err) } - cloudInstanceID := d.Get(helpers.PICloudInstanceId).(string) + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) - client := st.NewIBMPISharedProcessorPoolClient(ctx, sess, cloudInstanceID) + client := instance.NewIBMPISharedProcessorPoolClient(ctx, sess, cloudInstanceID) pools, err := client.GetAll() if err != nil || pools == nil { return diag.Errorf("error fetching shared processor pools: %v", err) @@ -92,21 +97,21 @@ func dataSourceIBMPISharedProcessorPoolsRead(ctx context.Context, d *schema.Reso result := make([]map[string]interface{}, 0, len(pools.SharedProcessorPools)) for _, pool := range pools.SharedProcessorPools { key := map[string]interface{}{ - Attr_SharedProcessorPoolID: *pool.ID, - Attr_SharedProcessorPoolName: *pool.Name, - Attr_SharedProcessorPoolAllocatedCores: *pool.AllocatedCores, - Attr_SharedProcessorPoolAvailableCores: *pool.AvailableCores, - Attr_SharedProcessorPoolReservedCores: *pool.ReservedCores, - Attr_SharedProcessorPoolHostID: pool.HostID, - Attr_SharedProcessorPoolStatus: pool.Status, - Attr_SharedProcessorPoolStatusDetail: pool.StatusDetail, + Attr_AllocatedCores: *pool.AllocatedCores, + Attr_AvailableCores: *pool.AvailableCores, + Attr_HostID: pool.HostID, + Attr_Name: *pool.Name, + Attr_ReservedCores: *pool.ReservedCores, + Attr_SharedProcessorPoolID: *pool.ID, + Attr_Status: pool.Status, + Attr_StatusDetail: pool.StatusDetail, } result = append(result, key) } var genID, _ = uuid.GenerateUUID() d.SetId(genID) - d.Set(PISharedProcessorPools, result) + d.Set(Attr_SharedProcessorPools, result) return nil } diff --git a/ibm/service/power/data_source_ibm_pi_shared_processor_pools_test.go b/ibm/service/power/data_source_ibm_pi_shared_processor_pools_test.go index fcbb9c4dbb..1d5367bf85 100644 --- a/ibm/service/power/data_source_ibm_pi_shared_processor_pools_test.go +++ b/ibm/service/power/data_source_ibm_pi_shared_processor_pools_test.go @@ -30,6 +30,5 @@ func testAccCheckIBMPISharedProcessorPoolsDataSourceConfig() string { return fmt.Sprintf(` data "ibm_pi_shared_processor_pools" "test" { 
pi_cloud_instance_id = "%s" - } - `, acc.Pi_cloud_instance_id) + }`, acc.Pi_cloud_instance_id) } diff --git a/ibm/service/power/data_source_ibm_pi_snapshot.go b/ibm/service/power/data_source_ibm_pi_snapshot.go index b852f1024f..ad454cf521 100644 --- a/ibm/service/power/data_source_ibm_pi_snapshot.go +++ b/ibm/service/power/data_source_ibm_pi_snapshot.go @@ -8,7 +8,6 @@ import ( "log" "github.com/IBM-Cloud/power-go-client/clients/instance" - "github.com/IBM-Cloud/power-go-client/helpers" "github.com/IBM-Cloud/power-go-client/power/models" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" "github.com/hashicorp/go-uuid" @@ -18,65 +17,73 @@ import ( ) func DataSourceIBMPISnapshot() *schema.Resource { - return &schema.Resource{ ReadContext: dataSourceIBMPISnapshotRead, Schema: map[string]*schema.Schema{ - - helpers.PICloudInstanceId: { - Type: schema.TypeString, + // Arguments + Arg_CloudInstanceID: { + Description: "The GUID of the service instance associated with an account.", Required: true, + Type: schema.TypeString, ValidateFunc: validation.NoZeroValues, }, - - helpers.PIInstanceName: { - Type: schema.TypeString, + Arg_InstanceName: { + Description: "The unique identifier or name of the instance.", Required: true, + Type: schema.TypeString, ValidateFunc: validation.NoZeroValues, }, - //Computed Attributes - "pvm_snapshots": { + // Attributes + Attr_PVMSnapshots: { Type: schema.TypeList, Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Computed: true, + Attr_Action: { + Computed: true, + Description: "Action performed on the instance snapshot.", + Type: schema.TypeString, }, - "name": { - Type: schema.TypeString, - Computed: true, + Attr_CreationDate: { + Computed: true, + Description: "Date of snapshot creation.", + Type: schema.TypeString, }, - "percent_complete": { - Type: schema.TypeInt, - Computed: true, + Attr_Description: { + Computed: true, + Description: "The description of the snapshot.", + Type: schema.TypeString, }, - - "description": { - Type: schema.TypeString, - Computed: true, + Attr_ID: { + Computed: true, + Description: "The unique identifier of the Power Virtual Machine instance snapshot.", + Type: schema.TypeString, }, - "action": { - Type: schema.TypeString, - Computed: true, + Attr_LastUpdatedDate: { + Computed: true, + Description: "Date of last update.", + Type: schema.TypeString, }, - "status": { - Type: schema.TypeString, - Computed: true, + Attr_Name: { + Computed: true, + Description: "The name of the Power Virtual Machine instance snapshot.", + Type: schema.TypeString, }, - "creation_date": { - Type: schema.TypeString, - Computed: true, + Attr_PercentComplete: { + Computed: true, + Description: "The snapshot completion percentage.", + Type: schema.TypeInt, }, - "last_updated_date": { - Type: schema.TypeString, - Computed: true, + Attr_Status: { + Computed: true, + Description: "The status of the Power Virtual Machine instance snapshot.", + Type: schema.TypeString, }, - "volume_snapshots": { - Type: schema.TypeMap, - Computed: true, + Attr_VolumeSnapshots: { + Computed: true, + Description: "A map of volume snapshots included in the Power Virtual Machine instance snapshot.", + Type: schema.TypeMap, }, }, }, @@ -86,14 +93,13 @@ func DataSourceIBMPISnapshot() *schema.Resource { } func dataSourceIBMPISnapshotRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - sess, err := meta.(conns.ClientSession).IBMPISession() if err != nil { return diag.FromErr(err) } - 
cloudInstanceID := d.Get(helpers.PICloudInstanceId).(string) - powerinstancename := d.Get(helpers.PIInstanceName).(string) + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) + powerinstancename := d.Get(Arg_InstanceName).(string) snapshot := instance.NewIBMPIInstanceClient(ctx, sess, cloudInstanceID) snapshotData, err := snapshot.GetSnapShotVM(powerinstancename) @@ -103,30 +109,27 @@ func dataSourceIBMPISnapshotRead(ctx context.Context, d *schema.ResourceData, me var clientgenU, _ = uuid.GenerateUUID() d.SetId(clientgenU) - d.Set("pvm_snapshots", flattenPVMSnapshotInstances(snapshotData.Snapshots)) + d.Set(Attr_PVMSnapshots, flattenPVMSnapshotInstances(snapshotData.Snapshots)) return nil - } func flattenPVMSnapshotInstances(list []*models.Snapshot) []map[string]interface{} { - log.Printf("Calling the flattensnapshotinstances call with list %d", len(list)) + log.Printf("Calling the flattenPVMSnapshotInstances call with list %d", len(list)) result := make([]map[string]interface{}, 0, len(list)) for _, i := range list { l := map[string]interface{}{ - "id": *i.SnapshotID, - "name": *i.Name, - "description": i.Description, - "creation_date": i.CreationDate.String(), - "last_updated_date": i.LastUpdateDate.String(), - "action": i.Action, - "percent_complete": i.PercentComplete, - "status": i.Status, - "volume_snapshots": i.VolumeSnapshots, + Attr_Action: i.Action, + Attr_CreationDate: i.CreationDate.String(), + Attr_Description: i.Description, + Attr_ID: *i.SnapshotID, + Attr_LastUpdatedDate: i.LastUpdateDate.String(), + Attr_Name: *i.Name, + Attr_PercentComplete: i.PercentComplete, + Attr_Status: i.Status, + Attr_VolumeSnapshots: i.VolumeSnapshots, } - result = append(result, l) } - return result } diff --git a/ibm/service/power/data_source_ibm_pi_snapshot_test.go b/ibm/service/power/data_source_ibm_pi_snapshot_test.go index 3636fbf048..e5c502c1cb 100644 --- a/ibm/service/power/data_source_ibm_pi_snapshot_test.go +++ b/ibm/service/power/data_source_ibm_pi_snapshot_test.go @@ -29,10 +29,8 @@ func TestAccIBMPISnapshotDataSource_basic(t *testing.T) { func testAccCheckIBMPISnapshotDataSourceConfig() string { return fmt.Sprintf(` - -data "ibm_pi_pvm_snapshots" "testacc_pi_snapshots" { - pi_instance_name = "%s" - pi_cloud_instance_id = "%s" -}`, acc.Pi_instance_name, acc.Pi_cloud_instance_id) - + data "ibm_pi_pvm_snapshots" "testacc_pi_snapshots" { + pi_instance_name = "%s" + pi_cloud_instance_id = "%s" + }`, acc.Pi_instance_name, acc.Pi_cloud_instance_id) } diff --git a/ibm/service/power/data_source_ibm_pi_snapshots.go b/ibm/service/power/data_source_ibm_pi_snapshots.go index 273c0fcef8..6e2ea76e34 100644 --- a/ibm/service/power/data_source_ibm_pi_snapshots.go +++ b/ibm/service/power/data_source_ibm_pi_snapshots.go @@ -8,7 +8,6 @@ import ( "log" "github.com/IBM-Cloud/power-go-client/clients/instance" - "github.com/IBM-Cloud/power-go-client/helpers" "github.com/IBM-Cloud/power-go-client/power/models" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" "github.com/hashicorp/go-uuid" @@ -18,60 +17,71 @@ import ( ) func DataSourceIBMPISnapshots() *schema.Resource { - return &schema.Resource{ ReadContext: dataSourceIBMPISnapshotsRead, Schema: map[string]*schema.Schema{ - helpers.PICloudInstanceId: { - Type: schema.TypeString, + // Arguments + Arg_CloudInstanceID: { + Description: "The GUID of the service instance associated with an account.", Required: true, + Type: schema.TypeString, ValidateFunc: validation.NoZeroValues, }, - //Computed Attributes - "instance_snapshots": { - Type: 
schema.TypeList, - Computed: true, + // Attributes + Attr_InstanceSnapshots: { + Computed: true, + Description: "List of Power Virtual Machine instance snapshots within the given cloud instance.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Computed: true, + Attr_Action: { + Computed: true, + Description: "Action performed on the instance snapshot.", + Type: schema.TypeString, }, - "name": { - Type: schema.TypeString, - Computed: true, + Attr_CreationDate: { + Computed: true, + Description: "Date of snapshot creation.", + Type: schema.TypeString, }, - "percent_complete": { - Type: schema.TypeInt, - Computed: true, + Attr_Description: { + Computed: true, + Description: "The description of the snapshot.", + Type: schema.TypeString, }, - "description": { - Type: schema.TypeString, - Computed: true, + Attr_ID: { + Computed: true, + Description: "The unique identifier of the Power Systems Virtual Machine instance snapshot.", + Type: schema.TypeString, }, - "action": { - Type: schema.TypeString, - Computed: true, + Attr_LastUpdatedDate: { + Computed: true, + Description: "Date of last update.", + Type: schema.TypeString, }, - "status": { - Type: schema.TypeString, - Computed: true, + Attr_Name: { + Computed: true, + Description: "The name of the Power Systems Virtual Machine instance snapshot.", + Type: schema.TypeString, }, - "creation_date": { - Type: schema.TypeString, - Computed: true, + Attr_PercentComplete: { + Computed: true, + Description: "The snapshot completion percentage.", + Type: schema.TypeInt, }, - "last_updated_date": { - Type: schema.TypeString, - Computed: true, + Attr_Status: { + Computed: true, + Description: "The status of the Power Virtual Machine instance snapshot.", + Type: schema.TypeString, }, - "volume_snapshots": { - Type: schema.TypeMap, - Computed: true, + Attr_VolumeSnapshots: { + Computed: true, + Description: "A map of volume snapshots included in the Power Virtual Machine instance snapshot.", + Type: schema.TypeMap, }, }, }, + Type: schema.TypeList, }, }, } @@ -83,7 +93,7 @@ func dataSourceIBMPISnapshotsRead(ctx context.Context, d *schema.ResourceData, m return diag.FromErr(err) } - cloudInstanceID := d.Get(helpers.PICloudInstanceId).(string) + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) snapshot := instance.NewIBMPISnapshotClient(ctx, sess, cloudInstanceID) snapshotData, err := snapshot.GetAll() if err != nil { @@ -92,29 +102,27 @@ func dataSourceIBMPISnapshotsRead(ctx context.Context, d *schema.ResourceData, m var clientgenU, _ = uuid.GenerateUUID() d.SetId(clientgenU) - d.Set("instance_snapshots", flattenSnapshotsInstances(snapshotData.Snapshots)) + d.Set(Attr_InstanceSnapshots, flattenSnapshotsInstances(snapshotData.Snapshots)) return nil } func flattenSnapshotsInstances(list []*models.Snapshot) []map[string]interface{} { - log.Printf("Calling the flattensnapshotsinstances call with list %d", len(list)) + log.Printf("Calling the flattenSnapshotsInstances call with list %d", len(list)) result := make([]map[string]interface{}, 0, len(list)) for _, i := range list { l := map[string]interface{}{ - "id": *i.SnapshotID, - "name": *i.Name, - "description": i.Description, - "creation_date": i.CreationDate.String(), - "last_updated_date": i.LastUpdateDate.String(), - "action": i.Action, - "percent_complete": i.PercentComplete, - "status": i.Status, - "volume_snapshots": i.VolumeSnapshots, + Attr_Action: i.Action, + Attr_CreationDate: i.CreationDate.String(), + Attr_Description: i.Description, + 
Attr_ID: *i.SnapshotID, + Attr_LastUpdatedDate: i.LastUpdateDate.String(), + Attr_Name: *i.Name, + Attr_PercentComplete: i.PercentComplete, + Attr_Status: i.Status, + Attr_VolumeSnapshots: i.VolumeSnapshots, } - result = append(result, l) } - return result } diff --git a/ibm/service/power/data_source_ibm_pi_snapshots_test.go b/ibm/service/power/data_source_ibm_pi_snapshots_test.go index be8d385d8a..8494e0c9c7 100644 --- a/ibm/service/power/data_source_ibm_pi_snapshots_test.go +++ b/ibm/service/power/data_source_ibm_pi_snapshots_test.go @@ -13,7 +13,6 @@ import ( ) func TestAccIBMPISnapshotsDataSource_basic(t *testing.T) { - resource.Test(t, resource.TestCase{ PreCheck: func() { acc.TestAccPreCheck(t) }, Providers: acc.TestAccProviders, @@ -30,9 +29,7 @@ func TestAccIBMPISnapshotsDataSource_basic(t *testing.T) { func testAccCheckIBMPISnapshotsDataSourceConfig() string { return fmt.Sprintf(` - -data "ibm_pi_instance_snapshots" "testacc_ds_snapshots" { - pi_cloud_instance_id = "%s" -}`, acc.Pi_cloud_instance_id) - + data "ibm_pi_instance_snapshots" "testacc_ds_snapshots" { + pi_cloud_instance_id = "%s" + }`, acc.Pi_cloud_instance_id) } diff --git a/ibm/service/power/data_source_ibm_pi_spp_placement_group.go b/ibm/service/power/data_source_ibm_pi_spp_placement_group.go index 5b6f632ae3..2985750318 100644 --- a/ibm/service/power/data_source_ibm_pi_spp_placement_group.go +++ b/ibm/service/power/data_source_ibm_pi_spp_placement_group.go @@ -6,60 +6,61 @@ package power import ( "context" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - st "github.com/IBM-Cloud/power-go-client/clients/instance" - "github.com/IBM-Cloud/power-go-client/helpers" + "github.com/IBM-Cloud/power-go-client/clients/instance" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func DataSourceIBMPISPPPlacementGroup() *schema.Resource { - return &schema.Resource{ ReadContext: dataSourceIBMPISPPPlacementGroupRead, Schema: map[string]*schema.Schema{ - Arg_SPPPlacementGroupID: { - Type: schema.TypeString, - Required: true, - }, - - Attr_SPPPlacementGroupName: { - Type: schema.TypeString, - Computed: true, - }, - - Attr_SPPPlacementGroupPolicy: { - Type: schema.TypeString, - Computed: true, - }, - - helpers.PICloudInstanceId: { + // Arguments + Arg_CloudInstanceID: { + Description: "The GUID of the service instance associated with an account.", + Required: true, Type: schema.TypeString, + ValidateFunc: validation.NoZeroValues, + }, + Arg_SPPPlacementGroupID: { + Description: "The ID of the shared processor pool placement group.", Required: true, + Type: schema.TypeString, ValidateFunc: validation.NoZeroValues, }, - Attr_SPPPlacementGroupMembers: { - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, - Computed: true, + // Attributes + Attr_Members: { + Computed: true, + Description: "List of shared processor pool IDs that are members of the placement group.", + Elem: &schema.Schema{Type: schema.TypeString}, + Type: schema.TypeList, + }, + Attr_Name: { + Computed: true, + Description: "The name of the shared processor pool placement group.", + Type: schema.TypeString, + }, + Attr_Policy: { + Computed: true, + Description: "The value of the group's affinity policy. 
Valid values are affinity and anti-affinity.", + Type: schema.TypeString, }, }, } } func dataSourceIBMPISPPPlacementGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - sess, err := meta.(conns.ClientSession).IBMPISession() if err != nil { return diag.FromErr(err) } - cloudInstanceID := d.Get(helpers.PICloudInstanceId).(string) + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) placementGroupID := d.Get(Arg_SPPPlacementGroupID).(string) - client := st.NewIBMPISPPPlacementGroupClient(ctx, sess, cloudInstanceID) + client := instance.NewIBMPISPPPlacementGroupClient(ctx, sess, cloudInstanceID) response, err := client.Get(placementGroupID) if err != nil || response == nil { @@ -67,9 +68,9 @@ func dataSourceIBMPISPPPlacementGroupRead(ctx context.Context, d *schema.Resourc } d.SetId(*response.ID) - d.Set(Attr_SPPPlacementGroupName, response.Name) - d.Set(Attr_SPPPlacementGroupPolicy, response.Policy) - d.Set(Attr_SPPPlacementGroupMembers, response.MemberSharedProcessorPools) + d.Set(Attr_Members, response.MemberSharedProcessorPools) + d.Set(Attr_Name, response.Name) + d.Set(Attr_Policy, response.Policy) return nil } diff --git a/ibm/service/power/data_source_ibm_pi_spp_placement_group_test.go b/ibm/service/power/data_source_ibm_pi_spp_placement_group_test.go index a5f5d9d187..515a72b089 100644 --- a/ibm/service/power/data_source_ibm_pi_spp_placement_group_test.go +++ b/ibm/service/power/data_source_ibm_pi_spp_placement_group_test.go @@ -28,9 +28,8 @@ func TestAccIBMPISPPPlacementGroupDataSource_basic(t *testing.T) { func testAccCheckIBMPISPPPlacementGroupDataSourceConfig() string { return fmt.Sprintf(` -data "ibm_pi_spp_placement_group" "testacc_ds_spp_placement_group" { - pi_spp_placement_group_id = "%s" - pi_cloud_instance_id = "%s" -}`, acc.Pi_spp_placement_group_id, acc.Pi_cloud_instance_id) - + data "ibm_pi_spp_placement_group" "testacc_ds_spp_placement_group" { + pi_spp_placement_group_id = "%s" + pi_cloud_instance_id = "%s" + }`, acc.Pi_spp_placement_group_id, acc.Pi_cloud_instance_id) } diff --git a/ibm/service/power/data_source_ibm_pi_spp_placement_groups.go b/ibm/service/power/data_source_ibm_pi_spp_placement_groups.go index 8ac4d5d678..b75ab43e42 100644 --- a/ibm/service/power/data_source_ibm_pi_spp_placement_groups.go +++ b/ibm/service/power/data_source_ibm_pi_spp_placement_groups.go @@ -6,55 +6,56 @@ package power import ( "context" + "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" "github.com/hashicorp/go-uuid" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - - st "github.com/IBM-Cloud/power-go-client/clients/instance" - "github.com/IBM-Cloud/power-go-client/helpers" - "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" -) - -const ( - PISPPPlacementGroups = "spp_placement_groups" ) func DataSourceIBMPISPPPlacementGroups() *schema.Resource { return &schema.Resource{ ReadContext: dataSourceIBMPISPPPlacementGroupsRead, Schema: map[string]*schema.Schema{ - helpers.PICloudInstanceId: { - Type: schema.TypeString, + // Arguments + Arg_CloudInstanceID: { + Description: "The GUID of the service instance associated with an account.", Required: true, - Description: "PI cloud instance ID", + Type: schema.TypeString, ValidateFunc: validation.NoZeroValues, }, - // Computed Attributes - PISPPPlacementGroups: { - Type: schema.TypeList, - Computed: true, 
+ + // Attributes + Attr_SPPPlacementGroups: { + Computed: true, + Description: "List of all the shared processor pool placement groups.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - Attr_SPPPlacementGroupID: { - Type: schema.TypeString, - Computed: true, + Attr_Members: { + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "The list of shared processor pool IDs that are members of the shared processor pool placement group.", + Type: schema.TypeList, }, - Attr_SPPPlacementGroupName: { - Type: schema.TypeString, - Computed: true, + Attr_Name: { + Computed: true, + Description: "User defined name for the shared processor pool placement group.", + Type: schema.TypeString, }, - Attr_SPPPlacementGroupMembers: { - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, - Computed: true, + Attr_Policy: { + Computed: true, + Description: "The value of the group's affinity policy. Valid values are affinity and anti-affinity.", + Type: schema.TypeString, }, - Attr_SPPPlacementGroupPolicy: { - Type: schema.TypeString, - Computed: true, + Attr_SPPPlacementGroupID: { + Computed: true, + Description: "The ID of the shared processor pool placement group.", + Type: schema.TypeString, }, }, }, + Type: schema.TypeList, }, }, } @@ -66,9 +67,9 @@ func dataSourceIBMPISPPPlacementGroupsRead(ctx context.Context, d *schema.Resour return diag.FromErr(err) } - cloudInstanceID := d.Get(helpers.PICloudInstanceId).(string) + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) - client := st.NewIBMPISPPPlacementGroupClient(ctx, sess, cloudInstanceID) + client := instance.NewIBMPISPPPlacementGroupClient(ctx, sess, cloudInstanceID) groups, err := client.GetAll() if err != nil || groups == nil { return diag.Errorf("error fetching spp placement groups: %v", err) @@ -77,17 +78,17 @@ func dataSourceIBMPISPPPlacementGroupsRead(ctx context.Context, d *schema.Resour result := make([]map[string]interface{}, 0, len(groups.SppPlacementGroups)) for _, placementGroup := range groups.SppPlacementGroups { key := map[string]interface{}{ - Attr_SPPPlacementGroupID: placementGroup.ID, - Attr_SPPPlacementGroupName: placementGroup.Name, - Attr_SPPPlacementGroupMembers: placementGroup.MemberSharedProcessorPools, - Attr_SPPPlacementGroupPolicy: placementGroup.Policy, + Attr_Members: placementGroup.MemberSharedProcessorPools, + Attr_Name: placementGroup.Name, + Attr_Policy: placementGroup.Policy, + Attr_SPPPlacementGroupID: placementGroup.ID, } result = append(result, key) } var genID, _ = uuid.GenerateUUID() d.SetId(genID) - d.Set(PISPPPlacementGroups, result) + d.Set(Attr_SPPPlacementGroups, result) return nil } diff --git a/ibm/service/power/data_source_ibm_pi_storage_pool_capacity.go b/ibm/service/power/data_source_ibm_pi_storage_pool_capacity.go index 84500c05d0..004cffea82 100644 --- a/ibm/service/power/data_source_ibm_pi_storage_pool_capacity.go +++ b/ibm/service/power/data_source_ibm_pi_storage_pool_capacity.go @@ -6,58 +6,53 @@ package power import ( "context" "fmt" - "log" - st "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/power-go-client/clients/instance" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" - - "github.com/IBM-Cloud/power-go-client/helpers" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) -const ( - PIPoolName = "pi_storage_pool" -) - func DataSourceIBMPIStoragePoolCapacity() 
*schema.Resource { return &schema.Resource{ ReadContext: dataSourceIBMPIStoragePoolCapacityRead, Schema: map[string]*schema.Schema{ - helpers.PICloudInstanceId: { - Type: schema.TypeString, + // Arguments + Arg_CloudInstanceID: { + Description: "The GUID of the service instance associated with an account.", Required: true, + Type: schema.TypeString, ValidateFunc: validation.NoZeroValues, }, - PIPoolName: { - Type: schema.TypeString, + Arg_StoragePool: { + Description: "The storage pool name.", Required: true, + Type: schema.TypeString, ValidateFunc: validation.NoZeroValues, - Description: "Storage pool name", }, - // Computed Attributes - MaxAllocationSize: { - Type: schema.TypeInt, + + // Attributes + Attr_MaxAllocationSize: { Computed: true, - Description: "Maximum allocation storage size (GB)", + Description: "Maximum allocation storage size (GB).", + Type: schema.TypeInt, }, - StorageType: { - Type: schema.TypeString, + Attr_ReplicationEnabled: { Computed: true, - Description: "Storage type of the storage pool", + Description: "Replication status of the storage pool.", + Type: schema.TypeBool, }, - TotalCapacity: { - Type: schema.TypeInt, + Attr_StorageType: { Computed: true, - Description: "Total pool capacity (GB)", + Description: "Storage type of the storage pool.", + Type: schema.TypeString, }, - ReplicationEnabled: { - Type: schema.TypeBool, + Attr_TotalCapacity: { Computed: true, - Description: "Replication status of the storage pool", + Description: "Total pool capacity (GB).", + Type: schema.TypeInt, }, }, } @@ -69,10 +64,10 @@ func dataSourceIBMPIStoragePoolCapacityRead(ctx context.Context, d *schema.Resou return diag.FromErr(err) } - cloudInstanceID := d.Get(helpers.PICloudInstanceId).(string) - storagePool := d.Get(PIPoolName).(string) + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) + storagePool := d.Get(Arg_StoragePool).(string) - client := st.NewIBMPIStorageCapacityClient(ctx, sess, cloudInstanceID) + client := instance.NewIBMPIStorageCapacityClient(ctx, sess, cloudInstanceID) sp, err := client.GetStoragePoolCapacity(storagePool) if err != nil { log.Printf("[ERROR] get storage pool capacity failed %v", err) @@ -80,9 +75,9 @@ func dataSourceIBMPIStoragePoolCapacityRead(ctx context.Context, d *schema.Resou } d.SetId(fmt.Sprintf("%s/%s", cloudInstanceID, storagePool)) - d.Set(MaxAllocationSize, *sp.MaxAllocationSize) - d.Set(StorageType, sp.StorageType) - d.Set(TotalCapacity, sp.TotalCapacity) - d.Set(ReplicationEnabled, *sp.ReplicationEnabled) + d.Set(Attr_MaxAllocationSize, *sp.MaxAllocationSize) + d.Set(Attr_ReplicationEnabled, *sp.ReplicationEnabled) + d.Set(Attr_StorageType, sp.StorageType) + d.Set(Attr_TotalCapacity, sp.TotalCapacity) return nil } diff --git a/ibm/service/power/data_source_ibm_pi_storage_pools_capacity.go b/ibm/service/power/data_source_ibm_pi_storage_pools_capacity.go index 4e64ec5209..6c5ac36fd4 100644 --- a/ibm/service/power/data_source_ibm_pi_storage_pools_capacity.go +++ b/ibm/service/power/data_source_ibm_pi_storage_pools_capacity.go @@ -5,95 +5,82 @@ package power import ( "context" - "log" - st "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/power-go-client/clients/instance" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" - - "github.com/IBM-Cloud/power-go-client/helpers" "github.com/hashicorp/go-uuid" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) -const ( - MaximumStorageAllocation = "maximum_storage_allocation" - StoragePoolsCapacity = "storage_pools_capacity" - MaxAllocationSize = "max_allocation_size" - PoolName = "pool_name" - StoragePool = "storage_pool" - StorageType = "storage_type" - TotalCapacity = "total_capacity" - ReplicationEnabled = "replication_enabled" -) - func DataSourceIBMPIStoragePoolsCapacity() *schema.Resource { return &schema.Resource{ ReadContext: dataSourceIBMPIStoragePoolsCapacityRead, Schema: map[string]*schema.Schema{ - helpers.PICloudInstanceId: { - Type: schema.TypeString, + // Arguments + Arg_CloudInstanceID: { + Description: "The GUID of the service instance associated with an account.", Required: true, + Type: schema.TypeString, ValidateFunc: validation.NoZeroValues, }, - // Computed Attributes - MaximumStorageAllocation: { - Type: schema.TypeMap, + + // Attributes + Attr_MaximumStorageAllocation: { Computed: true, - Description: "Maximum storage allocation", + Description: "Maximum storage allocation.", + Type: schema.TypeMap, }, - StoragePoolsCapacity: { - Type: schema.TypeList, + Attr_StoragePoolsCapacity: { Computed: true, - Description: "Storage pools capacity", + Description: "List of storage pools capacity.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - MaxAllocationSize: { - Type: schema.TypeInt, + Attr_MaxAllocationSize: { Computed: true, - Description: "Maximum allocation storage size (GB)", + Description: "Maximum allocation storage size (GB).", + Type: schema.TypeInt, }, - PoolName: { - Type: schema.TypeString, + Attr_PoolName: { Computed: true, - Description: "Pool name", - }, - StorageType: { + Description: "The pool name.", Type: schema.TypeString, + }, + Attr_ReplicationEnabled: { Computed: true, - Description: "Storage type of the storage pool", + Description: "Replication status of the storage pool.", + Type: schema.TypeBool, }, - TotalCapacity: { - Type: schema.TypeInt, + Attr_StorageType: { Computed: true, - Description: "Total pool capacity (GB)", + Description: "Storage type of the storage pool.", + Type: schema.TypeString, }, - ReplicationEnabled: { - Type: schema.TypeBool, + Attr_TotalCapacity: { Computed: true, - Description: "Replication status of the storage pool", + Description: "Total pool capacity (GB).", + Type: schema.TypeInt, }, }, }, + Type: schema.TypeList, }, }, } } func dataSourceIBMPIStoragePoolsCapacityRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - sess, err := meta.(conns.ClientSession).IBMPISession() if err != nil { return diag.FromErr(err) } - cloudInstanceID := d.Get(helpers.PICloudInstanceId).(string) + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) - client := st.NewIBMPIStorageCapacityClient(ctx, sess, cloudInstanceID) + client := instance.NewIBMPIStorageCapacityClient(ctx, sess, cloudInstanceID) spc, err := client.GetAllStoragePoolsCapacity() if err != nil { log.Printf("[ERROR] get all storage pools capacity failed %v", err) @@ -106,26 +93,25 @@ func dataSourceIBMPIStoragePoolsCapacityRead(ctx context.Context, d *schema.Reso if spc.MaximumStorageAllocation != nil { msa := spc.MaximumStorageAllocation data := map[string]interface{}{ - MaxAllocationSize: *msa.MaxAllocationSize, - StoragePool: *msa.StoragePool, - StorageType: *msa.StorageType, + Attr_MaxAllocationSize: *msa.MaxAllocationSize, + Attr_StoragePool: *msa.StoragePool, + Attr_StorageType: *msa.StorageType, } - d.Set(MaximumStorageAllocation, flex.Flatten(data)) + 
d.Set(Attr_MaximumStorageAllocation, flex.Flatten(data)) } result := make([]map[string]interface{}, 0, len(spc.StoragePoolsCapacity)) for _, sp := range spc.StoragePoolsCapacity { data := map[string]interface{}{ - MaxAllocationSize: *sp.MaxAllocationSize, - PoolName: sp.PoolName, - StorageType: sp.StorageType, - TotalCapacity: sp.TotalCapacity, - ReplicationEnabled: *sp.ReplicationEnabled, + Attr_MaxAllocationSize: *sp.MaxAllocationSize, + Attr_PoolName: sp.PoolName, + Attr_ReplicationEnabled: *sp.ReplicationEnabled, + Attr_StorageType: sp.StorageType, + Attr_TotalCapacity: sp.TotalCapacity, } - result = append(result, data) } - d.Set(StoragePoolsCapacity, result) + d.Set(Attr_StoragePoolsCapacity, result) return nil } diff --git a/ibm/service/power/data_source_ibm_pi_storage_type_capacity.go b/ibm/service/power/data_source_ibm_pi_storage_type_capacity.go index d33d338ff1..6fa01bab6c 100644 --- a/ibm/service/power/data_source_ibm_pi_storage_type_capacity.go +++ b/ibm/service/power/data_source_ibm_pi_storage_type_capacity.go @@ -40,33 +40,33 @@ func DataSourceIBMPIStorageTypeCapacity() *schema.Resource { Description: "Storage type name", }, // Computed Attributes - MaximumStorageAllocation: { + Attr_MaximumStorageAllocation: { Type: schema.TypeMap, Computed: true, Description: "Maximum storage allocation", }, - StoragePoolsCapacity: { + Attr_StoragePoolsCapacity: { Type: schema.TypeList, Computed: true, Description: "Storage pools capacity", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - MaxAllocationSize: { + Attr_MaxAllocationSize: { Type: schema.TypeInt, Computed: true, Description: "Maximum allocation storage size (GB)", }, - PoolName: { + Attr_PoolName: { Type: schema.TypeString, Computed: true, Description: "Pool name", }, - StorageType: { + Attr_StorageType: { Type: schema.TypeString, Computed: true, Description: "Storage type of the storage pool", }, - TotalCapacity: { + Attr_TotalCapacity: { Type: schema.TypeInt, Computed: true, Description: "Total pool capacity (GB)", @@ -99,24 +99,24 @@ func dataSourceIBMPIStorageTypeCapacityRead(ctx context.Context, d *schema.Resou if stc.MaximumStorageAllocation != nil { msa := stc.MaximumStorageAllocation data := map[string]interface{}{ - MaxAllocationSize: *msa.MaxAllocationSize, - StoragePool: *msa.StoragePool, - StorageType: *msa.StorageType, + Attr_MaxAllocationSize: *msa.MaxAllocationSize, + Attr_StoragePool: *msa.StoragePool, + Attr_StorageType: *msa.StorageType, } - d.Set(MaximumStorageAllocation, flex.Flatten(data)) + d.Set(Attr_MaximumStorageAllocation, flex.Flatten(data)) } result := make([]map[string]interface{}, 0, len(stc.StoragePoolsCapacity)) for _, sp := range stc.StoragePoolsCapacity { data := map[string]interface{}{ - MaxAllocationSize: *sp.MaxAllocationSize, - PoolName: sp.PoolName, - StorageType: sp.StorageType, - TotalCapacity: sp.TotalCapacity, + Attr_MaxAllocationSize: *sp.MaxAllocationSize, + Attr_PoolName: sp.PoolName, + Attr_StorageType: sp.StorageType, + Attr_TotalCapacity: sp.TotalCapacity, } result = append(result, data) } - d.Set(StoragePoolsCapacity, result) + d.Set(Attr_StoragePoolsCapacity, result) return nil } diff --git a/ibm/service/power/data_source_ibm_pi_storage_types_capacity.go b/ibm/service/power/data_source_ibm_pi_storage_types_capacity.go index 59feb27952..5f3f169dbe 100644 --- a/ibm/service/power/data_source_ibm_pi_storage_types_capacity.go +++ b/ibm/service/power/data_source_ibm_pi_storage_types_capacity.go @@ -34,7 +34,7 @@ func DataSourceIBMPIStorageTypesCapacity() *schema.Resource 
{ ValidateFunc: validation.NoZeroValues, }, // Computed Attributes - MaximumStorageAllocation: { + Attr_MaximumStorageAllocation: { Type: schema.TypeMap, Computed: true, Description: "Maximum storage allocation", @@ -45,33 +45,33 @@ func DataSourceIBMPIStorageTypesCapacity() *schema.Resource { Description: "Storage types capacity", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - MaximumStorageAllocation: { + Attr_MaximumStorageAllocation: { Type: schema.TypeMap, Computed: true, Description: "Maximum storage allocation", }, - StoragePoolsCapacity: { + Attr_StoragePoolsCapacity: { Type: schema.TypeList, Computed: true, Description: "Storage pools capacity", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - MaxAllocationSize: { + Attr_MaxAllocationSize: { Type: schema.TypeInt, Computed: true, Description: "Maximum allocation storage size (GB)", }, - PoolName: { + Attr_PoolName: { Type: schema.TypeString, Computed: true, Description: "Pool name", }, - StorageType: { + Attr_StorageType: { Type: schema.TypeString, Computed: true, Description: "Storage type of the storage pool", }, - TotalCapacity: { + Attr_TotalCapacity: { Type: schema.TypeInt, Computed: true, Description: "Total pool capacity (GB)", @@ -79,7 +79,7 @@ func DataSourceIBMPIStorageTypesCapacity() *schema.Resource { }, }, }, - StorageType: { + Attr_StorageType: { Type: schema.TypeString, Computed: true, Description: "The storage type", @@ -112,11 +112,11 @@ func dataSourceIBMPIStorageTypesCapacityRead(ctx context.Context, d *schema.Reso if stc.MaximumStorageAllocation != nil { msa := stc.MaximumStorageAllocation data := map[string]interface{}{ - MaxAllocationSize: *msa.MaxAllocationSize, - StoragePool: *msa.StoragePool, - StorageType: *msa.StorageType, + Attr_MaxAllocationSize: *msa.MaxAllocationSize, + Attr_StoragePool: *msa.StoragePool, + Attr_StorageType: *msa.StorageType, } - d.Set(MaximumStorageAllocation, flex.Flatten(data)) + d.Set(Attr_MaximumStorageAllocation, flex.Flatten(data)) } stcResult := make([]map[string]interface{}, 0, len(stc.StorageTypesCapacity)) for _, st := range stc.StorageTypesCapacity { @@ -124,24 +124,24 @@ func dataSourceIBMPIStorageTypesCapacityRead(ctx context.Context, d *schema.Reso if st.MaximumStorageAllocation != nil { msa := st.MaximumStorageAllocation data := map[string]interface{}{ - MaxAllocationSize: *msa.MaxAllocationSize, - StoragePool: *msa.StoragePool, - StorageType: *msa.StorageType, + Attr_MaxAllocationSize: *msa.MaxAllocationSize, + Attr_StoragePool: *msa.StoragePool, + Attr_StorageType: *msa.StorageType, } - stResult[MaximumStorageAllocation] = flex.Flatten(data) + stResult[Attr_MaximumStorageAllocation] = flex.Flatten(data) } spc := make([]map[string]interface{}, 0, len(st.StoragePoolsCapacity)) for _, sp := range st.StoragePoolsCapacity { data := map[string]interface{}{ - MaxAllocationSize: *sp.MaxAllocationSize, - PoolName: sp.PoolName, - StorageType: sp.StorageType, - TotalCapacity: sp.TotalCapacity, + Attr_MaxAllocationSize: *sp.MaxAllocationSize, + Attr_PoolName: sp.PoolName, + Attr_StorageType: sp.StorageType, + Attr_TotalCapacity: sp.TotalCapacity, } spc = append(spc, data) } - stResult[StoragePoolsCapacity] = spc - stResult[StorageType] = st.StorageType + stResult[Attr_StoragePoolsCapacity] = spc + stResult[Attr_StorageType] = st.StorageType stcResult = append(stcResult, stResult) } diff --git a/ibm/service/power/data_source_ibm_pi_system_pools.go b/ibm/service/power/data_source_ibm_pi_system_pools.go index 7e2f52b633..f1664e0bc6 100644 --- 
a/ibm/service/power/data_source_ibm_pi_system_pools.go +++ b/ibm/service/power/data_source_ibm_pi_system_pools.go @@ -5,121 +5,100 @@ package power import ( "context" - "log" - st "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/power-go-client/clients/instance" "github.com/IBM-Cloud/power-go-client/power/models" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" - - "github.com/IBM-Cloud/power-go-client/helpers" "github.com/hashicorp/go-uuid" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) -const ( - SystemPoolName = "system_pool_name" - SystemPools = "system_pools" - SystemPool = "system_pool" - Capacity = "capacity" - CoreMemoryRatio = "core_memory_ratio" - MaxAvailable = "max_available" - MaxCoresAvailable = "max_cores_available" - MaxMemoryAvailable = "max_memory_available" - SharedCoreRatio = "shared_core_ratio" - Type = "type" - Systems = "systems" - Cores = "cores" - ID = "id" - Memory = "memory" - Default = "default" - Max = "max" - Min = "min" -) - func DataSourceIBMPISystemPools() *schema.Resource { return &schema.Resource{ ReadContext: dataSourceIBMPISystemPoolsRead, Schema: map[string]*schema.Schema{ - helpers.PICloudInstanceId: { - Type: schema.TypeString, + // Arguments + Arg_CloudInstanceID: { + Description: "The GUID of the service instance associated with an account.", Required: true, + Type: schema.TypeString, ValidateFunc: validation.NoZeroValues, }, - // Computed Attributes - SystemPools: { + + // Attributes + Attr_SystemPools: { Type: schema.TypeList, Computed: true, - Description: "List of available system pools within a particular DataCenter", + Description: "List of available system pools within a particular Datacenter.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - SystemPoolName: { - Type: schema.TypeString, + Attr_Capacity: { Computed: true, - Description: "The system pool name", - }, - Capacity: { + Description: "Advertised capacity cores and memory (GB).", Type: schema.TypeMap, - Computed: true, - Description: "Advertised capacity cores and memory (GB)", }, - CoreMemoryRatio: { - Type: schema.TypeFloat, + Attr_CoreMemoryRatio: { Computed: true, - Description: "Processor to Memory (GB) Ratio", + Description: "Processor to Memory (GB) Ratio.", + Type: schema.TypeFloat, }, - MaxAvailable: { - Type: schema.TypeMap, + Attr_MaxAvailable: { Computed: true, - Description: "Maximum configurable cores and memory (GB) (aggregated from all hosts)", - }, - MaxCoresAvailable: { + Description: "Maximum configurable cores and memory (GB) (aggregated from all hosts).", Type: schema.TypeMap, - Computed: true, - Description: "Maximum configurable cores available combined with available memory of that host", }, - MaxMemoryAvailable: { + Attr_MaxCoresAvailable: { + Computed: true, + Description: "Maximum configurable cores available combined with available memory of that host.", Type: schema.TypeMap, + }, + Attr_MaxMemoryAvailable: { Computed: true, - Description: "Maximum configurable memory available combined with available cores of that host", + Description: "Maximum configurable memory available combined with available cores of that host.", + Type: schema.TypeMap, }, - SharedCoreRatio: { + Attr_SharedCoreRatio: { + Computed: true, + Description: "The min-max-default allocation percentage of shared core per vCPU.", Type: schema.TypeMap, + }, 
+ Attr_SystemPoolName: { Computed: true, - Description: "The min-max-default allocation percentage of shared core per vCPU", + Description: "The system pool name", + Type: schema.TypeString, }, - Systems: { - Type: schema.TypeList, + Attr_Systems: { Computed: true, - Description: "The DataCenter list of servers and their available resources", + Description: "The Datacenter list of servers and their available resources.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - Cores: { - Type: schema.TypeString, + Attr_Cores: { Computed: true, - Description: "The host available Processor units", - }, - ID: { + Description: "The host available Processor units.", Type: schema.TypeString, - Computed: true, - Description: "The host identifier", }, - Memory: { + Attr_ID: { + Computed: true, + Description: "The host identifier.", Type: schema.TypeString, + }, + Attr_Memory: { Computed: true, - Description: "The host available RAM memory in GiB", + Description: "The host available RAM memory in GiB.", + Type: schema.TypeString, }, }, }, + Type: schema.TypeList, }, - Type: { - Type: schema.TypeString, + Attr_Type: { Computed: true, - Description: "Type of system hardware", + Description: "Type of system hardware.", + Type: schema.TypeString, }, }, }, @@ -134,9 +113,9 @@ func dataSourceIBMPISystemPoolsRead(ctx context.Context, d *schema.ResourceData, return diag.FromErr(err) } - cloudInstanceID := d.Get(helpers.PICloudInstanceId).(string) + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) - client := st.NewIBMPISystemPoolClient(ctx, sess, cloudInstanceID) + client := instance.NewIBMPISystemPoolClient(ctx, sess, cloudInstanceID) sps, err := client.GetSystemPools() if err != nil { log.Printf("[ERROR] get system pools capacity failed %v", err) @@ -149,37 +128,37 @@ func dataSourceIBMPISystemPoolsRead(ctx context.Context, d *schema.ResourceData, result := make([]map[string]interface{}, 0, len(sps)) for s, sp := range sps { data := map[string]interface{}{ - SystemPoolName: s, - Capacity: flattenMax(sp.Capacity), - CoreMemoryRatio: sp.CoreMemoryRatio, - MaxAvailable: flattenMax(sp.MaxAvailable), - MaxCoresAvailable: flattenMax(sp.MaxCoresAvailable), - MaxMemoryAvailable: flattenMax(sp.MaxMemoryAvailable), - SharedCoreRatio: flattenSharedCoreRatio(sp.SharedCoreRatio), - Type: sp.Type, - Systems: flattenSystems(sp.Systems), + Attr_SystemPoolName: s, + Attr_Capacity: flattenMax(sp.Capacity), + Attr_CoreMemoryRatio: sp.CoreMemoryRatio, + Attr_MaxAvailable: flattenMax(sp.MaxAvailable), + Attr_MaxCoresAvailable: flattenMax(sp.MaxCoresAvailable), + Attr_MaxMemoryAvailable: flattenMax(sp.MaxMemoryAvailable), + Attr_SharedCoreRatio: flattenSharedCoreRatio(sp.SharedCoreRatio), + Attr_Type: sp.Type, + Attr_Systems: flattenSystems(sp.Systems), } result = append(result, data) } - d.Set(SystemPools, result) + d.Set(Attr_SystemPools, result) return nil } func flattenMax(s *models.System) map[string]string { ret := map[string]interface{}{ - Cores: *s.Cores, - Memory: *s.Memory, + Attr_Cores: *s.Cores, + Attr_Memory: *s.Memory, } return flex.Flatten(ret) } func flattenSystem(s *models.System) map[string]string { ret := map[string]interface{}{ - Cores: *s.Cores, - ID: s.ID, - Memory: *s.Memory, + Attr_Cores: *s.Cores, + Attr_ID: s.ID, + Attr_Memory: *s.Memory, } return flex.Flatten(ret) } @@ -197,9 +176,9 @@ func flattenSystems(sl []*models.System) (systems []map[string]string) { func flattenSharedCoreRatio(scr *models.MinMaxDefault) map[string]string { ret := map[string]interface{}{ - Default: scr.Default, - 
Max: scr.Max, - Min: scr.Min, + Attr_Default: scr.Default, + Attr_Max: scr.Max, + Attr_Min: scr.Min, } return flex.Flatten(ret) } diff --git a/ibm/service/power/data_source_ibm_pi_volume.go b/ibm/service/power/data_source_ibm_pi_volume.go index faa1d47a0f..f54ccd75db 100644 --- a/ibm/service/power/data_source_ibm_pi_volume.go +++ b/ibm/service/power/data_source_ibm_pi_volume.go @@ -110,6 +110,11 @@ func DataSourceIBMPIVolume() *schema.Resource { Computed: true, Description: "Indicates master volume name", }, + "io_throttle_rate": { + Type: schema.TypeString, + Computed: true, + Description: "Amount of iops assigned to the volume", + }, }, } } @@ -145,6 +150,7 @@ func dataSourceIBMPIVolumeRead(ctx context.Context, d *schema.ResourceData, meta d.Set("primary_role", volumedata.PrimaryRole) d.Set("auxiliary_volume_name", volumedata.AuxVolumeName) d.Set("master_volume_name", volumedata.MasterVolumeName) + d.Set("io_throttle_rate", volumedata.IoThrottleRate) return nil } diff --git a/ibm/service/power/data_source_ibm_pi_volume_clone.go b/ibm/service/power/data_source_ibm_pi_volume_clone.go new file mode 100644 index 0000000000..3692be175f --- /dev/null +++ b/ibm/service/power/data_source_ibm_pi_volume_clone.go @@ -0,0 +1,78 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package power + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + st "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" +) + +func DataSourceIBMPIVolumeClone() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceIBMPIVolumeCloneRead, + Schema: map[string]*schema.Schema{ + PIVolumeCloneTaskID: { + Type: schema.TypeString, + Required: true, + Description: "The ID of the volume clone task.", + ValidateFunc: validation.NoZeroValues, + }, + Arg_CloudInstanceID: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.NoZeroValues, + Description: "The GUID of the service instance associated with an account.", + }, + // Computed attributes + "cloned_volumes": clonedVolumesSchema(), + "failure_reason": { + Type: schema.TypeString, + Computed: true, + Description: "The reason the clone volumes task has failed.", + }, + "percent_complete": { + Type: schema.TypeInt, + Computed: true, + Description: "The completion percentage of the volume clone task.", + }, + "status": { + Type: schema.TypeString, + Computed: true, + Description: "The status of the volume clone task.", + }, + }, + } +} + +func dataSourceIBMPIVolumeCloneRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + sess, err := meta.(conns.ClientSession).IBMPISession() + if err != nil { + return diag.FromErr(err) + } + + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) + client := st.NewIBMPICloneVolumeClient(ctx, sess, cloudInstanceID) + volClone, err := client.Get(d.Get(PIVolumeCloneTaskID).(string)) + if err != nil { + return diag.FromErr(err) + } + + d.SetId(d.Get(PIVolumeCloneTaskID).(string)) + if volClone.Status != nil { + d.Set("status", *volClone.Status) + } + d.Set("failure_reason", volClone.FailedReason) + if volClone.PercentComplete != nil { + d.Set("percent_complete", *volClone.PercentComplete) + } + d.Set("cloned_volumes", flattenClonedVolumes(volClone.ClonedVolumes)) + + return nil +} diff --git 
a/ibm/service/power/data_source_ibm_pi_volume_clone_test.go b/ibm/service/power/data_source_ibm_pi_volume_clone_test.go new file mode 100644 index 0000000000..a8b8d805d4 --- /dev/null +++ b/ibm/service/power/data_source_ibm_pi_volume_clone_test.go @@ -0,0 +1,38 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package power_test + +import ( + "fmt" + "testing" + + acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccIBMPIVolumeClone_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMPIVolumeCloneBasicConfig(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.ibm_pi_volume_clone.testacc_ds_volume_clone", "id"), + resource.TestCheckResourceAttrSet("data.ibm_pi_volume_clone.testacc_ds_volume_clone", "status"), + ), + }, + }, + }) +} + +func testAccCheckIBMPIVolumeCloneBasicConfig() string { + return fmt.Sprintf(` +data "ibm_pi_volume_clone" "testacc_ds_volume_clone" { + pi_volume_clone_task_id = "%s" + pi_cloud_instance_id = "%s" +}`, acc.Pi_volume_clone_task_id, acc.Pi_cloud_instance_id) + +} diff --git a/ibm/service/power/data_source_ibm_pi_workspace.go b/ibm/service/power/data_source_ibm_pi_workspace.go index 6fb23303ea..0864470922 100644 --- a/ibm/service/power/data_source_ibm_pi_workspace.go +++ b/ibm/service/power/data_source_ibm_pi_workspace.go @@ -7,7 +7,6 @@ import ( "context" "github.com/IBM-Cloud/power-go-client/clients/instance" - "github.com/IBM-Cloud/power-go-client/helpers" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -15,68 +14,63 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) -const ( - WorkspaceCreationDate = "creation_date" - WorkspaceCRN = "crn" - WorkspaceRegion = "region" - WorkspaceType = "type" - WorkspaceUrl = "url" -) - func DatasourceIBMPIWorkspace() *schema.Resource { return &schema.Resource{ ReadContext: dataSourceIBMPIWorkspaceRead, Schema: map[string]*schema.Schema{ + // Arguments Arg_CloudInstanceID: { - Type: schema.TypeString, + Description: "The GUID of the service instance associated with an account.", Required: true, + Type: schema.TypeString, ValidateFunc: validation.NoZeroValues, }, + + // Attributes Attr_WorkspaceCapabilities: { - Type: schema.TypeMap, Computed: true, - Description: "Workspace Capabilities", + Description: "Workspace Capabilities.", Elem: &schema.Schema{ Type: schema.TypeBool, }, + Type: schema.TypeMap, }, Attr_WorkspaceDetails: { - Type: schema.TypeMap, Computed: true, - Description: "Workspace information", + Description: "Workspace information.", + Type: schema.TypeMap, }, Attr_WorkspaceLocation: { - Type: schema.TypeMap, Computed: true, - Description: "Workspace location", + Description: "Workspace location.", + Type: schema.TypeMap, }, Attr_WorkspaceName: { - Type: schema.TypeString, Computed: true, - Description: "Workspace name", + Description: "Workspace name.", + Type: schema.TypeString, }, Attr_WorkspaceStatus: { - Type: schema.TypeString, Computed: true, - Description: "Workspace status", + Description: "Workspace status, active, critical, failed, provisioning.", + Type: schema.TypeString, }, Attr_WorkspaceType: { - Type: schema.TypeString, 
Computed: true, - Description: "Workspace type", + Description: "Workspace type, off-premises or on-premises.", + Type: schema.TypeString, }, }, } } func dataSourceIBMPIWorkspaceRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - // session sess, err := meta.(conns.ClientSession).IBMPISession() if err != nil { return diag.FromErr(err) } - cloudInstanceID := d.Get(helpers.PICloudInstanceId).(string) + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) client := instance.NewIBMPIWorkspacesClient(ctx, sess, cloudInstanceID) wsData, err := client.Get(cloudInstanceID) if err != nil { @@ -88,14 +82,14 @@ func dataSourceIBMPIWorkspaceRead(ctx context.Context, d *schema.ResourceData, m d.Set(Attr_WorkspaceType, wsData.Type) d.Set(Attr_WorkspaceCapabilities, wsData.Capabilities) wsdetails := map[string]interface{}{ - WorkspaceCreationDate: wsData.Details.CreationDate.String(), - WorkspaceCRN: *wsData.Details.Crn, + Attr_CreationDate: wsData.Details.CreationDate.String(), + Attr_CRN: *wsData.Details.Crn, } d.Set(Attr_WorkspaceDetails, flex.Flatten(wsdetails)) wslocation := map[string]interface{}{ - WorkspaceRegion: *wsData.Location.Region, - WorkspaceType: wsData.Location.Type, - WorkspaceUrl: wsData.Location.URL, + Attr_Region: *wsData.Location.Region, + Attr_Type: wsData.Location.Type, + Attr_URL: wsData.Location.URL, } d.Set(Attr_WorkspaceLocation, flex.Flatten(wslocation)) d.SetId(*wsData.ID) diff --git a/ibm/service/power/data_source_ibm_pi_workspace_test.go b/ibm/service/power/data_source_ibm_pi_workspace_test.go index b2adea8072..4a7df8e63a 100644 --- a/ibm/service/power/data_source_ibm_pi_workspace_test.go +++ b/ibm/service/power/data_source_ibm_pi_workspace_test.go @@ -25,10 +25,10 @@ func TestAccIBMPIWorkspaceDataSourceBasic(t *testing.T) { }, }) } + func testAccCheckIBMPIWorkspaceDataSourceConfig() string { return fmt.Sprintf(` data "ibm_pi_workspace" "test" { pi_cloud_instance_id = "%s" - } - `, acc.Pi_cloud_instance_id) + }`, acc.Pi_cloud_instance_id) } diff --git a/ibm/service/power/data_source_ibm_pi_workspaces.go b/ibm/service/power/data_source_ibm_pi_workspaces.go index c66e924b63..1208562947 100644 --- a/ibm/service/power/data_source_ibm_pi_workspaces.go +++ b/ibm/service/power/data_source_ibm_pi_workspaces.go @@ -7,7 +7,6 @@ import ( "context" "github.com/IBM-Cloud/power-go-client/clients/instance" - "github.com/IBM-Cloud/power-go-client/helpers" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" "github.com/hashicorp/go-uuid" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -15,62 +14,61 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) -const ( - Workspaces = "workspaces" -) - func DatasourceIBMPIWorkspaces() *schema.Resource { return &schema.Resource{ ReadContext: dataSourceIBMPIWorkspacesRead, Schema: map[string]*schema.Schema{ + // Arguments Arg_CloudInstanceID: { - Type: schema.TypeString, + Description: "The GUID of the service instance associated with an account.", Required: true, + Type: schema.TypeString, ValidateFunc: validation.NoZeroValues, }, - Workspaces: { + + // Attributes + Attr_Workspaces: { Type: schema.TypeList, Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - Attr_WorkspaceCapabilities: { - Type: schema.TypeMap, Computed: true, - Description: "Workspace Capabilities", + Description: "Workspace Capabilities.", Elem: &schema.Schema{ Type: schema.TypeBool, }, + Type: schema.TypeMap, }, Attr_WorkspaceDetails: { - Type: schema.TypeMap, Computed: true, - 
Description: "Workspace information", + Description: "Workspace information.", + Type: schema.TypeMap, }, Attr_WorkspaceID: { - Type: schema.TypeString, Computed: true, - Description: "Workspace ID", + Description: "Workspace ID.", + Type: schema.TypeString, }, Attr_WorkspaceLocation: { - Type: schema.TypeMap, Computed: true, - Description: "Workspace location", + Description: "Workspace location.", + Type: schema.TypeMap, }, Attr_WorkspaceName: { - Type: schema.TypeString, Computed: true, - Description: "Workspace name", + Description: "Workspace name.", + Type: schema.TypeString, }, Attr_WorkspaceStatus: { - Type: schema.TypeString, Computed: true, - Description: "Workspace status", + Description: "Workspace status, active, critical, failed, provisioning.", + Type: schema.TypeString, }, Attr_WorkspaceType: { - Type: schema.TypeString, Computed: true, - Description: "Workspace type", + Description: "Workspace type, off-premises or on-premises.", + Type: schema.TypeString, }, }, }, @@ -79,13 +77,12 @@ func DatasourceIBMPIWorkspaces() *schema.Resource { } } func dataSourceIBMPIWorkspacesRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - // session sess, err := meta.(conns.ClientSession).IBMPISession() if err != nil { return diag.FromErr(err) } - cloudInstanceID := d.Get(helpers.PICloudInstanceId).(string) + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) client := instance.NewIBMPIWorkspacesClient(ctx, sess, cloudInstanceID) wsData, err := client.GetAll() if err != nil { @@ -95,26 +92,26 @@ func dataSourceIBMPIWorkspacesRead(ctx context.Context, d *schema.ResourceData, for _, ws := range wsData.Workspaces { if ws != nil { workspace := map[string]interface{}{ - Attr_WorkspaceName: ws.Name, - Attr_WorkspaceID: ws.ID, - Attr_WorkspaceStatus: ws.Status, - Attr_WorkspaceType: ws.Type, Attr_WorkspaceCapabilities: ws.Capabilities, Attr_WorkspaceDetails: map[string]interface{}{ - WorkspaceCreationDate: ws.Details.CreationDate.String(), - WorkspaceCRN: *ws.Details.Crn, + Attr_CreationDate: ws.Details.CreationDate.String(), + Attr_CRN: *ws.Details.Crn, }, + Attr_WorkspaceID: ws.ID, Attr_WorkspaceLocation: map[string]interface{}{ - WorkspaceRegion: *ws.Location.Region, - WorkspaceType: ws.Location.Type, - WorkspaceUrl: ws.Location.URL, + Attr_Region: *ws.Location.Region, + Attr_Type: ws.Location.Type, + Attr_URL: ws.Location.URL, }, + Attr_WorkspaceName: ws.Name, + Attr_WorkspaceStatus: ws.Status, + Attr_WorkspaceType: ws.Type, } workspaces = append(workspaces, workspace) } } var clientgenU, _ = uuid.GenerateUUID() d.SetId(clientgenU) - d.Set(Workspaces, workspaces) + d.Set(Attr_Workspaces, workspaces) return nil } diff --git a/ibm/service/power/data_source_ibm_pi_workspaces_test.go b/ibm/service/power/data_source_ibm_pi_workspaces_test.go index 076d65a342..10b38e677a 100644 --- a/ibm/service/power/data_source_ibm_pi_workspaces_test.go +++ b/ibm/service/power/data_source_ibm_pi_workspaces_test.go @@ -30,6 +30,5 @@ func testAccCheckIBMPIWorkspacesDataSourceConfig() string { return fmt.Sprintf(` data "ibm_pi_workspaces" "test" { pi_cloud_instance_id = "%s" - } - `, acc.Pi_cloud_instance_id) + }`, acc.Pi_cloud_instance_id) } diff --git a/ibm/service/power/ibm_pi_constants.go b/ibm/service/power/ibm_pi_constants.go index 8e66a4bd83..c790851491 100644 --- a/ibm/service/power/ibm_pi_constants.go +++ b/ibm/service/power/ibm_pi_constants.go @@ -3,18 +3,252 @@ package power import "time" const ( - // used by all - Arg_CloudInstanceID = "pi_cloud_instance_id" + // 
Arguments + Arg_CloudConnectionName = "pi_cloud_connection_name" + Arg_CloudInstanceID = "pi_cloud_instance_id" + Arg_ImageName = "pi_image_name" + Arg_InstanceName = "pi_instance_name" + Arg_Key = "pi_ssh_key" + Arg_KeyName = "pi_key_name" + Arg_NetworkName = "pi_network_name" + Arg_PlacementGroupName = "pi_placement_group_name" + Arg_SAP = "sap" + Arg_SAPProfileID = "pi_sap_profile_id" + Arg_SPPPlacementGroupID = "pi_spp_placement_group_id" + Arg_SPPPlacementGroupName = "pi_spp_placement_group_name" + Arg_SPPPlacementGroupPolicy = "pi_spp_placement_group_policy" + Arg_SharedProcessorPoolHostGroup = "pi_shared_processor_pool_host_group" + Arg_SharedProcessorPoolID = "pi_shared_processor_pool_id" + Arg_SharedProcessorPoolName = "pi_shared_processor_pool_name" + Arg_SharedProcessorPoolPlacementGroupID = "pi_shared_processor_pool_placement_group_id" + Arg_SharedProcessorPoolReservedCores = "pi_shared_processor_pool_reserved_cores" + Arg_StoragePool = "pi_storage_pool" + Arg_StorageType = "pi_storage_type" + Arg_VTL = "vtl" + Arg_VolumeGroupID = "pi_volume_group_id" + Arg_VolumeID = "pi_volume_id" + Arg_VolumeOnboardingID = "pi_volume_onboarding_id" - // Keys - Arg_KeyName = "pi_key_name" - Arg_Key = "pi_ssh_key" + // Attributes + Attr_AccessConfig = "access_config" + Attr_Action = "action" + Attr_Addresses = "addresses" + Attr_AllocatedCores = "allocated_cores" + Attr_Architecture = "architecture" + Attr_Auxiliary = "auxiliary" + Attr_AuxiliaryChangedVolumeName = "auxiliary_changed_volume_name" + Attr_AuxiliaryVolumeName = "auxiliary_volume_name" + Attr_AvailabilityZone = "availability_zone" + Attr_AvailableCores = "available_cores" + Attr_AvailableIPCount = "available_ip_count" + Attr_BootVolumeID = "boot_volume_id" + Attr_Bootable = "bootable" + Attr_CIDR = "cidr" + Attr_CPUs = "cpus" + Attr_CRN = "crn" + Attr_Capabilities = "capabilities" + Attr_Capacity = "capacity" + Attr_Certified = "certified" + Attr_ClassicEnabled = "classic_enabled" + Attr_CloudConnectionID = "cloud_connection_id" + Attr_CloudInstanceID = "cloud_instance_id" + Attr_CloudInstances = "cloud_instances" + Attr_Code = "code" + Attr_ConnectionMode = "connection_mode" + Attr_Connections = "connections" + Attr_ConsistencyGroupName = "consistency_group_name" + Attr_ConsoleLanguages = "console_languages" + Attr_ContainerFormat = "container_format" + Attr_CopyRate = "copy_rate" + Attr_CopyType = "copy_type" + Attr_CoreMemoryRatio = "core_memory_ratio" + Attr_Cores = "cores" + Attr_CreateTime = "create_time" + Attr_CreationDate = "creation_date" + Attr_CyclePeriodSeconds = "cycle_period_seconds" + Attr_CyclingMode = "cycling_mode" + Attr_DNS = "dns" + Attr_Datacenters = "datacenters" + Attr_Default = "default" + Attr_DeploymentType = "deployment_type" + Attr_Description = "description" + Attr_DisasterRecoveryLocations = "disaster_recovery_locations" + Attr_DiskFormat = "disk_format" + Attr_DiskType = "disk_type" + Attr_Enabled = "enabled" + Attr_Endianness = "endianness" + Attr_ExternalIP = "external_ip" + Attr_FailureMessage = "failure_message" + Attr_FlashCopyMappings = "flash_copy_mappings" + Attr_FlashCopyName = "flash_copy_name" + Attr_FreezeTime = "freeze_time" + Attr_Gateway = "gateway" + Attr_GlobalRouting = "global_routing" + Attr_GreDestinationAddress = "gre_destination_address" + Attr_GreSourceAddress = "gre_source_address" + Attr_GroupID = "group_id" + Attr_HealthStatus = "health_status" + Attr_HostID = "host_id" + Attr_Href = "href" + Attr_Hypervisor = "hypervisor" + Attr_HypervisorType = "hypervisor_type" + 
Attr_IBMIPAddress = "ibm_ip_address" + Attr_ID = "id" + Attr_IP = "ip" + Attr_IPAddress = "ipaddress" + Attr_IPOctet = "ipoctet" + Attr_ImageID = "image_id" + Attr_ImageInfo = "image_info" + Attr_ImageType = "image_type" + Attr_Images = "images" + Attr_InputVolumes = "input_volumes" + Attr_InstanceSnapshots = "instance_snapshots" + Attr_InstanceVolumes = "instance_volumes" + Attr_Instances = "instances" + Attr_IsActive = "is_active" + Attr_Jumbo = "jumbo" + Attr_Key = "key" + Attr_KeyID = "key_id" + Attr_KeyName = "name" + Attr_Keys = "keys" + Attr_Language = "language" + Attr_LastUpdateDate = "last_update_date" + Attr_LastUpdatedDate = "last_updated_date" + Attr_Leases = "leases" + Attr_LicenseRepositoryCapacity = "license_repository_capacity" + Attr_Location = "location" + Attr_MTU = "mtu" + Attr_MacAddress = "macaddress" + Attr_MasterChangedVolumeName = "master_changed_volume_name" + Attr_MasterVolumeName = "master_volume_name" + Attr_Max = "max" + Attr_MaxAllocationSize = "max_allocation_size" + Attr_MaxAvailable = "max_available" + Attr_MaxCoresAvailable = "max_cores_available" + Attr_MaxMem = "maxmem" + Attr_MaxMemoryAvailable = "max_memory_available" + Attr_MaxProc = "maxproc" + Attr_MaxVirtualCores = "max_virtual_cores" + Attr_MaximumStorageAllocation = "maximum_storage_allocation" + Attr_Members = "members" + Attr_Memory = "memory" + Attr_Message = "message" + Attr_Metered = "metered" + Attr_Min = "min" + Attr_MinMem = "minmem" + Attr_MinProc = "minproc" + Attr_MinVirtualCores = "min_virtual_cores" + Attr_MirroringState = "mirroring_state" + Attr_Name = "name" + Attr_NetworkID = "network_id" + Attr_NetworkName = "network_name" + Attr_NetworkPorts = "network_ports" + Attr_Networks = "networks" + Attr_NumberOfVolumes = "number_of_volumes" + Attr_Onboardings = "onboardings" + Attr_OperatingSystem = "operating_system" + Attr_PVMInstanceID = "pvm_instance_id" + Attr_PVMInstances = "pvm_instances" + Attr_PVMSnapshots = "pvm_snapshots" + Attr_PercentComplete = "percent_complete" + Attr_PinPolicy = "pin_policy" + Attr_PlacementGroupID = "placement_group_id" + Attr_PlacementGroups = "placement_groups" + Attr_Policy = "policy" + Attr_Pool = "pool" + Attr_PoolName = "pool_name" + Attr_Port = "port" + Attr_PortID = "portid" + Attr_PrimaryRole = "primary_role" + Attr_ProcType = "proctype" + Attr_Processors = "processors" + Attr_ProfileID = "profile_id" + Attr_Profiles = "profiles" + Attr_Progress = "progress" + Attr_PublicIP = "public_ip" + Attr_Region = "region" + Attr_RemoteCopyID = "remote_copy_id" + Attr_RemoteCopyRelationshipNames = "remote_copy_relationship_names" + Attr_RemoteCopyRelationships = "remote_copy_relationships" + Attr_ReplicationEnabled = "replication_enabled" + Attr_ReplicationSites = "replication_sites" + Attr_ReplicationStatus = "replication_status" + Attr_ReplicationType = "replication_type" + Attr_ReservedCores = "reserved_cores" + Attr_ResultsOnboardedVolumes = "results_onboarded_volumes" + Attr_ResultsVolumeOnboardingFailures = "results_volume_onboarding_failures" + Attr_SPPPlacementGroups = "spp_placement_groups" + Attr_SSHKey = "ssh_key" + Attr_Shareable = "shareable" + Attr_SharedCoreRatio = "shared_core_ratio" + Attr_SharedProcessorPool = "shared_processor_pool" + Attr_SharedProcessorPoolID = "shared_processor_pool_id" + Attr_SharedProcessorPoolPlacementGroups = "spp_placement_groups" + Attr_SharedProcessorPoolStatus = "status" + Attr_SharedProcessorPools = "shared_processor_pools" + Attr_SharedProcessorPoolName = "name" + Attr_SharedProcessorPoolHostID = "host_id" +
Attr_SharedProcessorPoolReservedCores = "reserved_cores" + Attr_SharedProcessorPoolAvailableCores = "available_cores" + Attr_SharedProcessorPoolAllocatedCores = "allocated_cores" + Attr_SharedProcessorPoolStatusDetail = "status_detail" + Attr_SharedProcessorPoolInstances = "instances" + Attr_SharedProcessorPoolInstanceCpus = "cpus" + Attr_SharedProcessorPoolInstanceUncapped = "uncapped" + Attr_SharedProcessorPoolInstanceAvailabilityZone = "availability_zone" + Attr_SharedProcessorPoolInstanceId = "id" + Attr_SharedProcessorPoolInstanceMemory = "memory" + Attr_SharedProcessorPoolInstanceName = "name" + Attr_SharedProcessorPoolInstanceStatus = "status" + Attr_SharedProcessorPoolInstanceVcpus = "vcpus" + Attr_Size = "size" + Attr_SourceVolumeName = "source_volume_name" + Attr_Speed = "speed" + Attr_StartTime = "start_time" + Attr_State = "state" + Attr_Status = "status" + Attr_StatusDescriptionErrors = "status_description_errors" + Attr_StatusDetail = "status_detail" + Attr_StoragePool = "storage_pool" + Attr_StoragePoolAffinity = "storage_pool_affinity" + Attr_StoragePoolsCapacity = "storage_pools_capacity" + Attr_StorageType = "storage_type" + Attr_StorageTypesCapacity = "storage_types_capacity" + Attr_Synchronized = "synchronized" + Attr_SysType = "systype" + Attr_SystemPoolName = "system_pool_name" + Attr_SystemPools = "system_pools" + Attr_Systems = "systems" + Attr_TargetVolumeName = "target_volume_name" + Attr_TenantID = "tenant_id" + Attr_TenantName = "tenant_name" + Attr_TotalCapacity = "total_capacity" + Attr_TotalInstances = "total_instances" + Attr_TotalMemoryConsumed = "total_memory_consumed" + Attr_TotalProcessorsConsumed = "total_processors_consumed" + Attr_TotalSSDStorageConsumed = "total_ssd_storage_consumed" + Attr_TotalStandardStorageConsumed = "total_standard_storage_consumed" + Attr_Type = "type" + Attr_URL = "url" + Attr_Uncapped = "uncapped" + Attr_UsedIPCount = "used_ip_count" + Attr_UsedIPPercent = "used_ip_percent" + Attr_UserIPAddress = "user_ip_address" + Attr_VCPUs = "vcpus" + Attr_VLanID = "vlan_id" + Attr_VPCCRNs = "vpc_crns" + Attr_VPCEnabled = "vpc_enabled" + Attr_VirtualCoresAssigned = "virtual_cores_assigned" + Attr_VolumeGroupName = "volume_group_name" + Attr_VolumeGroups = "volume_groups" + Attr_VolumeIDs = "volume_ids" + Attr_VolumePool = "volume_pool" + Attr_VolumeSnapshots = "volume_snapshots" + Attr_Volumes = "volumes" + Attr_WWN = "wwn" + Attr_Workspaces = "workspaces" - Attr_KeyID = "key_id" - Attr_Keys = "keys" - Attr_KeyCreationDate = "creation_date" - Attr_Key = "ssh_key" - Attr_KeyName = "name" + // TODO: Second Half Cleanup, remove extra variables // SAP Profile PISAPProfiles = "profiles" @@ -43,13 +277,10 @@ const ( Attr_DhcpStatus = "status" // Instance - Arg_PVMInstanceId = "pi_instance_id" - Arg_PVMInstanceActionType = "pi_action" - Arg_PVMInstanceHealthStatus = "pi_health_status" - - Attr_Status = "status" - Attr_Progress = "progress" - Attr_HealthStatus = "health_status" + Arg_PVMInstanceId = "pi_instance_id" + Arg_PVMInstanceActionType = "pi_action" + Arg_PVMInstanceHealthStatus = "pi_health_status" + Arg_PIInstanceSharedProcessorPool = "pi_shared_processor_pool" PVMInstanceHealthOk = "OK" PVMInstanceHealthWarning = "WARNING" @@ -58,14 +289,31 @@ const ( warningTimeOut = 60 * time.Second activeTimeOut = 2 * time.Minute // power service instance capabilities - CUSTOM_VIRTUAL_CORES = "custom-virtualcores" - PIInstanceDeploymentType = "pi_deployment_type" - PIInstanceNetwork = "pi_network" - PIInstanceStoragePool = "pi_storage_pool" - 
PISAPInstanceProfileID = "pi_sap_profile_id" - PISAPInstanceDeploymentType = "pi_sap_deployment_type" - PIInstanceStoragePoolAffinity = "pi_storage_pool_affinity" - Arg_PIInstanceSharedProcessorPool = "pi_shared_processor_pool" + CUSTOM_VIRTUAL_CORES = "custom-virtualcores" + + PIConsoleLanguageCode = "pi_language_code" + PICloudConnectionId = "cloud_connection_id" + PICloudConnectionStatus = "status" + PICloudConnectionIBMIPAddress = "ibm_ip_address" + PICloudConnectionUserIPAddress = "user_ip_address" + PICloudConnectionPort = "port" + PICloudConnectionClassicGreSource = "gre_source_address" + PICloudConnectionConnectionMode = "connection_mode" + PIInstanceDeploymentType = "pi_deployment_type" + PIInstanceMigratable = "pi_migratable" + PIInstanceNetwork = "pi_network" + PIInstanceLicenseRepositoryCapacity = "pi_license_repository_capacity" + PIInstanceStoragePool = "pi_storage_pool" + PIInstanceStorageType = "pi_storage_type" + PISAPInstanceProfileID = "pi_sap_profile_id" + PISAPInstanceDeploymentType = "pi_sap_deployment_type" + PIInstanceSharedProcessorPool = "pi_shared_processor_pool" + PIInstanceStorageConnection = "pi_storage_connection" + PIInstanceStoragePoolAffinity = "pi_storage_pool_affinity" + + PIInstanceUserData = "pi_user_data" + PIInstanceVolumeIds = "pi_volume_ids" + Attr_PIInstanceSharedProcessorPool = "shared_processor_pool" Attr_PIInstanceSharedProcessorPoolID = "shared_processor_pool_id" @@ -74,15 +322,20 @@ const ( PIPlacementGroupMembers = "members" // Volume + PIVolumeIds = "pi_volume_ids" PIAffinityPolicy = "pi_affinity_policy" PIAffinityVolume = "pi_affinity_volume" PIAffinityInstance = "pi_affinity_instance" PIAntiAffinityInstances = "pi_anti_affinity_instances" PIAntiAffinityVolumes = "pi_anti_affinity_volumes" + // Volume Clone + PIVolumeCloneName = "pi_volume_clone_name" + PIVolumeCloneTaskID = "pi_volume_clone_task_id" + PITargetStorageTier = "pi_target_storage_tier" + // IBM PI Volume Group PIVolumeGroupName = "pi_volume_group_name" - PIVolumeGroupsVolumeIds = "pi_volume_ids" PIVolumeGroupConsistencyGroupName = "pi_consistency_group_name" PIVolumeGroupID = "pi_volume_group_id" PIVolumeGroupAction = "pi_volume_group_action" @@ -104,37 +357,10 @@ const ( // Cloud Connections PICloudConnectionTransitEnabled = "pi_cloud_connection_transit_enabled" - // Shared Processor Pool - Arg_SharedProcessorPoolName = "pi_shared_processor_pool_name" - Arg_SharedProcessorPoolHostGroup = "pi_shared_processor_pool_host_group" - Arg_SharedProcessorPoolPlacementGroupID = "pi_shared_processor_pool_placement_group_id" - Arg_SharedProcessorPoolReservedCores = "pi_shared_processor_pool_reserved_cores" - Arg_SharedProcessorPoolID = "pi_shared_processor_pool_id" - Attr_SharedProcessorPoolID = "shared_processor_pool_id" - Attr_SharedProcessorPoolName = "name" - Attr_SharedProcessorPoolReservedCores = "reserved_cores" - Attr_SharedProcessorPoolAvailableCores = "available_cores" - Attr_SharedProcessorPoolAllocatedCores = "allocated_cores" - Attr_SharedProcessorPoolHostID = "host_id" - Attr_SharedProcessorPoolStatus = "status" - Attr_SharedProcessorPoolStatusDetail = "status_detail" - Attr_SharedProcessorPoolPlacementGroups = "spp_placement_groups" - Attr_SharedProcessorPoolInstances = "instances" - Attr_SharedProcessorPoolInstanceCpus = "cpus" - Attr_SharedProcessorPoolInstanceUncapped = "uncapped" - Attr_SharedProcessorPoolInstanceAvailabilityZone = "availability_zone" - Attr_SharedProcessorPoolInstanceId = "id" - Attr_SharedProcessorPoolInstanceMemory = "memory" - 
Attr_SharedProcessorPoolInstanceName = "name" - Attr_SharedProcessorPoolInstanceStatus = "status" - Attr_SharedProcessorPoolInstanceVcpus = "vcpus" - // SPP Placement Group - Arg_SPPPlacementGroupName = "pi_spp_placement_group_name" - Arg_SPPPlacementGroupPolicy = "pi_spp_placement_group_policy" + Attr_SPPPlacementGroupID = "spp_placement_group_id" Attr_SPPPlacementGroupMembers = "members" - Arg_SPPPlacementGroupID = "pi_spp_placement_group_id" Attr_SPPPlacementGroupPolicy = "policy" Attr_SPPPlacementGroupName = "name" @@ -149,6 +375,10 @@ const ( SctionStart = "start" SctionStop = "stop" + // volume clone task status + VolumeCloneCompleted = "completed" + VolumeCloneRunning = "running" + // Workspaces Attr_WorkspaceCapabilities = "pi_workspace_capabilities" Attr_WorkspaceDetails = "pi_workspace_details" diff --git a/ibm/service/power/resource_ibm_pi_image.go b/ibm/service/power/resource_ibm_pi_image.go index 3f83a8df26..f2f5ab423c 100644 --- a/ibm/service/power/resource_ibm_pi_image.go +++ b/ibm/service/power/resource_ibm_pi_image.go @@ -113,13 +113,13 @@ func ResourceIBMPIImage() *schema.Resource { helpers.PIImageStorageType: { Type: schema.TypeString, Optional: true, - Description: "Type of storage", + Description: "Type of storage; If not specified, default is tier3", ForceNew: true, }, helpers.PIImageStoragePool: { Type: schema.TypeString, Optional: true, - Description: "Storage pool where the image will be loaded, if provided then pi_image_storage_type and pi_affinity_policy will be ignored", + Description: "Storage pool where the image will be loaded, if provided then pi_affinity_policy will be ignored", ForceNew: true, }, PIAffinityPolicy: { diff --git a/ibm/service/power/resource_ibm_pi_instance.go b/ibm/service/power/resource_ibm_pi_instance.go index 610bf44813..d48834dc25 100644 --- a/ibm/service/power/resource_ibm_pi_instance.go +++ b/ibm/service/power/resource_ibm_pi_instance.go @@ -83,31 +83,30 @@ func ResourceIBMPIInstance() *schema.Resource { Description: "Maximum memory size", }, helpers.PIInstanceVolumeIds: { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - DiffSuppressFunc: flex.ApplyOnce, - Description: "List of PI volumes", + Type: schema.TypeSet, + ForceNew: true, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: "List of PI volumes", }, - helpers.PIInstanceUserData: { Type: schema.TypeString, + ForceNew: true, Optional: true, Description: "Base64 encoded data to be passed in for invoking a cloud init script", }, - helpers.PIInstanceStorageType: { Type: schema.TypeString, Optional: true, Computed: true, - Description: "Storage type for server deployment", + Description: "Storage type for server deployment; if pi_storage_type is not provided the storage type will default to tier3", }, PIInstanceStoragePool: { Type: schema.TypeString, Optional: true, Computed: true, - Description: "Storage Pool for server deployment; if provided then pi_affinity_policy and pi_storage_type will be ignored", + Description: "Storage Pool for server deployment; if provided then pi_storage_pool_affinity will be ignored; Only valid when you deploy one of the IBM supplied stock images. 
Storage pool for a custom image (an imported image or an image that is created from a VM capture) defaults to the storage pool the image was created in", }, PIAffinityPolicy: { Type: schema.TypeString, @@ -154,10 +153,10 @@ func ResourceIBMPIInstance() *schema.Resource { Description: "Indicates if all volumes attached to the server must reside in the same storage pool", }, PIInstanceNetwork: { - Type: schema.TypeList, - Required: true, - DiffSuppressFunc: flex.ApplyOnce, - Description: "List of one or more networks to attach to the instance", + Type: schema.TypeList, + ForceNew: true, + Required: true, + Description: "List of one or more networks to attach to the instance", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "ip_address": { @@ -190,13 +189,14 @@ func ResourceIBMPIInstance() *schema.Resource { }, helpers.PIPlacementGroupID: { Type: schema.TypeString, + ForceNew: true, Optional: true, Description: "Placement group ID", }, Arg_PIInstanceSharedProcessorPool: { Type: schema.TypeString, - Optional: true, ForceNew: true, + Optional: true, ConflictsWith: []string{PISAPInstanceProfileID}, Description: "Shared Processor Pool the instance is deployed on", }, @@ -247,10 +247,10 @@ func ResourceIBMPIInstance() *schema.Resource { Description: "Instance processor type", }, helpers.PIInstanceSSHKeyName: { - Type: schema.TypeString, - Optional: true, - DiffSuppressFunc: flex.ApplyOnce, - Description: "SSH key name", + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Description: "SSH key name", }, helpers.PIInstanceMemory: { Type: schema.TypeFloat, @@ -261,6 +261,7 @@ func ResourceIBMPIInstance() *schema.Resource { }, PIInstanceDeploymentType: { Type: schema.TypeString, + ForceNew: true, Optional: true, Description: "Custom Deployment Type Information", }, @@ -272,23 +273,27 @@ func ResourceIBMPIInstance() *schema.Resource { }, PISAPInstanceDeploymentType: { Type: schema.TypeString, + ForceNew: true, Optional: true, Description: "Custom SAP Deployment Type Information", }, helpers.PIInstanceSystemType: { Type: schema.TypeString, + ForceNew: true, Optional: true, Computed: true, Description: "PI Instance system type", }, helpers.PIInstanceReplicants: { Type: schema.TypeInt, + ForceNew: true, Optional: true, Default: 1, Description: "PI Instance replicas count", }, helpers.PIInstanceReplicationPolicy: { Type: schema.TypeString, + ForceNew: true, Optional: true, ValidateFunc: validate.ValidateAllowedStringValues([]string{"affinity", "anti-affinity", "none"}), Default: "none", @@ -296,6 +301,7 @@ func ResourceIBMPIInstance() *schema.Resource { }, helpers.PIInstanceReplicationScheme: { Type: schema.TypeString, + ForceNew: true, Optional: true, ValidateFunc: validate.ValidateAllowedStringValues([]string{"prefix", "suffix"}), Default: "suffix", diff --git a/ibm/service/power/resource_ibm_pi_instance_console_language.go b/ibm/service/power/resource_ibm_pi_instance_console_language.go index a825760d04..4f065a0a36 100644 --- a/ibm/service/power/resource_ibm_pi_instance_console_language.go +++ b/ibm/service/power/resource_ibm_pi_instance_console_language.go @@ -18,10 +18,6 @@ import ( "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" ) -const ( - PIConsoleLanguageCode = "pi_language_code" -) - func ResourceIBMPIInstanceConsoleLanguage() *schema.Resource { return &schema.Resource{ CreateContext: resourceIBMPIInstanceConsoleLanguageCreate, @@ -93,7 +89,7 @@ func resourceIBMPIInstanceConsoleLanguageUpdate(ctx context.Context, d *schema.R return diag.FromErr(err) } - if 
d.HasChange(ConsoleLanguageCode) { + if d.HasChange(PIConsoleLanguageCode) { cloudInstanceID := d.Get(helpers.PICloudInstanceId).(string) instanceName := d.Get(helpers.PIInstanceName).(string) code := d.Get(PIConsoleLanguageCode).(string) diff --git a/ibm/service/power/resource_ibm_pi_instance_test.go b/ibm/service/power/resource_ibm_pi_instance_test.go index 9e5555fadd..d28e294322 100644 --- a/ibm/service/power/resource_ibm_pi_instance_test.go +++ b/ibm/service/power/resource_ibm_pi_instance_test.go @@ -41,6 +41,7 @@ func testAccCheckIBMPIInstanceConfig(name, instanceHealthStatus string) string { pi_volume_name = "%[2]s" pi_volume_shareable = true pi_volume_pool = data.ibm_pi_image.power_image.storage_pool + pi_volume_type = "%[6]s" pi_cloud_instance_id = "%[1]s" } resource "ibm_pi_instance" "power_instance" { @@ -53,13 +54,14 @@ func testAccCheckIBMPIInstanceConfig(name, instanceHealthStatus string) string { pi_sys_type = "s922" pi_cloud_instance_id = "%[1]s" pi_storage_pool = data.ibm_pi_image.power_image.storage_pool + pi_storage_type = "%[6]s" pi_health_status = "%[5]s" pi_volume_ids = [ibm_pi_volume.power_volume.volume_id] pi_network { network_id = data.ibm_pi_network.power_networks.id } } - `, acc.Pi_cloud_instance_id, name, acc.Pi_image, acc.Pi_network_name, instanceHealthStatus) + `, acc.Pi_cloud_instance_id, name, acc.Pi_image, acc.Pi_network_name, instanceHealthStatus, acc.PiStorageType) } func testAccCheckIBMPIInstanceUserDataConfig(name, instanceHealthStatus string) string { diff --git a/ibm/service/power/resource_ibm_pi_key.go b/ibm/service/power/resource_ibm_pi_key.go index 3f305e4da2..be0c542be2 100644 --- a/ibm/service/power/resource_ibm_pi_key.go +++ b/ibm/service/power/resource_ibm_pi_key.go @@ -50,7 +50,7 @@ func ResourceIBMPIKey() *schema.Resource { }, // Attributes - Attr_KeyCreationDate: { + Attr_CreationDate: { Type: schema.TypeString, Computed: true, Description: "Date of SSH Key creation", @@ -129,7 +129,7 @@ func resourceIBMPIKeyRead(ctx context.Context, d *schema.ResourceData, meta inte d.Set(Attr_KeyName, sshkeydata.Name) d.Set(Attr_KeyID, sshkeydata.Name) d.Set(Attr_Key, sshkeydata.SSHKey) - d.Set(Attr_KeyCreationDate, sshkeydata.CreationDate.String()) + d.Set(Attr_CreationDate, sshkeydata.CreationDate.String()) return nil } diff --git a/ibm/service/power/resource_ibm_pi_network.go b/ibm/service/power/resource_ibm_pi_network.go index 49e85d9aa5..d6ad8826c6 100644 --- a/ibm/service/power/resource_ibm_pi_network.go +++ b/ibm/service/power/resource_ibm_pi_network.go @@ -75,19 +75,19 @@ func ResourceIBMPINetwork() *schema.Resource { Description: "PI network gateway", }, helpers.PINetworkJumbo: { - Type: schema.TypeBool, - Optional: true, - Computed: true, - Deprecated: "deprecated use pi_network_mtu instead", - ExactlyOneOf: []string{helpers.PINetworkMtu, helpers.PINetworkJumbo}, - Description: "PI network enable MTU Jumbo option", + Type: schema.TypeBool, + Optional: true, + Computed: true, + Deprecated: "deprecated use pi_network_mtu instead", + ConflictsWith: []string{helpers.PINetworkMtu}, + Description: "PI network enable MTU Jumbo option", }, helpers.PINetworkMtu: { - Type: schema.TypeInt, - Optional: true, - Computed: true, - ExactlyOneOf: []string{helpers.PINetworkMtu, helpers.PINetworkJumbo}, - Description: "PI Maximum Transmission Unit", + Type: schema.TypeInt, + Optional: true, + Computed: true, + ConflictsWith: []string{helpers.PINetworkJumbo}, + Description: "PI Maximum Transmission Unit", }, helpers.PINetworkAccessConfig: { Type: schema.TypeString, 
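Note on the resource_ibm_pi_network hunk above: it swaps ExactlyOneOf for ConflictsWith on pi_network_mtu and pi_network_jumbo, so a configuration may now omit both fields but still may not set them together. A minimal sketch of an acceptance-test config in the style of the tests already in this change set (the helper name and the MTU value are illustrative, not taken from this diff):

func testAccCheckIBMPINetworkMTUConfig(name string) string {
	// Sets only pi_network_mtu; adding pi_network_jumbo to the same block would
	// now be rejected by the ConflictsWith validation, whereas the previous
	// ExactlyOneOf rule had required exactly one of the two fields.
	return fmt.Sprintf(`
	resource "ibm_pi_network" "power_network" {
		pi_cloud_instance_id = "%[1]s"
		pi_network_name      = "%[2]s"
		pi_network_type      = "vlan"
		pi_cidr              = "192.168.17.0/24"
		pi_network_mtu       = 2000
		# pi_network_jumbo   = true
	}`, acc.Pi_cloud_instance_id, name)
}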
diff --git a/ibm/service/power/resource_ibm_pi_volume.go b/ibm/service/power/resource_ibm_pi_volume.go index 9ceae9e174..63f8d4e1f0 100644 --- a/ibm/service/power/resource_ibm_pi_volume.go +++ b/ibm/service/power/resource_ibm_pi_volume.go @@ -51,7 +51,7 @@ func ResourceIBMPIVolume() *schema.Resource { helpers.PIVolumeShareable: { Type: schema.TypeBool, Optional: true, - Description: "Flag to indicate if the volume can be shared across multiple instances?", + Description: "Flag to indicate if the volume can be shared across multiple instances.", }, helpers.PIVolumeSize: { Type: schema.TypeFloat, @@ -62,16 +62,16 @@ func ResourceIBMPIVolume() *schema.Resource { Type: schema.TypeString, Optional: true, Computed: true, - ValidateFunc: validate.ValidateAllowedStringValues([]string{"ssd", "standard", "tier1", "tier3"}), + ValidateFunc: validate.ValidateAllowedStringValues([]string{"tier0", "tier1", "tier3", "tier5k"}), DiffSuppressFunc: flex.ApplyOnce, - Description: "Type of Disk, required if pi_affinity_policy and pi_volume_pool not provided, otherwise ignored", + Description: "Type of disk, if disk type is not provided the disk type will default to tier3", }, helpers.PIVolumePool: { Type: schema.TypeString, Optional: true, Computed: true, DiffSuppressFunc: flex.ApplyOnce, - Description: "Volume pool where the volume will be created; if provided then pi_volume_type and pi_affinity_policy values will be ignored", + Description: "Volume pool where the volume will be created; if provided then pi_affinity_policy values will be ignored", }, PIAffinityPolicy: { Type: schema.TypeString, @@ -184,6 +184,11 @@ func ResourceIBMPIVolume() *schema.Resource { Computed: true, Description: "Indicates master volume name", }, + "io_throttle_rate": { + Type: schema.TypeString, + Computed: true, + Description: "Amount of iops assigned to the volume", + }, }, } } @@ -320,6 +325,7 @@ func resourceIBMPIVolumeRead(ctx context.Context, d *schema.ResourceData, meta i } d.Set("wwn", vol.Wwn) d.Set(helpers.PICloudInstanceId, cloudInstanceID) + d.Set("io_throttle_rate", vol.IoThrottleRate) return nil } @@ -357,12 +363,14 @@ func resourceIBMPIVolumeUpdate(ctx context.Context, d *schema.ResourceData, meta return diag.FromErr(err) } - if d.HasChange(helpers.PIReplicationEnabled) { - replicationEnabled := d.Get(helpers.PIReplicationEnabled).(bool) - volActionBody := models.VolumeAction{ - ReplicationEnabled: &replicationEnabled, + if d.HasChanges(helpers.PIReplicationEnabled, helpers.PIVolumeType) { + volActionBody := models.VolumeAction{} + if d.HasChange(helpers.PIReplicationEnabled) { + volActionBody.ReplicationEnabled = flex.PtrToBool(d.Get(helpers.PIReplicationEnabled).(bool)) + } + if d.HasChange(helpers.PIVolumeType) { + volActionBody.TargetStorageTier = flex.PtrToString(d.Get(helpers.PIVolumeType).(string)) } - err = client.VolumeAction(volumeID, &volActionBody) if err != nil { return diag.FromErr(err) diff --git a/ibm/service/power/resource_ibm_pi_volume_clone.go b/ibm/service/power/resource_ibm_pi_volume_clone.go new file mode 100644 index 0000000000..6d43f4efba --- /dev/null +++ b/ibm/service/power/resource_ibm_pi_volume_clone.go @@ -0,0 +1,235 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. 
+// Licensed under the Mozilla Public License v2.0 + +package power + +import ( + "context" + "fmt" + "log" + "time" + + st "github.com/IBM-Cloud/power-go-client/clients/instance" + "github.com/IBM-Cloud/power-go-client/helpers" + "github.com/IBM-Cloud/power-go-client/power/models" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func ResourceIBMPIVolumeClone() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceIBMPIVolumeCloneCreate, + ReadContext: resourceIBMPIVolumeCloneRead, + DeleteContext: resourceIBMPIVolumeCloneDelete, + Importer: &schema.ResourceImporter{}, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(15 * time.Minute), + Delete: schema.DefaultTimeout(15 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + Arg_CloudInstanceID: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The GUID of the service instance associated with an account.", + }, + PIVolumeCloneName: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The base name of the newly cloned volume(s).", + }, + PIVolumeIds: { + Type: schema.TypeSet, + Required: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: "List of volumes to be cloned.", + }, + PITargetStorageTier: { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The storage tier for the cloned volume(s).", + }, + helpers.PIReplicationEnabled: { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Indicates whether the cloned volume should have replication enabled. 
If no value is provided, it will default to the replication status of the source volume(s).", + }, + + // Computed attributes + "task_id": { + Type: schema.TypeString, + Computed: true, + Description: "The ID of the volume clone task.", + }, + "cloned_volumes": clonedVolumesSchema(), + "failure_reason": { + Type: schema.TypeString, + Computed: true, + Description: "The reason for the failure of the volume clone task.", + }, + "percent_complete": { + Type: schema.TypeInt, + Computed: true, + Description: "The completion percentage of the volume clone task.", + }, + "status": { + Type: schema.TypeString, + Computed: true, + Description: "The status of the volume clone task.", + }, + }, + } +} + +func resourceIBMPIVolumeCloneCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + sess, err := meta.(conns.ClientSession).IBMPISession() + if err != nil { + return diag.FromErr(err) + } + + cloudInstanceID := d.Get(Arg_CloudInstanceID).(string) + vcName := d.Get(PIVolumeCloneName).(string) + volids := flex.ExpandStringList((d.Get(PIVolumeIds).(*schema.Set)).List()) + + body := &models.VolumesCloneAsyncRequest{ + Name: &vcName, + VolumeIDs: volids, + } + + if v, ok := d.GetOk(PITargetStorageTier); ok { + body.TargetStorageTier = v.(string) + } + + if !d.GetRawConfig().GetAttr(helpers.PIReplicationEnabled).IsNull() { + body.TargetReplicationEnabled = flex.PtrToBool(d.Get(helpers.PIReplicationEnabled).(bool)) + } + + client := st.NewIBMPICloneVolumeClient(ctx, sess, cloudInstanceID) + volClone, err := client.Create(body) + if err != nil { + return diag.FromErr(err) + } + + d.SetId(fmt.Sprintf("%s/%s", cloudInstanceID, *volClone.CloneTaskID)) + + _, err = isWaitForIBMPIVolumeCloneCompletion(ctx, client, *volClone.CloneTaskID, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return diag.FromErr(err) + } + + return resourceIBMPIVolumeCloneRead(ctx, d, meta) +} + +func resourceIBMPIVolumeCloneRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + + sess, err := meta.(conns.ClientSession).IBMPISession() + if err != nil { + return diag.FromErr(err) + } + + cloudInstanceID, vcTaskID, err := splitID(d.Id()) + if err != nil { + return diag.FromErr(err) + } + + client := st.NewIBMPICloneVolumeClient(ctx, sess, cloudInstanceID) + volCloneTask, err := client.Get(vcTaskID) + if err != nil { + return diag.FromErr(err) + } + + d.Set("task_id", vcTaskID) + if volCloneTask.Status != nil { + d.Set("status", *volCloneTask.Status) + } + d.Set("failure_reason", volCloneTask.FailedReason) + if volCloneTask.PercentComplete != nil { + d.Set("percent_complete", *volCloneTask.PercentComplete) + } + d.Set("cloned_volumes", flattenClonedVolumes(volCloneTask.ClonedVolumes)) + + return nil +} + +func resourceIBMPIVolumeCloneDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + // There is no delete or unset concept for volume clone + d.SetId("") + return nil +} + +func flattenClonedVolumes(list []*models.ClonedVolume) (cloneVolumes []map[string]interface{}) { + if list != nil { + cloneVolumes := make([]map[string]interface{}, len(list)) + for i, data := range list { + l := map[string]interface{}{ + "clone_volume_id": data.ClonedVolumeID, + "source_volume_id": data.SourceVolumeID, + } + cloneVolumes[i] = l + } + return cloneVolumes + } + return +} + +func isWaitForIBMPIVolumeCloneCompletion(ctx context.Context, client *st.IBMPICloneVolumeClient, id string, timeout time.Duration) (interface{}, error) { + log.Printf("Waiting 
for Volume clone (%s) to be completed.", id) + + stateConf := &resource.StateChangeConf{ + Pending: []string{VolumeCloneRunning}, + Target: []string{VolumeCloneCompleted}, + Refresh: isIBMPIVolumeCloneRefreshFunc(client, id), + Delay: 10 * time.Second, + MinTimeout: 2 * time.Minute, + Timeout: timeout, + } + + return stateConf.WaitForStateContext(ctx) +} + +func isIBMPIVolumeCloneRefreshFunc(client *st.IBMPICloneVolumeClient, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + volClone, err := client.Get(id) + if err != nil { + return nil, "", err + } + + if *volClone.Status == VolumeCloneCompleted { + return volClone, VolumeCloneCompleted, nil + } + + return volClone, VolumeCloneRunning, nil + } +} + +func clonedVolumesSchema() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The List of cloned volumes.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "clone_volume_id": { + Type: schema.TypeString, + Computed: true, + Description: "The ID of the newly cloned volume.", + }, + "source_volume_id": { + Type: schema.TypeString, + Computed: true, + Description: "The ID of the source volume.", + }, + }, + }, + } +} diff --git a/ibm/service/power/resource_ibm_pi_volume_clone_test.go b/ibm/service/power/resource_ibm_pi_volume_clone_test.go new file mode 100644 index 0000000000..4a5da2446f --- /dev/null +++ b/ibm/service/power/resource_ibm_pi_volume_clone_test.go @@ -0,0 +1,99 @@ +// Copyright IBM Corp. 2024 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package power_test + +import ( + "context" + "errors" + "fmt" + "testing" + + st "github.com/IBM-Cloud/power-go-client/clients/instance" + acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func TestAccIBMPIVolumeClone(t *testing.T) { + resVolumeClone := "ibm_pi_volume_clone.power_volume_clone" + name := fmt.Sprintf("tf-pi-volume-clone-%d", acctest.RandIntRange(10, 100)) + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMPIVolumeCloneConfig(name), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMPIVolumeCloneExists(resVolumeClone), + resource.TestCheckResourceAttrSet(resVolumeClone, "id"), + resource.TestCheckResourceAttrSet(resVolumeClone, "status"), + resource.TestCheckResourceAttr(resVolumeClone, "status", "completed"), + resource.TestCheckResourceAttrSet(resVolumeClone, "percent_complete"), + resource.TestCheckResourceAttr(resVolumeClone, "percent_complete", "100"), + ), + }, + }, + }) +} + +func testAccCheckIBMPIVolumeCloneExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + + rs, ok := s.RootModule().Resources[n] + + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return errors.New("No Record ID is set") + } + + sess, err := acc.TestAccProvider.Meta().(conns.ClientSession).IBMPISession() + if err != nil { + return err + } + + ids, err := flex.IdParts(rs.Primary.ID) + if err != nil { + return err + } + cloudInstanceID, vcTaskID := ids[0], ids[1] + client := 
st.NewIBMPICloneVolumeClient(context.Background(), sess, cloudInstanceID) + + _, err = client.Get(vcTaskID) + if err != nil { + return err + } + return nil + } +} + +func testAccCheckIBMPIVolumeCloneConfig(name string) string { + return volumesCloneConfig(name, true) + fmt.Sprintf(` + resource "ibm_pi_volume_clone" "power_volume_clone" { + pi_cloud_instance_id = "%[1]s" + pi_volume_clone_name = "%[2]s" + pi_volume_ids = ibm_pi_volume.power_volume.*.volume_id + pi_target_storage_tier = "%[3]s" + pi_replication_enabled = %[4]v + } + `, acc.Pi_cloud_instance_id, name, acc.Pi_target_storage_tier, false) +} + +func volumesCloneConfig(name string, volumeReplicationEnabled bool) string { + return fmt.Sprintf(` + resource "ibm_pi_volume" "power_volume" { + count = 2 + pi_volume_size = 2 + pi_volume_name = "%[1]s-${count.index}" + pi_volume_pool = "%[3]s" + pi_cloud_instance_id = "%[2]s" + pi_replication_enabled = %[4]v + } + `, name, acc.Pi_cloud_instance_id, acc.PiStoragePool, volumeReplicationEnabled) +} diff --git a/ibm/service/power/resource_ibm_pi_volume_group.go b/ibm/service/power/resource_ibm_pi_volume_group.go index 6e8097b835..1b44814b70 100644 --- a/ibm/service/power/resource_ibm_pi_volume_group.go +++ b/ibm/service/power/resource_ibm_pi_volume_group.go @@ -52,7 +52,7 @@ func ResourceIBMPIVolumeGroup() *schema.Resource { Description: "The name of consistency group at storage controller level", ConflictsWith: []string{PIVolumeGroupName}, }, - PIVolumeGroupsVolumeIds: { + PIVolumeIds: { Type: schema.TypeSet, Required: true, Elem: &schema.Schema{Type: schema.TypeString}, @@ -98,7 +98,7 @@ func resourceIBMPIVolumeGroupCreate(ctx context.Context, d *schema.ResourceData, Name: vgName, } - volids := flex.ExpandStringList((d.Get(PIVolumeGroupsVolumeIds).(*schema.Set)).List()) + volids := flex.ExpandStringList((d.Get(PIVolumeIds).(*schema.Set)).List()) body.VolumeIDs = volids if v, ok := d.GetOk(PIVolumeGroupConsistencyGroupName); ok { @@ -144,7 +144,7 @@ func resourceIBMPIVolumeGroupRead(ctx context.Context, d *schema.ResourceData, m d.Set("consistency_group_name", vg.ConsistencyGroupName) d.Set("replication_status", vg.ReplicationStatus) d.Set(PIVolumeGroupName, vg.Name) - d.Set(PIVolumeGroupsVolumeIds, vg.VolumeIDs) + d.Set(PIVolumeIds, vg.VolumeIDs) d.Set("status_description_errors", flattenVolumeGroupStatusDescription(vg.StatusDescription.Errors)) return nil @@ -163,8 +163,8 @@ func resourceIBMPIVolumeGroupUpdate(ctx context.Context, d *schema.ResourceData, } client := st.NewIBMPIVolumeGroupClient(ctx, sess, cloudInstanceID) - if d.HasChanges(PIVolumeGroupsVolumeIds) { - old, new := d.GetChange(PIVolumeGroupsVolumeIds) + if d.HasChanges(PIVolumeIds) { + old, new := d.GetChange(PIVolumeIds) oldList := old.(*schema.Set) newList := new.(*schema.Set) body := &models.VolumeGroupUpdate{ @@ -196,7 +196,7 @@ func resourceIBMPIVolumeGroupDelete(ctx context.Context, d *schema.ResourceData, client := st.NewIBMPIVolumeGroupClient(ctx, sess, cloudInstanceID) - volids := flex.ExpandStringList((d.Get(PIVolumeGroupsVolumeIds).(*schema.Set)).List()) + volids := flex.ExpandStringList((d.Get(PIVolumeIds).(*schema.Set)).List()) if len(volids) > 0 { body := &models.VolumeGroupUpdate{ RemoveVolumes: volids, diff --git a/ibm/service/power/resource_ibm_pi_volume_group_action.go b/ibm/service/power/resource_ibm_pi_volume_group_action.go index 83f636eb33..6c7e7a2b4a 100644 --- a/ibm/service/power/resource_ibm_pi_volume_group_action.go +++ b/ibm/service/power/resource_ibm_pi_volume_group_action.go @@ -176,7 +176,7 @@ 
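A note on the pi_replication_enabled handling in resourceIBMPIVolumeCloneCreate above: d.GetOk cannot distinguish a boolean that was omitted from one explicitly set to false, so the create path inspects the raw configuration and only sends TargetReplicationEnabled when the practitioner actually set the argument, letting the backend default to the source volumes' replication status otherwise. A minimal sketch of that pattern, assuming terraform-plugin-sdk/v2 (the helper name is hypothetical):

package power_example

import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

// optionalBool returns nil when the argument was left out of the
// configuration, so the API request can omit the field entirely instead of
// sending a zero value.
func optionalBool(d *schema.ResourceData, key string) *bool {
	if d.GetRawConfig().GetAttr(key).IsNull() {
		return nil
	}
	v := d.Get(key).(bool)
	return &v
}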
func resourceIBMPIVolumeGroupActionRead(ctx context.Context, d *schema.ResourceD } func resourceIBMPIVolumeGroupActionDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - // There is no delete or unset concept for instance action + // There is no delete or unset concept for volume group action d.SetId("") return nil } diff --git a/ibm/service/power/resource_ibm_pi_volume_group_test.go b/ibm/service/power/resource_ibm_pi_volume_group_test.go index 5b05f8a096..5d55c1981b 100644 --- a/ibm/service/power/resource_ibm_pi_volume_group_test.go +++ b/ibm/service/power/resource_ibm_pi_volume_group_test.go @@ -146,9 +146,9 @@ func volumeConfig(name, cloud_instance_id string) string { pi_volume_size = 2 pi_volume_name = "%[1]s-${count.index}" pi_volume_shareable = true - pi_volume_pool = "Tier1-Flash-1" + pi_volume_pool = "%[3]s" pi_cloud_instance_id = "%[2]s" pi_replication_enabled = true } - `, name, cloud_instance_id) + `, name, cloud_instance_id, acc.PiStoragePool) } diff --git a/ibm/service/power/resource_ibm_pi_volume_test.go b/ibm/service/power/resource_ibm_pi_volume_test.go index df5119f75c..5622b183ec 100644 --- a/ibm/service/power/resource_ibm_pi_volume_test.go +++ b/ibm/service/power/resource_ibm_pi_volume_test.go @@ -214,6 +214,55 @@ func testAccCheckIBMPIVolumeGRSBasicConfig(name, piCloudInstanceId, piStoragePoo pi_volume_shareable = true pi_cloud_instance_id = "%[2]s" pi_replication_enabled = %[4]v + pi_volume_type = "tier3" } `, name, piCloudInstanceId, piStoragePool, replicationEnabled) } + +// TestAccIBMPIVolumeUpdate test the volume update +func TestAccIBMPIVolumeUpdate(t *testing.T) { + name := fmt.Sprintf("tf-pi-volume-%d", acctest.RandIntRange(10, 100)) + sType := acc.PiStorageType // tier 3 + sTypeUpdate := "tier1" + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: testAccCheckIBMPIVolumeDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMPIVolumeUpdateStorageConfig(name, sType), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMPIVolumeExists("ibm_pi_volume.power_volume"), + resource.TestCheckResourceAttr( + "ibm_pi_volume.power_volume", "pi_volume_name", name), + ), + }, + { + Config: testAccCheckIBMPIVolumeUpdateStorageConfig(name, sTypeUpdate), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMPIVolumeExists("ibm_pi_volume.power_volume"), + resource.TestCheckResourceAttr( + "ibm_pi_volume.power_volume", "pi_volume_name", name), + resource.TestCheckResourceAttrSet("ibm_pi_volume.power_volume", "pi_volume_type"), + ), + }, + }, + }) +} + +func testAccCheckIBMPIVolumeUpdateStorageConfig(name, piStorageType string) string { + return testAccCheckIBMPIVolumeUpdateBasicConfig(name, acc.Pi_cloud_instance_id, acc.PiStoragePool, piStorageType) +} + +func testAccCheckIBMPIVolumeUpdateBasicConfig(name, piCloudInstanceId, piStoragePool, piStorageType string) string { + return fmt.Sprintf(` + resource "ibm_pi_volume" "power_volume"{ + pi_volume_size = 20 + pi_volume_name = "%[1]s" + pi_volume_pool = "%[3]s" + pi_volume_shareable = true + pi_cloud_instance_id = "%[2]s" + pi_volume_type = "%[4]v" + } + `, name, piCloudInstanceId, piStoragePool, piStorageType) +} diff --git a/ibm/service/project/data_source_ibm_project.go b/ibm/service/project/data_source_ibm_project.go index 1ba775f958..0d2fd70942 100644 --- a/ibm/service/project/data_source_ibm_project.go +++ b/ibm/service/project/data_source_ibm_project.go @@ -1,4 +1,4 @@ -// Copyright IBM 
Corp. 2023 All Rights Reserved. +// Copyright IBM Corp. 2024 All Rights Reserved. // Licensed under the Mozilla Public License v2.0 package project @@ -85,6 +85,11 @@ func DataSourceIbmProject() *schema.Resource { Computed: true, Description: "The project status value.", }, + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "A URL.", + }, "resource_group": &schema.Schema{ Type: schema.TypeString, Computed: true, @@ -137,15 +142,15 @@ func DataSourceIbmProject() *schema.Resource { Description: "The name and description of a project configuration.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ + "description": &schema.Schema{ Type: schema.TypeString, Computed: true, - Description: "The configuration name. It is unique within the account across projects and regions.", + Description: "A project configuration description.", }, - "description": &schema.Schema{ + "name": &schema.Schema{ Type: schema.TypeString, Computed: true, - Description: "A project configuration description.", + Description: "The configuration name. It is unique within the account across projects and regions.", }, }, }, @@ -188,6 +193,11 @@ func DataSourceIbmProject() *schema.Resource { }, }, }, + "deployment_model": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The configuration type.", + }, }, }, }, @@ -256,15 +266,15 @@ func DataSourceIbmProject() *schema.Resource { Description: "The environment definition used in the project collection.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ + "description": &schema.Schema{ Type: schema.TypeString, Computed: true, - Description: "The name of the environment. It is unique within the account across projects and regions.", + Description: "The description of the environment.", }, - "description": &schema.Schema{ + "name": &schema.Schema{ Type: schema.TypeString, Computed: true, - Description: "The description of the environment.", + Description: "The name of the environment. It is unique within the account across projects and regions.", }, }, }, @@ -283,16 +293,16 @@ func DataSourceIbmProject() *schema.Resource { Computed: true, Description: "The name of the project. It is unique within the account across regions.", }, - "description": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Description: "A brief explanation of the project's use in the configuration of a deployable architecture. It is possible to create a project without providing a description.", - }, "destroy_on_delete": &schema.Schema{ Type: schema.TypeBool, Computed: true, Description: "The policy that indicates whether the resources are destroyed or not when a project is deleted.", }, + "description": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "A brief explanation of the project's use in the configuration of a deployable architecture. 
It is possible to create a project without providing a description.", + }, }, }, }, @@ -356,6 +366,10 @@ func dataSourceIbmProjectRead(context context.Context, d *schema.ResourceData, m return diag.FromErr(fmt.Errorf("Error setting state: %s", err)) } + if err = d.Set("href", project.Href); err != nil { + return diag.FromErr(fmt.Errorf("Error setting href: %s", err)) + } + if err = d.Set("resource_group", project.ResourceGroup); err != nil { return diag.FromErr(fmt.Errorf("Error setting resource_group: %s", err)) } @@ -446,7 +460,7 @@ func dataSourceIbmProjectProjectConfigSummaryToMap(model *projectv1.ProjectConfi modelMap["created_at"] = model.CreatedAt.String() modelMap["modified_at"] = model.ModifiedAt.String() modelMap["href"] = model.Href - definitionMap, err := dataSourceIbmProjectProjectConfigDefinitionNameDescriptionToMap(model.Definition) + definitionMap, err := dataSourceIbmProjectProjectConfigSummaryDefinitionToMap(model.Definition) if err != nil { return modelMap, err } @@ -456,6 +470,9 @@ func dataSourceIbmProjectProjectConfigSummaryToMap(model *projectv1.ProjectConfi return modelMap, err } modelMap["project"] = []map[string]interface{}{projectMap} + if model.DeploymentModel != nil { + modelMap["deployment_model"] = model.DeploymentModel + } return modelMap, nil } @@ -467,14 +484,14 @@ func dataSourceIbmProjectProjectConfigVersionSummaryToMap(model *projectv1.Proje return modelMap, nil } -func dataSourceIbmProjectProjectConfigDefinitionNameDescriptionToMap(model *projectv1.ProjectConfigDefinitionNameDescription) (map[string]interface{}, error) { +func dataSourceIbmProjectProjectConfigSummaryDefinitionToMap(model *projectv1.ProjectConfigSummaryDefinition) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - if model.Name != nil { - modelMap["name"] = model.Name - } if model.Description != nil { modelMap["description"] = model.Description } + if model.Name != nil { + modelMap["name"] = model.Name + } return modelMap, nil } @@ -507,7 +524,7 @@ func dataSourceIbmProjectProjectEnvironmentSummaryToMap(model *projectv1.Project modelMap["project"] = []map[string]interface{}{projectMap} modelMap["created_at"] = model.CreatedAt.String() modelMap["href"] = model.Href - definitionMap, err := dataSourceIbmProjectEnvironmentDefinitionNameDescriptionToMap(model.Definition) + definitionMap, err := dataSourceIbmProjectProjectEnvironmentSummaryDefinitionToMap(model.Definition) if err != nil { return modelMap, err } @@ -515,23 +532,19 @@ func dataSourceIbmProjectProjectEnvironmentSummaryToMap(model *projectv1.Project return modelMap, nil } -func dataSourceIbmProjectEnvironmentDefinitionNameDescriptionToMap(model *projectv1.EnvironmentDefinitionNameDescription) (map[string]interface{}, error) { +func dataSourceIbmProjectProjectEnvironmentSummaryDefinitionToMap(model *projectv1.ProjectEnvironmentSummaryDefinition) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - if model.Name != nil { - modelMap["name"] = model.Name - } if model.Description != nil { modelMap["description"] = model.Description } + modelMap["name"] = model.Name return modelMap, nil } func dataSourceIbmProjectProjectDefinitionPropertiesToMap(model *projectv1.ProjectDefinitionProperties) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) modelMap["name"] = model.Name - if model.Description != nil { - modelMap["description"] = model.Description - } modelMap["destroy_on_delete"] = model.DestroyOnDelete + modelMap["description"] = model.Description return 
modelMap, nil } diff --git a/ibm/service/project/data_source_ibm_project_config.go b/ibm/service/project/data_source_ibm_project_config.go index 8fadc48a9f..98ac04280c 100644 --- a/ibm/service/project/data_source_ibm_project_config.go +++ b/ibm/service/project/data_source_ibm_project_config.go @@ -1,11 +1,10 @@ -// Copyright IBM Corp. 2023 All Rights Reserved. +// Copyright IBM Corp. 2024 All Rights Reserved. // Licensed under the Mozilla Public License v2.0 package project import ( "context" - "encoding/json" "fmt" "log" @@ -82,9 +81,12 @@ func DataSourceIbmProjectConfig() *schema.Resource { Description: "A short explanation of the output value.", }, "value": &schema.Schema{ - Type: schema.TypeString, + Type: schema.TypeMap, Computed: true, Description: "Can be any value - a string, number, boolean, array, or object.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, }, }, }, @@ -295,52 +297,16 @@ func DataSourceIbmProjectConfig() *schema.Resource { Computed: true, Description: "The flag that indicates whether a configuration update is available.", }, - "definition": &schema.Schema{ - Type: schema.TypeList, + "href": &schema.Schema{ + Type: schema.TypeString, Computed: true, - Description: "The name and description of a project configuration.", + Description: "A URL.", + }, + "definition": &schema.Schema{ + Type: schema.TypeList, + Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Description: "The configuration name. It is unique within the account across projects and regions.", - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Description: "A project configuration description.", - }, - "environment_id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Description: "The ID of the project environment.", - }, - "authorizations": &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Description: "The authorization details. You can authorize by using a trusted profile or an API key in Secrets Manager.", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "trusted_profile_id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Description: "The trusted profile ID.", - }, - "method": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Description: "The authorization method. You can authorize by using a trusted profile or an API key in Secrets Manager.", - }, - "api_key": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Sensitive: true, - Description: "The IBM Cloud API Key.", - }, - }, - }, - }, "compliance_profile": &schema.Schema{ Type: schema.TypeList, Computed: true, @@ -380,6 +346,46 @@ func DataSourceIbmProjectConfig() *schema.Resource { Computed: true, Description: "A unique concatenation of catalogID.versionID that identifies the DA in the catalog. Either schematics.workspace_crn, definition.locator_id, or both must be specified.", }, + "description": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "A project configuration description.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The configuration name. 
It is unique within the account across projects and regions.", + }, + "environment_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The ID of the project environment.", + }, + "authorizations": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The authorization details. You can authorize by using a trusted profile or an API key in Secrets Manager.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "trusted_profile_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The trusted profile ID.", + }, + "method": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The authorization method. You can authorize by using a trusted profile or an API key in Secrets Manager.", + }, + "api_key": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Sensitive: true, + Description: "The IBM Cloud API Key.", + }, + }, + }, + }, "inputs": &schema.Schema{ Type: schema.TypeMap, Computed: true, @@ -396,6 +402,14 @@ func DataSourceIbmProjectConfig() *schema.Resource { Type: schema.TypeString, }, }, + "resource_crns": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The CRNs of resources associated with this configuration.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, }, }, }, @@ -430,10 +444,6 @@ func dataSourceIbmProjectConfigRead(context context.Context, d *schema.ResourceD return diag.FromErr(fmt.Errorf("Error setting is_draft: %s", err)) } - if err = d.Set("needs_attention_state", projectConfig.NeedsAttentionState); err != nil { - return diag.FromErr(fmt.Errorf("Error setting needs_attention_state: %s", err)) - } - if err = d.Set("created_at", flex.DateTimeToString(projectConfig.CreatedAt)); err != nil { return diag.FromErr(fmt.Errorf("Error setting created_at: %s", err)) } @@ -492,6 +502,10 @@ func dataSourceIbmProjectConfigRead(context context.Context, d *schema.ResourceD return diag.FromErr(fmt.Errorf("Error setting update_available: %s", err)) } + if err = d.Set("href", projectConfig.Href); err != nil { + return diag.FromErr(fmt.Errorf("Error setting href: %s", err)) + } + definition := []map[string]interface{}{} if projectConfig.Definition != nil { modelMap, err := dataSourceIbmProjectConfigProjectConfigResponseDefinitionToMap(projectConfig.Definition) @@ -514,7 +528,11 @@ func dataSourceIbmProjectConfigOutputValueToMap(model *projectv1.OutputValue) (m modelMap["description"] = model.Description } if model.Value != nil { - modelMap["value"] = model.Value + value := make(map[string]interface{}) + for k, v := range model.Value { + value[k] = fmt.Sprintf("%v", v) + } + modelMap["value"] = value } return modelMap, nil } @@ -602,12 +620,113 @@ func dataSourceIbmProjectConfigScriptToMap(model *projectv1.Script) (map[string] return modelMap, nil } -func dataSourceIbmProjectConfigProjectConfigResponseDefinitionToMap(model *projectv1.ProjectConfigResponseDefinition) (map[string]interface{}, error) { +func dataSourceIbmProjectConfigProjectConfigResponseDefinitionToMap(model projectv1.ProjectConfigResponseDefinitionIntf) (map[string]interface{}, error) { + if _, ok := model.(*projectv1.ProjectConfigResponseDefinitionDAConfigDefinitionProperties); ok { + return dataSourceIbmProjectConfigProjectConfigResponseDefinitionDAConfigDefinitionPropertiesToMap(model.(*projectv1.ProjectConfigResponseDefinitionDAConfigDefinitionProperties)) + } else if _, ok := model.(*projectv1.ProjectConfigResponseDefinitionResourceConfigDefinitionProperties); ok 
{ + return dataSourceIbmProjectConfigProjectConfigResponseDefinitionResourceConfigDefinitionPropertiesToMap(model.(*projectv1.ProjectConfigResponseDefinitionResourceConfigDefinitionProperties)) + } else if _, ok := model.(*projectv1.ProjectConfigResponseDefinition); ok { + modelMap := make(map[string]interface{}) + model := model.(*projectv1.ProjectConfigResponseDefinition) + if model.ComplianceProfile != nil { + complianceProfileMap, err := dataSourceIbmProjectConfigProjectComplianceProfileToMap(model.ComplianceProfile) + if err != nil { + return modelMap, err + } + modelMap["compliance_profile"] = []map[string]interface{}{complianceProfileMap} + } + if model.LocatorID != nil { + modelMap["locator_id"] = model.LocatorID + } + if model.Description != nil { + modelMap["description"] = model.Description + } + modelMap["name"] = model.Name + if model.EnvironmentID != nil { + modelMap["environment_id"] = model.EnvironmentID + } + if model.Authorizations != nil { + authorizationsMap, err := dataSourceIbmProjectConfigProjectConfigAuthToMap(model.Authorizations) + if err != nil { + return modelMap, err + } + modelMap["authorizations"] = []map[string]interface{}{authorizationsMap} + } + if model.Inputs != nil { + inputs := make(map[string]interface{}) + for k, v := range model.Inputs { + inputs[k] = fmt.Sprintf("%v", v) + } + modelMap["inputs"] = inputs + } + if model.Settings != nil { + settings := make(map[string]interface{}) + for k, v := range model.Settings { + settings[k] = fmt.Sprintf("%v", v) + } + modelMap["settings"] = settings + } + if model.ResourceCrns != nil { + modelMap["resource_crns"] = model.ResourceCrns + } + return modelMap, nil + } else { + return nil, fmt.Errorf("Unrecognized projectv1.ProjectConfigResponseDefinitionIntf subtype encountered") + } +} + +func dataSourceIbmProjectConfigProjectComplianceProfileToMap(model *projectv1.ProjectComplianceProfile) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.ID != nil { + modelMap["id"] = model.ID + } + if model.InstanceID != nil { + modelMap["instance_id"] = model.InstanceID + } + if model.InstanceLocation != nil { + modelMap["instance_location"] = model.InstanceLocation + } + if model.AttachmentID != nil { + modelMap["attachment_id"] = model.AttachmentID + } + if model.ProfileName != nil { + modelMap["profile_name"] = model.ProfileName + } + return modelMap, nil +} + +func dataSourceIbmProjectConfigProjectConfigAuthToMap(model *projectv1.ProjectConfigAuth) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - modelMap["name"] = model.Name + if model.TrustedProfileID != nil { + modelMap["trusted_profile_id"] = model.TrustedProfileID + } + if model.Method != nil { + modelMap["method"] = model.Method + } + if model.ApiKey != nil { + modelMap["api_key"] = model.ApiKey + } + return modelMap, nil +} + +func dataSourceIbmProjectConfigProjectConfigResponseDefinitionDAConfigDefinitionPropertiesToMap(model *projectv1.ProjectConfigResponseDefinitionDAConfigDefinitionProperties) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.ComplianceProfile != nil { + complianceProfileMap, err := dataSourceIbmProjectConfigProjectComplianceProfileToMap(model.ComplianceProfile) + if err != nil { + return modelMap, err + } + modelMap["compliance_profile"] = []map[string]interface{}{complianceProfileMap} + } + if model.LocatorID != nil { + modelMap["locator_id"] = model.LocatorID + } if model.Description != nil { modelMap["description"] = model.Description } 
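For context on the inputs, settings, and outputs handling in data_source_ibm_project_config.go: the outputs value attribute is now schema.TypeMap with string elements, and the definition's inputs and settings are flattened the same way, so arbitrary JSON values returned by the API are rendered with fmt.Sprintf("%v", v) rather than the earlier json.Marshal round-trip. A small self-contained sketch of that flattening (the helper name is illustrative, not the provider's code):

package project_example

import "fmt"

// flattenToStringMap renders arbitrary decoded JSON values as strings so the
// result fits a schema.TypeMap with Elem: schema.TypeString. The conversion
// is one-way: nested objects and arrays come out as their Go string form,
// e.g. {"replicas": 3, "region": "us-south"} becomes
// {"replicas": "3", "region": "us-south"}.
func flattenToStringMap(in map[string]interface{}) map[string]interface{} {
	out := make(map[string]interface{}, len(in))
	for k, v := range in {
		out[k] = fmt.Sprintf("%v", v)
	}
	return out
}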
+ if model.Name != nil { + modelMap["name"] = model.Name + } if model.EnvironmentID != nil { modelMap["environment_id"] = model.EnvironmentID } @@ -618,69 +737,57 @@ func dataSourceIbmProjectConfigProjectConfigResponseDefinitionToMap(model *proje } modelMap["authorizations"] = []map[string]interface{}{authorizationsMap} } - if model.ComplianceProfile != nil { - complianceProfileMap, err := dataSourceIbmProjectConfigProjectComplianceProfileToMap(model.ComplianceProfile) - if err != nil { - return modelMap, err - } - modelMap["compliance_profile"] = []map[string]interface{}{complianceProfileMap} - } - modelMap["locator_id"] = model.LocatorID if model.Inputs != nil { inputs := make(map[string]interface{}) for k, v := range model.Inputs { - bytes, err := json.Marshal(v) - if err != nil { - return modelMap, err - } - inputs[k] = string(bytes) + inputs[k] = fmt.Sprintf("%v", v) } modelMap["inputs"] = inputs } if model.Settings != nil { settings := make(map[string]interface{}) for k, v := range model.Settings { - bytes, err := json.Marshal(v) - if err != nil { - return modelMap, err - } - settings[k] = string(bytes) + settings[k] = fmt.Sprintf("%v", v) } modelMap["settings"] = settings } return modelMap, nil } -func dataSourceIbmProjectConfigProjectConfigAuthToMap(model *projectv1.ProjectConfigAuth) (map[string]interface{}, error) { +func dataSourceIbmProjectConfigProjectConfigResponseDefinitionResourceConfigDefinitionPropertiesToMap(model *projectv1.ProjectConfigResponseDefinitionResourceConfigDefinitionProperties) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - if model.TrustedProfileID != nil { - modelMap["trusted_profile_id"] = model.TrustedProfileID + if model.ResourceCrns != nil { + modelMap["resource_crns"] = model.ResourceCrns } - if model.Method != nil { - modelMap["method"] = model.Method + if model.Description != nil { + modelMap["description"] = model.Description } - if model.ApiKey != nil { - modelMap["api_key"] = model.ApiKey + if model.Name != nil { + modelMap["name"] = model.Name } - return modelMap, nil -} - -func dataSourceIbmProjectConfigProjectComplianceProfileToMap(model *projectv1.ProjectComplianceProfile) (map[string]interface{}, error) { - modelMap := make(map[string]interface{}) - if model.ID != nil { - modelMap["id"] = model.ID - } - if model.InstanceID != nil { - modelMap["instance_id"] = model.InstanceID + if model.EnvironmentID != nil { + modelMap["environment_id"] = model.EnvironmentID } - if model.InstanceLocation != nil { - modelMap["instance_location"] = model.InstanceLocation + if model.Authorizations != nil { + authorizationsMap, err := dataSourceIbmProjectConfigProjectConfigAuthToMap(model.Authorizations) + if err != nil { + return modelMap, err + } + modelMap["authorizations"] = []map[string]interface{}{authorizationsMap} } - if model.AttachmentID != nil { - modelMap["attachment_id"] = model.AttachmentID + if model.Inputs != nil { + inputs := make(map[string]interface{}) + for k, v := range model.Inputs { + inputs[k] = fmt.Sprintf("%v", v) + } + modelMap["inputs"] = inputs } - if model.ProfileName != nil { - modelMap["profile_name"] = model.ProfileName + if model.Settings != nil { + settings := make(map[string]interface{}) + for k, v := range model.Settings { + settings[k] = fmt.Sprintf("%v", v) + } + modelMap["settings"] = settings } return modelMap, nil } diff --git a/ibm/service/project/data_source_ibm_project_config_test.go b/ibm/service/project/data_source_ibm_project_config_test.go index e009ba0704..3218341c74 100644 --- 
a/ibm/service/project/data_source_ibm_project_config_test.go +++ b/ibm/service/project/data_source_ibm_project_config_test.go @@ -1,4 +1,4 @@ -// Copyright IBM Corp. 2023 All Rights Reserved. +// Copyright IBM Corp. 2024 All Rights Reserved. // Licensed under the Mozilla Public License v2.0 package project_test @@ -27,9 +27,10 @@ func TestAccIbmProjectConfigDataSourceBasic(t *testing.T) { resource.TestCheckResourceAttrSet("data.ibm_project_config.project_config_instance", "is_draft"), resource.TestCheckResourceAttrSet("data.ibm_project_config.project_config_instance", "created_at"), resource.TestCheckResourceAttrSet("data.ibm_project_config.project_config_instance", "modified_at"), + resource.TestCheckResourceAttrSet("data.ibm_project_config.project_config_instance", "outputs.#"), resource.TestCheckResourceAttrSet("data.ibm_project_config.project_config_instance", "project.#"), resource.TestCheckResourceAttrSet("data.ibm_project_config.project_config_instance", "state"), - resource.TestCheckResourceAttrSet("data.ibm_project_config.project_config_instance", "update_available"), + resource.TestCheckResourceAttrSet("data.ibm_project_config.project_config_instance", "href"), resource.TestCheckResourceAttrSet("data.ibm_project_config.project_config_instance", "definition.#"), ), }, diff --git a/ibm/service/project/data_source_ibm_project_environment.go b/ibm/service/project/data_source_ibm_project_environment.go index dc095eea94..e3d317edb6 100644 --- a/ibm/service/project/data_source_ibm_project_environment.go +++ b/ibm/service/project/data_source_ibm_project_environment.go @@ -1,11 +1,10 @@ -// Copyright IBM Corp. 2023 All Rights Reserved. +// Copyright IBM Corp. 2024 All Rights Reserved. // Licensed under the Mozilla Public License v2.0 package project import ( "context" - "encoding/json" "fmt" "log" @@ -85,21 +84,26 @@ func DataSourceIbmProjectEnvironment() *schema.Resource { Computed: true, Description: "A date and time value in the format YYYY-MM-DDTHH:mm:ssZ or YYYY-MM-DDTHH:mm:ss.sssZ, matching the date and time format as specified by RFC 3339.", }, + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "A URL.", + }, "definition": &schema.Schema{ Type: schema.TypeList, Computed: true, Description: "The environment definition.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ + "description": &schema.Schema{ Type: schema.TypeString, Computed: true, - Description: "The name of the environment. It is unique within the account across projects and regions.", + Description: "The description of the environment.", }, - "description": &schema.Schema{ + "name": &schema.Schema{ Type: schema.TypeString, Computed: true, - Description: "The description of the environment.", + Description: "The name of the environment. 
It is unique within the account across projects and regions.", }, "authorizations": &schema.Schema{ Type: schema.TypeList, @@ -218,6 +222,10 @@ func dataSourceIbmProjectEnvironmentRead(context context.Context, d *schema.Reso return diag.FromErr(fmt.Errorf("Error setting modified_at: %s", err)) } + if err = d.Set("href", environment.Href); err != nil { + return diag.FromErr(fmt.Errorf("Error setting href: %s", err)) + } + definition := []map[string]interface{}{} if environment.Definition != nil { modelMap, err := dataSourceIbmProjectEnvironmentEnvironmentDefinitionRequiredPropertiesToMap(environment.Definition) @@ -254,10 +262,10 @@ func dataSourceIbmProjectEnvironmentProjectDefinitionReferenceToMap(model *proje func dataSourceIbmProjectEnvironmentEnvironmentDefinitionRequiredPropertiesToMap(model *projectv1.EnvironmentDefinitionRequiredProperties) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - modelMap["name"] = model.Name if model.Description != nil { modelMap["description"] = model.Description } + modelMap["name"] = model.Name if model.Authorizations != nil { authorizationsMap, err := dataSourceIbmProjectEnvironmentProjectConfigAuthToMap(model.Authorizations) if err != nil { @@ -268,11 +276,7 @@ func dataSourceIbmProjectEnvironmentEnvironmentDefinitionRequiredPropertiesToMap if model.Inputs != nil { inputs := make(map[string]interface{}) for k, v := range model.Inputs { - bytes, err := json.Marshal(v) - if err != nil { - return modelMap, err - } - inputs[k] = string(bytes) + inputs[k] = fmt.Sprintf("%v", v) } modelMap["inputs"] = inputs } diff --git a/ibm/service/project/data_source_ibm_project_environment_test.go b/ibm/service/project/data_source_ibm_project_environment_test.go index 7edd07a90e..b8226f3ee3 100644 --- a/ibm/service/project/data_source_ibm_project_environment_test.go +++ b/ibm/service/project/data_source_ibm_project_environment_test.go @@ -1,4 +1,4 @@ -// Copyright IBM Corp. 2023 All Rights Reserved. +// Copyright IBM Corp. 2024 All Rights Reserved. // Licensed under the Mozilla Public License v2.0 package project_test @@ -26,6 +26,7 @@ func TestAccIbmProjectEnvironmentDataSourceBasic(t *testing.T) { resource.TestCheckResourceAttrSet("data.ibm_project_environment.project_environment_instance", "project.#"), resource.TestCheckResourceAttrSet("data.ibm_project_environment.project_environment_instance", "created_at"), resource.TestCheckResourceAttrSet("data.ibm_project_environment.project_environment_instance", "modified_at"), + resource.TestCheckResourceAttrSet("data.ibm_project_environment.project_environment_instance", "href"), resource.TestCheckResourceAttrSet("data.ibm_project_environment.project_environment_instance", "definition.#"), ), }, diff --git a/ibm/service/project/data_source_ibm_project_test.go b/ibm/service/project/data_source_ibm_project_test.go index e8e3ade5c8..f0eec67569 100644 --- a/ibm/service/project/data_source_ibm_project_test.go +++ b/ibm/service/project/data_source_ibm_project_test.go @@ -1,4 +1,4 @@ -// Copyright IBM Corp. 2023 All Rights Reserved. +// Copyright IBM Corp. 2024 All Rights Reserved. 
// Licensed under the Mozilla Public License v2.0 package project_test @@ -27,10 +27,14 @@ func TestAccIbmProjectDataSourceBasic(t *testing.T) { resource.TestCheckResourceAttrSet("data.ibm_project.project_instance", "project_id"), resource.TestCheckResourceAttrSet("data.ibm_project.project_instance", "crn"), resource.TestCheckResourceAttrSet("data.ibm_project.project_instance", "created_at"), + resource.TestCheckResourceAttrSet("data.ibm_project.project_instance", "cumulative_needs_attention_view.#"), resource.TestCheckResourceAttrSet("data.ibm_project.project_instance", "location"), resource.TestCheckResourceAttrSet("data.ibm_project.project_instance", "resource_group_id"), resource.TestCheckResourceAttrSet("data.ibm_project.project_instance", "state"), + resource.TestCheckResourceAttrSet("data.ibm_project.project_instance", "href"), resource.TestCheckResourceAttrSet("data.ibm_project.project_instance", "resource_group"), + resource.TestCheckResourceAttrSet("data.ibm_project.project_instance", "configs.#"), + resource.TestCheckResourceAttrSet("data.ibm_project.project_instance", "environments.#"), resource.TestCheckResourceAttrSet("data.ibm_project.project_instance", "definition.#"), ), }, diff --git a/ibm/service/project/resource_ibm_project.go b/ibm/service/project/resource_ibm_project.go index 4e51734983..dfd7610586 100644 --- a/ibm/service/project/resource_ibm_project.go +++ b/ibm/service/project/resource_ibm_project.go @@ -1,11 +1,10 @@ -// Copyright IBM Corp. 2023 All Rights Reserved. +// Copyright IBM Corp. 2024 All Rights Reserved. // Licensed under the Mozilla Public License v2.0 package project import ( "context" - "encoding/json" "fmt" "log" @@ -39,7 +38,7 @@ func ResourceIbmProject() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - Description: "The resource group where the project's data and tools are created.", + Description: "The resource group name where the project's data and tools are created.", }, "definition": &schema.Schema{ Type: schema.TypeList, @@ -54,16 +53,16 @@ func ResourceIbmProject() *schema.Resource { Required: true, Description: "The name of the project. It is unique within the account across regions.", }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "A brief explanation of the project's use in the configuration of a deployable architecture. It is possible to create a project without providing a description.", - }, "destroy_on_delete": &schema.Schema{ Type: schema.TypeBool, Required: true, Description: "The policy that indicates whether the resources are destroyed or not when a project is deleted.", }, + "description": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "A brief explanation of the project's use in the configuration of a deployable architecture. 
It is possible to create a project without providing a description.", + }, }, }, }, @@ -121,6 +120,11 @@ func ResourceIbmProject() *schema.Resource { Computed: true, Description: "The project status value.", }, + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "A URL.", + }, "event_notifications_crn": &schema.Schema{ Type: schema.TypeString, Computed: true, @@ -168,15 +172,15 @@ func ResourceIbmProject() *schema.Resource { Description: "The name and description of a project configuration.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ + "description": &schema.Schema{ Type: schema.TypeString, Computed: true, - Description: "The configuration name. It is unique within the account across projects and regions.", + Description: "A project configuration description.", }, - "description": &schema.Schema{ + "name": &schema.Schema{ Type: schema.TypeString, Computed: true, - Description: "A project configuration description.", + Description: "The configuration name. It is unique within the account across projects and regions.", }, }, }, @@ -219,6 +223,11 @@ func ResourceIbmProject() *schema.Resource { }, }, }, + "deployment_model": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The configuration type.", + }, }, }, }, @@ -287,15 +296,15 @@ func ResourceIbmProject() *schema.Resource { Description: "The environment definition used in the project collection.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ + "description": &schema.Schema{ Type: schema.TypeString, Computed: true, - Description: "The name of the environment. It is unique within the account across projects and regions.", + Description: "The description of the environment.", }, - "description": &schema.Schema{ + "name": &schema.Schema{ Type: schema.TypeString, Computed: true, - Description: "The description of the environment.", + Description: "The name of the environment. 
It is unique within the account across projects and regions.", }, }, }, @@ -349,6 +358,30 @@ func resourceIbmProjectCreate(context context.Context, d *schema.ResourceData, m createProjectOptions.SetDefinition(definitionModel) createProjectOptions.SetLocation(d.Get("location").(string)) createProjectOptions.SetResourceGroup(d.Get("resource_group").(string)) + if _, ok := d.GetOk("configs"); ok { + var configs []projectv1.ProjectConfigPrototype + for _, v := range d.Get("configs").([]interface{}) { + value := v.(map[string]interface{}) + configsItem, err := resourceIbmProjectMapToProjectConfigPrototype(value) + if err != nil { + return diag.FromErr(err) + } + configs = append(configs, *configsItem) + } + createProjectOptions.SetConfigs(configs) + } + if _, ok := d.GetOk("environments"); ok { + var environments []projectv1.EnvironmentPrototype + for _, v := range d.Get("environments").([]interface{}) { + value := v.(map[string]interface{}) + environmentsItem, err := resourceIbmProjectMapToEnvironmentPrototype(value) + if err != nil { + return diag.FromErr(err) + } + environments = append(environments, *environmentsItem) + } + createProjectOptions.SetEnvironments(environments) + } project, response, err := projectClient.CreateProjectWithContext(context, createProjectOptions) if err != nil { @@ -400,18 +433,16 @@ func resourceIbmProjectRead(context context.Context, d *schema.ResourceData, met if err = d.Set("created_at", flex.DateTimeToString(project.CreatedAt)); err != nil { return diag.FromErr(fmt.Errorf("Error setting created_at: %s", err)) } - if !core.IsNil(project.CumulativeNeedsAttentionView) { - cumulativeNeedsAttentionView := []map[string]interface{}{} - for _, cumulativeNeedsAttentionViewItem := range project.CumulativeNeedsAttentionView { - cumulativeNeedsAttentionViewItemMap, err := resourceIbmProjectCumulativeNeedsAttentionToMap(&cumulativeNeedsAttentionViewItem) - if err != nil { - return diag.FromErr(err) - } - cumulativeNeedsAttentionView = append(cumulativeNeedsAttentionView, cumulativeNeedsAttentionViewItemMap) - } - if err = d.Set("cumulative_needs_attention_view", cumulativeNeedsAttentionView); err != nil { - return diag.FromErr(fmt.Errorf("Error setting cumulative_needs_attention_view: %s", err)) + cumulativeNeedsAttentionView := []map[string]interface{}{} + for _, cumulativeNeedsAttentionViewItem := range project.CumulativeNeedsAttentionView { + cumulativeNeedsAttentionViewItemMap, err := resourceIbmProjectCumulativeNeedsAttentionToMap(&cumulativeNeedsAttentionViewItem) + if err != nil { + return diag.FromErr(err) } + cumulativeNeedsAttentionView = append(cumulativeNeedsAttentionView, cumulativeNeedsAttentionViewItemMap) + } + if err = d.Set("cumulative_needs_attention_view", cumulativeNeedsAttentionView); err != nil { + return diag.FromErr(fmt.Errorf("Error setting cumulative_needs_attention_view: %s", err)) } if !core.IsNil(project.CumulativeNeedsAttentionViewError) { if err = d.Set("cumulative_needs_attention_view_error", project.CumulativeNeedsAttentionViewError); err != nil { @@ -424,36 +455,35 @@ func resourceIbmProjectRead(context context.Context, d *schema.ResourceData, met if err = d.Set("state", project.State); err != nil { return diag.FromErr(fmt.Errorf("Error setting state: %s", err)) } + if err = d.Set("href", project.Href); err != nil { + return diag.FromErr(fmt.Errorf("Error setting href: %s", err)) + } if !core.IsNil(project.EventNotificationsCrn) { if err = d.Set("event_notifications_crn", project.EventNotificationsCrn); err != nil { return 
diag.FromErr(fmt.Errorf("Error setting event_notifications_crn: %s", err)) } } - if !core.IsNil(project.Configs) { - configs := []map[string]interface{}{} - for _, configsItem := range project.Configs { - configsItemMap, err := resourceIbmProjectProjectConfigSummaryToMap(&configsItem) - if err != nil { - return diag.FromErr(err) - } - configs = append(configs, configsItemMap) - } - if err = d.Set("configs", configs); err != nil { - return diag.FromErr(fmt.Errorf("Error setting configs: %s", err)) + configs := []map[string]interface{}{} + for _, configsItem := range project.Configs { + configsItemMap, err := resourceIbmProjectProjectConfigSummaryToMap(&configsItem) + if err != nil { + return diag.FromErr(err) } + configs = append(configs, configsItemMap) } - if !core.IsNil(project.Environments) { - environments := []map[string]interface{}{} - for _, environmentsItem := range project.Environments { - environmentsItemMap, err := resourceIbmProjectProjectEnvironmentSummaryToMap(&environmentsItem) - if err != nil { - return diag.FromErr(err) - } - environments = append(environments, environmentsItemMap) - } - if err = d.Set("environments", environments); err != nil { - return diag.FromErr(fmt.Errorf("Error setting environments: %s", err)) + if err = d.Set("configs", configs); err != nil { + return diag.FromErr(fmt.Errorf("Error setting configs: %s", err)) + } + environments := []map[string]interface{}{} + for _, environmentsItem := range project.Environments { + environmentsItemMap, err := resourceIbmProjectProjectEnvironmentSummaryToMap(&environmentsItem) + if err != nil { + return diag.FromErr(err) } + environments = append(environments, environmentsItemMap) + } + if err = d.Set("environments", environments); err != nil { + return diag.FromErr(fmt.Errorf("Error setting environments: %s", err)) } return nil @@ -515,18 +545,18 @@ func resourceIbmProjectDelete(context context.Context, d *schema.ResourceData, m func resourceIbmProjectMapToProjectPrototypeDefinition(modelMap map[string]interface{}) (*projectv1.ProjectPrototypeDefinition, error) { model := &projectv1.ProjectPrototypeDefinition{} model.Name = core.StringPtr(modelMap["name"].(string)) - if modelMap["description"] != nil && modelMap["description"].(string) != "" { - model.Description = core.StringPtr(modelMap["description"].(string)) - } if modelMap["destroy_on_delete"] != nil { model.DestroyOnDelete = core.BoolPtr(modelMap["destroy_on_delete"].(bool)) } + if modelMap["description"] != nil && modelMap["description"].(string) != "" { + model.Description = core.StringPtr(modelMap["description"].(string)) + } return model, nil } func resourceIbmProjectMapToProjectConfigPrototype(modelMap map[string]interface{}) (*projectv1.ProjectConfigPrototype, error) { model := &projectv1.ProjectConfigPrototype{} - DefinitionModel, err := resourceIbmProjectMapToProjectConfigPrototypeDefinitionBlock(modelMap["definition"].([]interface{})[0].(map[string]interface{})) + DefinitionModel, err := resourceIbmProjectMapToProjectConfigDefinitionBlockPrototype(modelMap["definition"].([]interface{})[0].(map[string]interface{})) if err != nil { return model, err } @@ -541,12 +571,22 @@ func resourceIbmProjectMapToProjectConfigPrototype(modelMap map[string]interface return model, nil } -func resourceIbmProjectMapToProjectConfigPrototypeDefinitionBlock(modelMap map[string]interface{}) (*projectv1.ProjectConfigPrototypeDefinitionBlock, error) { - model := &projectv1.ProjectConfigPrototypeDefinitionBlock{} - model.Name = core.StringPtr(modelMap["name"].(string)) 
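On the definition handling in this file and in the data source earlier: the project SDK models a configuration definition as a discriminated union exposed through a ...Intf interface. The read path recovers the concrete variant with a type assertion switch (see dataSourceIbmProjectConfigProjectConfigResponseDefinitionToMap above), while the prototype mapper below populates the base ProjectConfigDefinitionBlockPrototype struct, which satisfies projectv1.ProjectConfigDefinitionBlockPrototypeIntf whether the block carries a locator_id (deployable architecture) or resource_crns (existing resources). A minimal sketch of the dispatch pattern, using stand-in types rather than the SDK's:

package project_example

import "fmt"

// Stand-in types mirroring the shape of a go-sdk discriminated union: a
// marker interface plus concrete variants.
type definitionIntf interface{ isDefinition() }

type daDefinition struct{ LocatorID string }
type resourceDefinition struct{ ResourceCrns []string }

func (daDefinition) isDefinition()       {}
func (resourceDefinition) isDefinition() {}

// definitionToMap dispatches on the concrete variant, the same way the data
// source's ToMap helper does for projectv1.ProjectConfigResponseDefinitionIntf.
func definitionToMap(model definitionIntf) (map[string]interface{}, error) {
	switch m := model.(type) {
	case *daDefinition:
		return map[string]interface{}{"locator_id": m.LocatorID}, nil
	case *resourceDefinition:
		return map[string]interface{}{"resource_crns": m.ResourceCrns}, nil
	default:
		return nil, fmt.Errorf("unrecognized definition subtype %T", m)
	}
}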
+func resourceIbmProjectMapToProjectConfigDefinitionBlockPrototype(modelMap map[string]interface{}) (projectv1.ProjectConfigDefinitionBlockPrototypeIntf, error) { + model := &projectv1.ProjectConfigDefinitionBlockPrototype{} + if modelMap["compliance_profile"] != nil && len(modelMap["compliance_profile"].([]interface{})) > 0 { + ComplianceProfileModel, err := resourceIbmProjectMapToProjectComplianceProfile(modelMap["compliance_profile"].([]interface{})[0].(map[string]interface{})) + if err != nil { + return model, err + } + model.ComplianceProfile = ComplianceProfileModel + } + if modelMap["locator_id"] != nil && modelMap["locator_id"].(string) != "" { + model.LocatorID = core.StringPtr(modelMap["locator_id"].(string)) + } if modelMap["description"] != nil && modelMap["description"].(string) != "" { model.Description = core.StringPtr(modelMap["description"].(string)) } + model.Name = core.StringPtr(modelMap["name"].(string)) if modelMap["environment_id"] != nil && modelMap["environment_id"].(string) != "" { model.EnvironmentID = core.StringPtr(modelMap["environment_id"].(string)) } @@ -557,27 +597,38 @@ func resourceIbmProjectMapToProjectConfigPrototypeDefinitionBlock(modelMap map[s } model.Authorizations = AuthorizationsModel } - if modelMap["compliance_profile"] != nil && len(modelMap["compliance_profile"].([]interface{})) > 0 { - ComplianceProfileModel, err := resourceIbmProjectMapToProjectComplianceProfile(modelMap["compliance_profile"].([]interface{})[0].(map[string]interface{})) - if err != nil { - return model, err + if modelMap["inputs"] != nil { + model.Inputs = modelMap["inputs"].(map[string]interface{}) + } + if modelMap["settings"] != nil { + model.Settings = modelMap["settings"].(map[string]interface{}) + } + if modelMap["resource_crns"] != nil { + resourceCrns := []string{} + for _, resourceCrnsItem := range modelMap["resource_crns"].([]interface{}) { + resourceCrns = append(resourceCrns, resourceCrnsItem.(string)) } - model.ComplianceProfile = ComplianceProfileModel + model.ResourceCrns = resourceCrns } - if modelMap["locator_id"] != nil && modelMap["locator_id"].(string) != "" { - model.LocatorID = core.StringPtr(modelMap["locator_id"].(string)) + return model, nil +} + +func resourceIbmProjectMapToProjectComplianceProfile(modelMap map[string]interface{}) (*projectv1.ProjectComplianceProfile, error) { + model := &projectv1.ProjectComplianceProfile{} + if modelMap["id"] != nil && modelMap["id"].(string) != "" { + model.ID = core.StringPtr(modelMap["id"].(string)) } - if modelMap["inputs"] != nil { - bytes, _ := json.Marshal(modelMap["inputs"].(map[string]interface{})) - newMap := make(map[string]interface{}) - json.Unmarshal(bytes, &newMap) - model.Inputs = newMap + if modelMap["instance_id"] != nil && modelMap["instance_id"].(string) != "" { + model.InstanceID = core.StringPtr(modelMap["instance_id"].(string)) } - if modelMap["settings"] != nil { - bytes, _ := json.Marshal(modelMap["settings"].(map[string]interface{})) - newMap := make(map[string]interface{}) - json.Unmarshal(bytes, &newMap) - model.Settings = newMap + if modelMap["instance_location"] != nil && modelMap["instance_location"].(string) != "" { + model.InstanceLocation = core.StringPtr(modelMap["instance_location"].(string)) + } + if modelMap["attachment_id"] != nil && modelMap["attachment_id"].(string) != "" { + model.AttachmentID = core.StringPtr(modelMap["attachment_id"].(string)) + } + if modelMap["profile_name"] != nil && modelMap["profile_name"].(string) != "" { + model.ProfileName = 
core.StringPtr(modelMap["profile_name"].(string)) } return model, nil } @@ -596,22 +647,73 @@ func resourceIbmProjectMapToProjectConfigAuth(modelMap map[string]interface{}) ( return model, nil } -func resourceIbmProjectMapToProjectComplianceProfile(modelMap map[string]interface{}) (*projectv1.ProjectComplianceProfile, error) { - model := &projectv1.ProjectComplianceProfile{} - if modelMap["id"] != nil && modelMap["id"].(string) != "" { - model.ID = core.StringPtr(modelMap["id"].(string)) +func resourceIbmProjectMapToProjectConfigDefinitionBlockPrototypeDAConfigDefinitionProperties(modelMap map[string]interface{}) (*projectv1.ProjectConfigDefinitionBlockPrototypeDAConfigDefinitionProperties, error) { + model := &projectv1.ProjectConfigDefinitionBlockPrototypeDAConfigDefinitionProperties{} + if modelMap["compliance_profile"] != nil && len(modelMap["compliance_profile"].([]interface{})) > 0 { + ComplianceProfileModel, err := resourceIbmProjectMapToProjectComplianceProfile(modelMap["compliance_profile"].([]interface{})[0].(map[string]interface{})) + if err != nil { + return model, err + } + model.ComplianceProfile = ComplianceProfileModel } - if modelMap["instance_id"] != nil && modelMap["instance_id"].(string) != "" { - model.InstanceID = core.StringPtr(modelMap["instance_id"].(string)) + if modelMap["locator_id"] != nil && modelMap["locator_id"].(string) != "" { + model.LocatorID = core.StringPtr(modelMap["locator_id"].(string)) } - if modelMap["instance_location"] != nil && modelMap["instance_location"].(string) != "" { - model.InstanceLocation = core.StringPtr(modelMap["instance_location"].(string)) + if modelMap["description"] != nil && modelMap["description"].(string) != "" { + model.Description = core.StringPtr(modelMap["description"].(string)) } - if modelMap["attachment_id"] != nil && modelMap["attachment_id"].(string) != "" { - model.AttachmentID = core.StringPtr(modelMap["attachment_id"].(string)) + if modelMap["name"] != nil && modelMap["name"].(string) != "" { + model.Name = core.StringPtr(modelMap["name"].(string)) } - if modelMap["profile_name"] != nil && modelMap["profile_name"].(string) != "" { - model.ProfileName = core.StringPtr(modelMap["profile_name"].(string)) + if modelMap["environment_id"] != nil && modelMap["environment_id"].(string) != "" { + model.EnvironmentID = core.StringPtr(modelMap["environment_id"].(string)) + } + if modelMap["authorizations"] != nil && len(modelMap["authorizations"].([]interface{})) > 0 { + AuthorizationsModel, err := resourceIbmProjectMapToProjectConfigAuth(modelMap["authorizations"].([]interface{})[0].(map[string]interface{})) + if err != nil { + return model, err + } + model.Authorizations = AuthorizationsModel + } + if modelMap["inputs"] != nil { + model.Inputs = modelMap["inputs"].(map[string]interface{}) + } + if modelMap["settings"] != nil { + model.Settings = modelMap["settings"].(map[string]interface{}) + } + return model, nil +} + +func resourceIbmProjectMapToProjectConfigDefinitionBlockPrototypeResourceConfigDefinitionProperties(modelMap map[string]interface{}) (*projectv1.ProjectConfigDefinitionBlockPrototypeResourceConfigDefinitionProperties, error) { + model := &projectv1.ProjectConfigDefinitionBlockPrototypeResourceConfigDefinitionProperties{} + if modelMap["resource_crns"] != nil { + resourceCrns := []string{} + for _, resourceCrnsItem := range modelMap["resource_crns"].([]interface{}) { + resourceCrns = append(resourceCrns, resourceCrnsItem.(string)) + } + model.ResourceCrns = resourceCrns + } + if modelMap["description"] != 
nil && modelMap["description"].(string) != "" { + model.Description = core.StringPtr(modelMap["description"].(string)) + } + if modelMap["name"] != nil && modelMap["name"].(string) != "" { + model.Name = core.StringPtr(modelMap["name"].(string)) + } + if modelMap["environment_id"] != nil && modelMap["environment_id"].(string) != "" { + model.EnvironmentID = core.StringPtr(modelMap["environment_id"].(string)) + } + if modelMap["authorizations"] != nil && len(modelMap["authorizations"].([]interface{})) > 0 { + AuthorizationsModel, err := resourceIbmProjectMapToProjectConfigAuth(modelMap["authorizations"].([]interface{})[0].(map[string]interface{})) + if err != nil { + return model, err + } + model.Authorizations = AuthorizationsModel + } + if modelMap["inputs"] != nil { + model.Inputs = modelMap["inputs"].(map[string]interface{}) + } + if modelMap["settings"] != nil { + model.Settings = modelMap["settings"].(map[string]interface{}) } return model, nil } @@ -624,27 +726,61 @@ func resourceIbmProjectMapToSchematicsWorkspace(modelMap map[string]interface{}) return model, nil } +func resourceIbmProjectMapToEnvironmentPrototype(modelMap map[string]interface{}) (*projectv1.EnvironmentPrototype, error) { + model := &projectv1.EnvironmentPrototype{} + DefinitionModel, err := resourceIbmProjectMapToEnvironmentDefinitionRequiredProperties(modelMap["definition"].([]interface{})[0].(map[string]interface{})) + if err != nil { + return model, err + } + model.Definition = DefinitionModel + return model, nil +} + +func resourceIbmProjectMapToEnvironmentDefinitionRequiredProperties(modelMap map[string]interface{}) (*projectv1.EnvironmentDefinitionRequiredProperties, error) { + model := &projectv1.EnvironmentDefinitionRequiredProperties{} + if modelMap["description"] != nil && modelMap["description"].(string) != "" { + model.Description = core.StringPtr(modelMap["description"].(string)) + } + model.Name = core.StringPtr(modelMap["name"].(string)) + if modelMap["authorizations"] != nil && len(modelMap["authorizations"].([]interface{})) > 0 { + AuthorizationsModel, err := resourceIbmProjectMapToProjectConfigAuth(modelMap["authorizations"].([]interface{})[0].(map[string]interface{})) + if err != nil { + return model, err + } + model.Authorizations = AuthorizationsModel + } + if modelMap["inputs"] != nil { + model.Inputs = modelMap["inputs"].(map[string]interface{}) + } + if modelMap["compliance_profile"] != nil && len(modelMap["compliance_profile"].([]interface{})) > 0 { + ComplianceProfileModel, err := resourceIbmProjectMapToProjectComplianceProfile(modelMap["compliance_profile"].([]interface{})[0].(map[string]interface{})) + if err != nil { + return model, err + } + model.ComplianceProfile = ComplianceProfileModel + } + return model, nil +} + func resourceIbmProjectMapToProjectPatchDefinitionBlock(modelMap map[string]interface{}) (*projectv1.ProjectPatchDefinitionBlock, error) { model := &projectv1.ProjectPatchDefinitionBlock{} if modelMap["name"] != nil && modelMap["name"].(string) != "" { model.Name = core.StringPtr(modelMap["name"].(string)) } - if modelMap["description"] != nil && modelMap["description"].(string) != "" { - model.Description = core.StringPtr(modelMap["description"].(string)) - } if modelMap["destroy_on_delete"] != nil { model.DestroyOnDelete = core.BoolPtr(modelMap["destroy_on_delete"].(bool)) } + if modelMap["description"] != nil && modelMap["description"].(string) != "" { + model.Description = core.StringPtr(modelMap["description"].(string)) + } return model, nil } func 
resourceIbmProjectProjectDefinitionPropertiesToMap(model *projectv1.ProjectDefinitionProperties) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) modelMap["name"] = model.Name - if model.Description != nil { - modelMap["description"] = model.Description - } modelMap["destroy_on_delete"] = model.DestroyOnDelete + modelMap["description"] = model.Description return modelMap, nil } @@ -687,7 +823,7 @@ func resourceIbmProjectProjectConfigSummaryToMap(model *projectv1.ProjectConfigS modelMap["created_at"] = model.CreatedAt.String() modelMap["modified_at"] = model.ModifiedAt.String() modelMap["href"] = model.Href - definitionMap, err := resourceIbmProjectProjectConfigDefinitionNameDescriptionToMap(model.Definition) + definitionMap, err := resourceIbmProjectProjectConfigSummaryDefinitionToMap(model.Definition) if err != nil { return modelMap, err } @@ -697,6 +833,9 @@ func resourceIbmProjectProjectConfigSummaryToMap(model *projectv1.ProjectConfigS return modelMap, err } modelMap["project"] = []map[string]interface{}{projectMap} + if model.DeploymentModel != nil { + modelMap["deployment_model"] = model.DeploymentModel + } return modelMap, nil } @@ -708,14 +847,14 @@ func resourceIbmProjectProjectConfigVersionSummaryToMap(model *projectv1.Project return modelMap, nil } -func resourceIbmProjectProjectConfigDefinitionNameDescriptionToMap(model *projectv1.ProjectConfigDefinitionNameDescription) (map[string]interface{}, error) { +func resourceIbmProjectProjectConfigSummaryDefinitionToMap(model *projectv1.ProjectConfigSummaryDefinition) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - if model.Name != nil { - modelMap["name"] = model.Name - } if model.Description != nil { modelMap["description"] = model.Description } + if model.Name != nil { + modelMap["name"] = model.Name + } return modelMap, nil } @@ -748,7 +887,7 @@ func resourceIbmProjectProjectEnvironmentSummaryToMap(model *projectv1.ProjectEn modelMap["project"] = []map[string]interface{}{projectMap} modelMap["created_at"] = model.CreatedAt.String() modelMap["href"] = model.Href - definitionMap, err := resourceIbmProjectEnvironmentDefinitionNameDescriptionToMap(model.Definition) + definitionMap, err := resourceIbmProjectProjectEnvironmentSummaryDefinitionToMap(model.Definition) if err != nil { return modelMap, err } @@ -756,13 +895,11 @@ func resourceIbmProjectProjectEnvironmentSummaryToMap(model *projectv1.ProjectEn return modelMap, nil } -func resourceIbmProjectEnvironmentDefinitionNameDescriptionToMap(model *projectv1.EnvironmentDefinitionNameDescription) (map[string]interface{}, error) { +func resourceIbmProjectProjectEnvironmentSummaryDefinitionToMap(model *projectv1.ProjectEnvironmentSummaryDefinition) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - if model.Name != nil { - modelMap["name"] = model.Name - } if model.Description != nil { modelMap["description"] = model.Description } + modelMap["name"] = model.Name return modelMap, nil } diff --git a/ibm/service/project/resource_ibm_project_config.go b/ibm/service/project/resource_ibm_project_config.go index 161c7a41f1..da7c69446e 100644 --- a/ibm/service/project/resource_ibm_project_config.go +++ b/ibm/service/project/resource_ibm_project_config.go @@ -1,11 +1,10 @@ -// Copyright IBM Corp. 2023 All Rights Reserved. +// Copyright IBM Corp. 2024 All Rights Reserved. 
// Licensed under the Mozilla Public License v2.0 package project import ( "context" - "encoding/json" "fmt" "log" @@ -195,54 +194,12 @@ func ResourceIbmProjectConfig() *schema.Resource { }, }, "definition": &schema.Schema{ - Type: schema.TypeList, - MinItems: 1, - MaxItems: 1, - Required: true, - Description: "The name and description of a project configuration.", + Type: schema.TypeList, + MinItems: 1, + MaxItems: 1, + Required: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: "The configuration name. It is unique within the account across projects and regions.", - }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "A project configuration description.", - }, - "environment_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "The ID of the project environment.", - }, - "authorizations": &schema.Schema{ - Type: schema.TypeList, - MaxItems: 1, - Optional: true, - Description: "The authorization details. You can authorize by using a trusted profile or an API key in Secrets Manager.", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "trusted_profile_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "The trusted profile ID.", - }, - "method": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "The authorization method. You can authorize by using a trusted profile or an API key in Secrets Manager.", - }, - "api_key": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Sensitive: true, - Description: "The IBM Cloud API Key.", - }, - }, - }, - }, "compliance_profile": &schema.Schema{ Type: schema.TypeList, MaxItems: 1, @@ -280,10 +237,52 @@ func ResourceIbmProjectConfig() *schema.Resource { }, "locator_id": &schema.Schema{ Type: schema.TypeString, - Required: true, + Optional: true, ForceNew: true, Description: "A unique concatenation of catalogID.versionID that identifies the DA in the catalog. Either schematics.workspace_crn, definition.locator_id, or both must be specified.", }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "", + Description: "A project configuration description.", + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The configuration name. It is unique within the account across projects and regions.", + }, + "environment_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The ID of the project environment.", + }, + "authorizations": &schema.Schema{ + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Description: "The authorization details. You can authorize by using a trusted profile or an API key in Secrets Manager.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "trusted_profile_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The trusted profile ID.", + }, + "method": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The authorization method. 
You can authorize by using a trusted profile or an API key in Secrets Manager.", + }, + "api_key": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Sensitive: true, + Description: "The IBM Cloud API Key.", + }, + }, + }, + }, "inputs": &schema.Schema{ Type: schema.TypeMap, Optional: true, @@ -296,6 +295,12 @@ func ResourceIbmProjectConfig() *schema.Resource { Description: "Schematics environment variables to use to deploy the configuration. Settings are only available if they were specified when the configuration was initially created.", Elem: &schema.Schema{Type: schema.TypeString}, }, + "resource_crns": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "The CRNs of resources associated with this configuration.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, }, }, }, @@ -313,7 +318,7 @@ func ResourceIbmProjectConfig() *schema.Resource { Type: schema.TypeList, Computed: true, Description: "The needs attention state of a configuration.", - Elem: &schema.Schema{Type: schema.TypeString}, + Elem: &schema.Schema{Type: schema.TypeMap, Elem: &schema.Schema{Type: schema.TypeString}}, }, "created_at": &schema.Schema{ Type: schema.TypeString, @@ -347,9 +352,10 @@ func ResourceIbmProjectConfig() *schema.Resource { Description: "A short explanation of the output value.", }, "value": &schema.Schema{ - Type: schema.TypeString, + Type: schema.TypeMap, Computed: true, Description: "Can be any value - a string, number, boolean, array, or object.", + Elem: &schema.Schema{Type: schema.TypeString}, }, }, }, @@ -402,6 +408,11 @@ func ResourceIbmProjectConfig() *schema.Resource { Computed: true, Description: "The flag that indicates whether a configuration update is available.", }, + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "A URL.", + }, "project_config_id": &schema.Schema{ Type: schema.TypeString, Computed: true, @@ -437,7 +448,7 @@ func resourceIbmProjectConfigCreate(context context.Context, d *schema.ResourceD createConfigOptions := &projectv1.CreateConfigOptions{} createConfigOptions.SetProjectID(d.Get("project_id").(string)) - definitionModel, err := resourceIbmProjectConfigMapToProjectConfigPrototypeDefinitionBlock(d.Get("definition.0").(map[string]interface{})) + definitionModel, err := resourceIbmProjectConfigMapToProjectConfigDefinitionBlockPrototype(d.Get("definition.0").(map[string]interface{})) if err != nil { return diag.FromErr(err) } @@ -500,14 +511,12 @@ func resourceIbmProjectConfigRead(context context.Context, d *schema.ResourceDat if err = d.Set("is_draft", projectConfig.IsDraft); err != nil { return diag.FromErr(fmt.Errorf("Error setting is_draft: %s", err)) } - if !core.IsNil(projectConfig.NeedsAttentionState) { - needsAttentionState := []interface{}{} - for _, needsAttentionStateItem := range projectConfig.NeedsAttentionState { - needsAttentionState = append(needsAttentionState, needsAttentionStateItem) - } - if err = d.Set("needs_attention_state", needsAttentionState); err != nil { - return diag.FromErr(fmt.Errorf("Error setting needs_attention_state: %s", err)) - } + needsAttentionState := []interface{}{} + for _, needsAttentionStateItem := range projectConfig.NeedsAttentionState { + needsAttentionState = append(needsAttentionState, needsAttentionStateItem) + } + if err = d.Set("needs_attention_state", needsAttentionState); err != nil { + return diag.FromErr(fmt.Errorf("Error setting needs_attention_state: %s", err)) } if err = d.Set("created_at", flex.DateTimeToString(projectConfig.CreatedAt)); err 
!= nil { return diag.FromErr(fmt.Errorf("Error setting created_at: %s", err)) @@ -520,18 +529,16 @@ func resourceIbmProjectConfigRead(context context.Context, d *schema.ResourceDat return diag.FromErr(fmt.Errorf("Error setting last_saved_at: %s", err)) } } - if !core.IsNil(projectConfig.Outputs) { - outputs := []map[string]interface{}{} - for _, outputsItem := range projectConfig.Outputs { - outputsItemMap, err := resourceIbmProjectConfigOutputValueToMap(&outputsItem) - if err != nil { - return diag.FromErr(err) - } - outputs = append(outputs, outputsItemMap) - } - if err = d.Set("outputs", outputs); err != nil { - return diag.FromErr(fmt.Errorf("Error setting outputs: %s", err)) + outputs := []map[string]interface{}{} + for _, outputsItem := range projectConfig.Outputs { + outputsItemMap, err := resourceIbmProjectConfigOutputValueToMap(&outputsItem) + if err != nil { + return diag.FromErr(err) } + outputs = append(outputs, outputsItemMap) + } + if err = d.Set("outputs", outputs); err != nil { + return diag.FromErr(fmt.Errorf("Error setting outputs: %s", err)) } projectMap, err := resourceIbmProjectConfigProjectReferenceToMap(projectConfig.Project) if err != nil { @@ -543,8 +550,13 @@ func resourceIbmProjectConfigRead(context context.Context, d *schema.ResourceDat if err = d.Set("state", projectConfig.State); err != nil { return diag.FromErr(fmt.Errorf("Error setting state: %s", err)) } - if err = d.Set("update_available", projectConfig.UpdateAvailable); err != nil { - return diag.FromErr(fmt.Errorf("Error setting update_available: %s", err)) + if !core.IsNil(projectConfig.UpdateAvailable) { + if err = d.Set("update_available", projectConfig.UpdateAvailable); err != nil { + return diag.FromErr(fmt.Errorf("Error setting update_available: %s", err)) + } + } + if err = d.Set("href", projectConfig.Href); err != nil { + return diag.FromErr(fmt.Errorf("Error setting href: %s", err)) } if err = d.Set("project_config_id", projectConfig.ID); err != nil { return diag.FromErr(fmt.Errorf("Error setting project_config_id: %s", err)) @@ -571,8 +583,12 @@ func resourceIbmProjectConfigUpdate(context context.Context, d *schema.ResourceD hasChange := false + if d.HasChange("project_id") { + return diag.FromErr(fmt.Errorf("Cannot update resource property \"%s\" with the ForceNew annotation."+ + " The resource must be re-created to update this property.", "project_id")) + } if d.HasChange("definition") { - definition, err := resourceIbmProjectConfigMapToProjectConfigPatchDefinitionBlock(d.Get("definition.0").(map[string]interface{})) + definition, err := resourceIbmProjectConfigMapToProjectConfigDefinitionBlockPatch(d.Get("definition.0").(map[string]interface{})) if err != nil { return diag.FromErr(err) } @@ -618,12 +634,22 @@ func resourceIbmProjectConfigDelete(context context.Context, d *schema.ResourceD return nil } -func resourceIbmProjectConfigMapToProjectConfigPrototypeDefinitionBlock(modelMap map[string]interface{}) (*projectv1.ProjectConfigPrototypeDefinitionBlock, error) { - model := &projectv1.ProjectConfigPrototypeDefinitionBlock{} - model.Name = core.StringPtr(modelMap["name"].(string)) +func resourceIbmProjectConfigMapToProjectConfigDefinitionBlockPrototype(modelMap map[string]interface{}) (projectv1.ProjectConfigDefinitionBlockPrototypeIntf, error) { + model := &projectv1.ProjectConfigDefinitionBlockPrototype{} + if modelMap["compliance_profile"] != nil && len(modelMap["compliance_profile"].([]interface{})) > 0 { + ComplianceProfileModel, err := 
resourceIbmProjectConfigMapToProjectComplianceProfile(modelMap["compliance_profile"].([]interface{})[0].(map[string]interface{})) + if err != nil { + return model, err + } + model.ComplianceProfile = ComplianceProfileModel + } + if modelMap["locator_id"] != nil && modelMap["locator_id"].(string) != "" { + model.LocatorID = core.StringPtr(modelMap["locator_id"].(string)) + } if modelMap["description"] != nil && modelMap["description"].(string) != "" { model.Description = core.StringPtr(modelMap["description"].(string)) } + model.Name = core.StringPtr(modelMap["name"].(string)) if modelMap["environment_id"] != nil && modelMap["environment_id"].(string) != "" { model.EnvironmentID = core.StringPtr(modelMap["environment_id"].(string)) } @@ -634,27 +660,38 @@ func resourceIbmProjectConfigMapToProjectConfigPrototypeDefinitionBlock(modelMap } model.Authorizations = AuthorizationsModel } - if modelMap["compliance_profile"] != nil && len(modelMap["compliance_profile"].([]interface{})) > 0 { - ComplianceProfileModel, err := resourceIbmProjectConfigMapToProjectComplianceProfile(modelMap["compliance_profile"].([]interface{})[0].(map[string]interface{})) - if err != nil { - return model, err + if modelMap["inputs"] != nil { + model.Inputs = modelMap["inputs"].(map[string]interface{}) + } + if modelMap["settings"] != nil { + model.Settings = modelMap["settings"].(map[string]interface{}) + } + if modelMap["resource_crns"] != nil { + resourceCrns := []string{} + for _, resourceCrnsItem := range modelMap["resource_crns"].([]interface{}) { + resourceCrns = append(resourceCrns, resourceCrnsItem.(string)) } - model.ComplianceProfile = ComplianceProfileModel + model.ResourceCrns = resourceCrns } - if modelMap["locator_id"] != nil && modelMap["locator_id"].(string) != "" { - model.LocatorID = core.StringPtr(modelMap["locator_id"].(string)) + return model, nil +} + +func resourceIbmProjectConfigMapToProjectComplianceProfile(modelMap map[string]interface{}) (*projectv1.ProjectComplianceProfile, error) { + model := &projectv1.ProjectComplianceProfile{} + if modelMap["id"] != nil && modelMap["id"].(string) != "" { + model.ID = core.StringPtr(modelMap["id"].(string)) } - if modelMap["inputs"] != nil { - bytes, _ := json.Marshal(modelMap["inputs"].(map[string]interface{})) - newMap := make(map[string]interface{}) - json.Unmarshal(bytes, &newMap) - model.Inputs = newMap + if modelMap["instance_id"] != nil && modelMap["instance_id"].(string) != "" { + model.InstanceID = core.StringPtr(modelMap["instance_id"].(string)) } - if modelMap["settings"] != nil { - bytes, _ := json.Marshal(modelMap["settings"].(map[string]interface{})) - newMap := make(map[string]interface{}) - json.Unmarshal(bytes, &newMap) - model.Settings = newMap + if modelMap["instance_location"] != nil && modelMap["instance_location"].(string) != "" { + model.InstanceLocation = core.StringPtr(modelMap["instance_location"].(string)) + } + if modelMap["attachment_id"] != nil && modelMap["attachment_id"].(string) != "" { + model.AttachmentID = core.StringPtr(modelMap["attachment_id"].(string)) + } + if modelMap["profile_name"] != nil && modelMap["profile_name"].(string) != "" { + model.ProfileName = core.StringPtr(modelMap["profile_name"].(string)) } return model, nil } @@ -673,22 +710,73 @@ func resourceIbmProjectConfigMapToProjectConfigAuth(modelMap map[string]interfac return model, nil } -func resourceIbmProjectConfigMapToProjectComplianceProfile(modelMap map[string]interface{}) (*projectv1.ProjectComplianceProfile, error) { - model := 
&projectv1.ProjectComplianceProfile{} - if modelMap["id"] != nil && modelMap["id"].(string) != "" { - model.ID = core.StringPtr(modelMap["id"].(string)) +func resourceIbmProjectConfigMapToProjectConfigDefinitionBlockPrototypeDAConfigDefinitionProperties(modelMap map[string]interface{}) (*projectv1.ProjectConfigDefinitionBlockPrototypeDAConfigDefinitionProperties, error) { + model := &projectv1.ProjectConfigDefinitionBlockPrototypeDAConfigDefinitionProperties{} + if modelMap["compliance_profile"] != nil && len(modelMap["compliance_profile"].([]interface{})) > 0 { + ComplianceProfileModel, err := resourceIbmProjectConfigMapToProjectComplianceProfile(modelMap["compliance_profile"].([]interface{})[0].(map[string]interface{})) + if err != nil { + return model, err + } + model.ComplianceProfile = ComplianceProfileModel } - if modelMap["instance_id"] != nil && modelMap["instance_id"].(string) != "" { - model.InstanceID = core.StringPtr(modelMap["instance_id"].(string)) + if modelMap["locator_id"] != nil && modelMap["locator_id"].(string) != "" { + model.LocatorID = core.StringPtr(modelMap["locator_id"].(string)) } - if modelMap["instance_location"] != nil && modelMap["instance_location"].(string) != "" { - model.InstanceLocation = core.StringPtr(modelMap["instance_location"].(string)) + if modelMap["description"] != nil && modelMap["description"].(string) != "" { + model.Description = core.StringPtr(modelMap["description"].(string)) } - if modelMap["attachment_id"] != nil && modelMap["attachment_id"].(string) != "" { - model.AttachmentID = core.StringPtr(modelMap["attachment_id"].(string)) + if modelMap["name"] != nil && modelMap["name"].(string) != "" { + model.Name = core.StringPtr(modelMap["name"].(string)) } - if modelMap["profile_name"] != nil && modelMap["profile_name"].(string) != "" { - model.ProfileName = core.StringPtr(modelMap["profile_name"].(string)) + if modelMap["environment_id"] != nil && modelMap["environment_id"].(string) != "" { + model.EnvironmentID = core.StringPtr(modelMap["environment_id"].(string)) + } + if modelMap["authorizations"] != nil && len(modelMap["authorizations"].([]interface{})) > 0 { + AuthorizationsModel, err := resourceIbmProjectConfigMapToProjectConfigAuth(modelMap["authorizations"].([]interface{})[0].(map[string]interface{})) + if err != nil { + return model, err + } + model.Authorizations = AuthorizationsModel + } + if modelMap["inputs"] != nil { + model.Inputs = modelMap["inputs"].(map[string]interface{}) + } + if modelMap["settings"] != nil { + model.Settings = modelMap["settings"].(map[string]interface{}) + } + return model, nil +} + +func resourceIbmProjectConfigMapToProjectConfigDefinitionBlockPrototypeResourceConfigDefinitionProperties(modelMap map[string]interface{}) (*projectv1.ProjectConfigDefinitionBlockPrototypeResourceConfigDefinitionProperties, error) { + model := &projectv1.ProjectConfigDefinitionBlockPrototypeResourceConfigDefinitionProperties{} + if modelMap["resource_crns"] != nil { + resourceCrns := []string{} + for _, resourceCrnsItem := range modelMap["resource_crns"].([]interface{}) { + resourceCrns = append(resourceCrns, resourceCrnsItem.(string)) + } + model.ResourceCrns = resourceCrns + } + if modelMap["description"] != nil && modelMap["description"].(string) != "" { + model.Description = core.StringPtr(modelMap["description"].(string)) + } + if modelMap["name"] != nil && modelMap["name"].(string) != "" { + model.Name = core.StringPtr(modelMap["name"].(string)) + } + if modelMap["environment_id"] != nil && 
modelMap["environment_id"].(string) != "" { + model.EnvironmentID = core.StringPtr(modelMap["environment_id"].(string)) + } + if modelMap["authorizations"] != nil && len(modelMap["authorizations"].([]interface{})) > 0 { + AuthorizationsModel, err := resourceIbmProjectConfigMapToProjectConfigAuth(modelMap["authorizations"].([]interface{})[0].(map[string]interface{})) + if err != nil { + return model, err + } + model.Authorizations = AuthorizationsModel + } + if modelMap["inputs"] != nil { + model.Inputs = modelMap["inputs"].(map[string]interface{}) + } + if modelMap["settings"] != nil { + model.Settings = modelMap["settings"].(map[string]interface{}) } return model, nil } @@ -701,14 +789,24 @@ func resourceIbmProjectConfigMapToSchematicsWorkspace(modelMap map[string]interf return model, nil } -func resourceIbmProjectConfigMapToProjectConfigPatchDefinitionBlock(modelMap map[string]interface{}) (*projectv1.ProjectConfigPatchDefinitionBlock, error) { - model := &projectv1.ProjectConfigPatchDefinitionBlock{} - if modelMap["name"] != nil && modelMap["name"].(string) != "" { - model.Name = core.StringPtr(modelMap["name"].(string)) +func resourceIbmProjectConfigMapToProjectConfigDefinitionBlockPatch(modelMap map[string]interface{}) (projectv1.ProjectConfigDefinitionBlockPatchIntf, error) { + model := &projectv1.ProjectConfigDefinitionBlockPatch{} + if modelMap["compliance_profile"] != nil && len(modelMap["compliance_profile"].([]interface{})) > 0 { + ComplianceProfileModel, err := resourceIbmProjectConfigMapToProjectComplianceProfile(modelMap["compliance_profile"].([]interface{})[0].(map[string]interface{})) + if err != nil { + return model, err + } + model.ComplianceProfile = ComplianceProfileModel + } + if modelMap["locator_id"] != nil && modelMap["locator_id"].(string) != "" { + model.LocatorID = core.StringPtr(modelMap["locator_id"].(string)) } if modelMap["description"] != nil && modelMap["description"].(string) != "" { model.Description = core.StringPtr(modelMap["description"].(string)) } + if modelMap["name"] != nil && modelMap["name"].(string) != "" { + model.Name = core.StringPtr(modelMap["name"].(string)) + } if modelMap["environment_id"] != nil && modelMap["environment_id"].(string) != "" { model.EnvironmentID = core.StringPtr(modelMap["environment_id"].(string)) } @@ -719,6 +817,24 @@ func resourceIbmProjectConfigMapToProjectConfigPatchDefinitionBlock(modelMap map } model.Authorizations = AuthorizationsModel } + if modelMap["inputs"] != nil { + model.Inputs = modelMap["inputs"].(map[string]interface{}) + } + if modelMap["settings"] != nil { + model.Settings = modelMap["settings"].(map[string]interface{}) + } + if modelMap["resource_crns"] != nil { + resourceCrns := []string{} + for _, resourceCrnsItem := range modelMap["resource_crns"].([]interface{}) { + resourceCrns = append(resourceCrns, resourceCrnsItem.(string)) + } + model.ResourceCrns = resourceCrns + } + return model, nil +} + +func resourceIbmProjectConfigMapToProjectConfigDefinitionBlockPatchDAConfigDefinitionPropertiesPatch(modelMap map[string]interface{}) (*projectv1.ProjectConfigDefinitionBlockPatchDAConfigDefinitionPropertiesPatch, error) { + model := &projectv1.ProjectConfigDefinitionBlockPatchDAConfigDefinitionPropertiesPatch{} if modelMap["compliance_profile"] != nil && len(modelMap["compliance_profile"].([]interface{})) > 0 { ComplianceProfileModel, err := resourceIbmProjectConfigMapToProjectComplianceProfile(modelMap["compliance_profile"].([]interface{})[0].(map[string]interface{})) if err != nil { @@ -729,17 +845,61 
@@ func resourceIbmProjectConfigMapToProjectConfigPatchDefinitionBlock(modelMap map if modelMap["locator_id"] != nil && modelMap["locator_id"].(string) != "" { model.LocatorID = core.StringPtr(modelMap["locator_id"].(string)) } + if modelMap["description"] != nil && modelMap["description"].(string) != "" { + model.Description = core.StringPtr(modelMap["description"].(string)) + } + if modelMap["name"] != nil && modelMap["name"].(string) != "" { + model.Name = core.StringPtr(modelMap["name"].(string)) + } + if modelMap["environment_id"] != nil && modelMap["environment_id"].(string) != "" { + model.EnvironmentID = core.StringPtr(modelMap["environment_id"].(string)) + } + if modelMap["authorizations"] != nil && len(modelMap["authorizations"].([]interface{})) > 0 { + AuthorizationsModel, err := resourceIbmProjectConfigMapToProjectConfigAuth(modelMap["authorizations"].([]interface{})[0].(map[string]interface{})) + if err != nil { + return model, err + } + model.Authorizations = AuthorizationsModel + } + if modelMap["inputs"] != nil { + model.Inputs = modelMap["inputs"].(map[string]interface{}) + } + if modelMap["settings"] != nil { + model.Settings = modelMap["settings"].(map[string]interface{}) + } + return model, nil +} + +func resourceIbmProjectConfigMapToProjectConfigDefinitionBlockPatchResourceConfigDefinitionPropertiesPatch(modelMap map[string]interface{}) (*projectv1.ProjectConfigDefinitionBlockPatchResourceConfigDefinitionPropertiesPatch, error) { + model := &projectv1.ProjectConfigDefinitionBlockPatchResourceConfigDefinitionPropertiesPatch{} + if modelMap["resource_crns"] != nil { + resourceCrns := []string{} + for _, resourceCrnsItem := range modelMap["resource_crns"].([]interface{}) { + resourceCrns = append(resourceCrns, resourceCrnsItem.(string)) + } + model.ResourceCrns = resourceCrns + } + if modelMap["description"] != nil && modelMap["description"].(string) != "" { + model.Description = core.StringPtr(modelMap["description"].(string)) + } + if modelMap["name"] != nil && modelMap["name"].(string) != "" { + model.Name = core.StringPtr(modelMap["name"].(string)) + } + if modelMap["environment_id"] != nil && modelMap["environment_id"].(string) != "" { + model.EnvironmentID = core.StringPtr(modelMap["environment_id"].(string)) + } + if modelMap["authorizations"] != nil && len(modelMap["authorizations"].([]interface{})) > 0 { + AuthorizationsModel, err := resourceIbmProjectConfigMapToProjectConfigAuth(modelMap["authorizations"].([]interface{})[0].(map[string]interface{})) + if err != nil { + return model, err + } + model.Authorizations = AuthorizationsModel + } if modelMap["inputs"] != nil { - bytes, _ := json.Marshal(modelMap["inputs"].(map[string]interface{})) - newMap := make(map[string]interface{}) - json.Unmarshal(bytes, &newMap) - model.Inputs = newMap + model.Inputs = modelMap["inputs"].(map[string]interface{}) } if modelMap["settings"] != nil { - bytes, _ := json.Marshal(modelMap["settings"].(map[string]interface{})) - newMap := make(map[string]interface{}) - json.Unmarshal(bytes, &newMap) - model.Settings = newMap + model.Settings = modelMap["settings"].(map[string]interface{}) } return model, nil } @@ -808,57 +968,81 @@ func resourceIbmProjectConfigScriptToMap(model *projectv1.Script) (map[string]in return modelMap, nil } -func resourceIbmProjectConfigProjectConfigResponseDefinitionToMap(model *projectv1.ProjectConfigResponseDefinition) (map[string]interface{}, error) { - modelMap := make(map[string]interface{}) - modelMap["name"] = model.Name - if model.Description != nil { 
- modelMap["description"] = model.Description - } - if model.EnvironmentID != nil { - modelMap["environment_id"] = model.EnvironmentID - } - if model.Authorizations != nil { - authorizationsMap, err := resourceIbmProjectConfigProjectConfigAuthToMap(model.Authorizations) - if err != nil { - return modelMap, err +func resourceIbmProjectConfigProjectConfigResponseDefinitionToMap(model projectv1.ProjectConfigResponseDefinitionIntf) (map[string]interface{}, error) { + if _, ok := model.(*projectv1.ProjectConfigResponseDefinitionDAConfigDefinitionProperties); ok { + return resourceIbmProjectConfigProjectConfigResponseDefinitionDAConfigDefinitionPropertiesToMap(model.(*projectv1.ProjectConfigResponseDefinitionDAConfigDefinitionProperties)) + } else if _, ok := model.(*projectv1.ProjectConfigResponseDefinitionResourceConfigDefinitionProperties); ok { + return resourceIbmProjectConfigProjectConfigResponseDefinitionResourceConfigDefinitionPropertiesToMap(model.(*projectv1.ProjectConfigResponseDefinitionResourceConfigDefinitionProperties)) + } else if _, ok := model.(*projectv1.ProjectConfigResponseDefinition); ok { + modelMap := make(map[string]interface{}) + model := model.(*projectv1.ProjectConfigResponseDefinition) + if model.ComplianceProfile != nil { + complianceProfileMap, err := resourceIbmProjectConfigProjectComplianceProfileToMap(model.ComplianceProfile) + if err != nil { + return modelMap, err + } + if len(complianceProfileMap) > 0 { + modelMap["compliance_profile"] = []map[string]interface{}{complianceProfileMap} + } } - modelMap["authorizations"] = []map[string]interface{}{authorizationsMap} - } - if model.ComplianceProfile != nil { - complianceProfileMap, err := resourceIbmProjectConfigProjectComplianceProfileToMap(model.ComplianceProfile) - if err != nil { - return modelMap, err + if model.LocatorID != nil { + modelMap["locator_id"] = model.LocatorID } - if len(complianceProfileMap) > 0 { - modelMap["compliance_profile"] = []map[string]interface{}{complianceProfileMap} + if model.Description != nil { + modelMap["description"] = model.Description } - } - modelMap["locator_id"] = model.LocatorID - if model.Inputs != nil { - inputs := make(map[string]interface{}) - for k, v := range model.Inputs { - bytes, err := json.Marshal(v) + modelMap["name"] = model.Name + if model.EnvironmentID != nil { + modelMap["environment_id"] = model.EnvironmentID + } + if model.Authorizations != nil { + authorizationsMap, err := resourceIbmProjectConfigProjectConfigAuthToMap(model.Authorizations) if err != nil { return modelMap, err } - inputs[k] = string(bytes) + if len(authorizationsMap) > 0 { + modelMap["authorizations"] = []map[string]interface{}{authorizationsMap} + } } - if len(inputs) > 0 { + if model.Inputs != nil { + inputs := make(map[string]interface{}) + for k, v := range model.Inputs { + inputs[k] = fmt.Sprintf("%v", v) + } modelMap["inputs"] = inputs } - } - if model.Settings != nil { - settings := make(map[string]interface{}) - for k, v := range model.Settings { - bytes, err := json.Marshal(v) - if err != nil { - return modelMap, err + if model.Settings != nil { + settings := make(map[string]interface{}) + for k, v := range model.Settings { + settings[k] = fmt.Sprintf("%v", v) } - settings[k] = string(bytes) - } - if len(settings) > 0 { modelMap["settings"] = settings } + if model.ResourceCrns != nil { + modelMap["resource_crns"] = model.ResourceCrns + } + return modelMap, nil + } else { + return nil, fmt.Errorf("Unrecognized projectv1.ProjectConfigResponseDefinitionIntf subtype encountered") 
+ } +} + +func resourceIbmProjectConfigProjectComplianceProfileToMap(model *projectv1.ProjectComplianceProfile) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.ID != nil { + modelMap["id"] = model.ID + } + if model.InstanceID != nil { + modelMap["instance_id"] = model.InstanceID + } + if model.InstanceLocation != nil { + modelMap["instance_location"] = model.InstanceLocation + } + if model.AttachmentID != nil { + modelMap["attachment_id"] = model.AttachmentID + } + if model.ProfileName != nil { + modelMap["profile_name"] = model.ProfileName } return modelMap, nil } @@ -877,22 +1061,85 @@ func resourceIbmProjectConfigProjectConfigAuthToMap(model *projectv1.ProjectConf return modelMap, nil } -func resourceIbmProjectConfigProjectComplianceProfileToMap(model *projectv1.ProjectComplianceProfile) (map[string]interface{}, error) { +func resourceIbmProjectConfigProjectConfigResponseDefinitionDAConfigDefinitionPropertiesToMap(model *projectv1.ProjectConfigResponseDefinitionDAConfigDefinitionProperties) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - if model.ID != nil { - modelMap["id"] = model.ID + if model.ComplianceProfile != nil { + complianceProfileMap, err := resourceIbmProjectConfigProjectComplianceProfileToMap(model.ComplianceProfile) + if err != nil { + return modelMap, err + } + modelMap["compliance_profile"] = []map[string]interface{}{complianceProfileMap} } - if model.InstanceID != nil { - modelMap["instance_id"] = model.InstanceID + if model.LocatorID != nil { + modelMap["locator_id"] = model.LocatorID } - if model.InstanceLocation != nil { - modelMap["instance_location"] = model.InstanceLocation + if model.Description != nil { + modelMap["description"] = model.Description } - if model.AttachmentID != nil { - modelMap["attachment_id"] = model.AttachmentID + if model.Name != nil { + modelMap["name"] = model.Name } - if model.ProfileName != nil { - modelMap["profile_name"] = model.ProfileName + if model.EnvironmentID != nil { + modelMap["environment_id"] = model.EnvironmentID + } + if model.Authorizations != nil { + authorizationsMap, err := resourceIbmProjectConfigProjectConfigAuthToMap(model.Authorizations) + if err != nil { + return modelMap, err + } + modelMap["authorizations"] = []map[string]interface{}{authorizationsMap} + } + if model.Inputs != nil { + inputs := make(map[string]interface{}) + for k, v := range model.Inputs { + inputs[k] = fmt.Sprintf("%v", v) + } + modelMap["inputs"] = inputs + } + if model.Settings != nil { + settings := make(map[string]interface{}) + for k, v := range model.Settings { + settings[k] = fmt.Sprintf("%v", v) + } + modelMap["settings"] = settings + } + return modelMap, nil +} + +func resourceIbmProjectConfigProjectConfigResponseDefinitionResourceConfigDefinitionPropertiesToMap(model *projectv1.ProjectConfigResponseDefinitionResourceConfigDefinitionProperties) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.ResourceCrns != nil { + modelMap["resource_crns"] = model.ResourceCrns + } + if model.Description != nil { + modelMap["description"] = model.Description + } + if model.Name != nil { + modelMap["name"] = model.Name + } + if model.EnvironmentID != nil { + modelMap["environment_id"] = model.EnvironmentID + } + if model.Authorizations != nil { + authorizationsMap, err := resourceIbmProjectConfigProjectConfigAuthToMap(model.Authorizations) + if err != nil { + return modelMap, err + } + modelMap["authorizations"] = 
[]map[string]interface{}{authorizationsMap} + } + if model.Inputs != nil { + inputs := make(map[string]interface{}) + for k, v := range model.Inputs { + inputs[k] = fmt.Sprintf("%v", v) + } + modelMap["inputs"] = inputs + } + if model.Settings != nil { + settings := make(map[string]interface{}) + for k, v := range model.Settings { + settings[k] = fmt.Sprintf("%v", v) + } + modelMap["settings"] = settings } return modelMap, nil } @@ -904,7 +1151,11 @@ func resourceIbmProjectConfigOutputValueToMap(model *projectv1.OutputValue) (map modelMap["description"] = model.Description } if model.Value != nil { - modelMap["value"] = model.Value + value := make(map[string]interface{}) + for k, v := range model.Value { + value[k] = fmt.Sprintf("%v", v) + } + modelMap["value"] = value } return modelMap, nil } diff --git a/ibm/service/project/resource_ibm_project_config_test.go b/ibm/service/project/resource_ibm_project_config_test.go index 8a827c81f1..bcbc58de64 100644 --- a/ibm/service/project/resource_ibm_project_config_test.go +++ b/ibm/service/project/resource_ibm_project_config_test.go @@ -1,4 +1,4 @@ -// Copyright IBM Corp. 2023 All Rights Reserved. +// Copyright IBM Corp. 2024 All Rights Reserved. // Licensed under the Mozilla Public License v2.0 package project_test diff --git a/ibm/service/project/resource_ibm_project_environment.go b/ibm/service/project/resource_ibm_project_environment.go index d9fa73b4d9..e005537f17 100644 --- a/ibm/service/project/resource_ibm_project_environment.go +++ b/ibm/service/project/resource_ibm_project_environment.go @@ -1,11 +1,10 @@ -// Copyright IBM Corp. 2023 All Rights Reserved. +// Copyright IBM Corp. 2024 All Rights Reserved. // Licensed under the Mozilla Public License v2.0 package project import ( "context" - "encoding/json" "fmt" "log" @@ -43,16 +42,17 @@ func ResourceIbmProjectEnvironment() *schema.Resource { Description: "The environment definition.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: "The name of the environment. It is unique within the account across projects and regions.", - }, "description": &schema.Schema{ Type: schema.TypeString, Optional: true, + Default: "", Description: "The description of the environment.", }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The name of the environment. 
It is unique within the account across projects and regions.", + }, "authorizations": &schema.Schema{ Type: schema.TypeList, MaxItems: 1, @@ -176,6 +176,11 @@ func ResourceIbmProjectEnvironment() *schema.Resource { Computed: true, Description: "A date and time value in the format YYYY-MM-DDTHH:mm:ssZ or YYYY-MM-DDTHH:mm:ss.sssZ, matching the date and time format as specified by RFC 3339.", }, + "href": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "A URL.", + }, "project_environment_id": &schema.Schema{ Type: schema.TypeString, Computed: true, @@ -279,6 +284,9 @@ func resourceIbmProjectEnvironmentRead(context context.Context, d *schema.Resour if err = d.Set("modified_at", flex.DateTimeToString(environment.ModifiedAt)); err != nil { return diag.FromErr(fmt.Errorf("Error setting modified_at: %s", err)) } + if err = d.Set("href", environment.Href); err != nil { + return diag.FromErr(fmt.Errorf("Error setting href: %s", err)) + } if err = d.Set("project_environment_id", environment.ID); err != nil { return diag.FromErr(fmt.Errorf("Error setting project_environment_id: %s", err)) } @@ -309,7 +317,7 @@ func resourceIbmProjectEnvironmentUpdate(context context.Context, d *schema.Reso " The resource must be re-created to update this property.", "project_id")) } if d.HasChange("definition") { - definition, err := resourceIbmProjectEnvironmentMapToEnvironmentDefinitionProperties(d.Get("definition.0").(map[string]interface{})) + definition, err := resourceIbmProjectEnvironmentMapToEnvironmentDefinitionPropertiesPatch(d.Get("definition.0").(map[string]interface{})) if err != nil { return diag.FromErr(err) } @@ -357,10 +365,10 @@ func resourceIbmProjectEnvironmentDelete(context context.Context, d *schema.Reso func resourceIbmProjectEnvironmentMapToEnvironmentDefinitionRequiredProperties(modelMap map[string]interface{}) (*projectv1.EnvironmentDefinitionRequiredProperties, error) { model := &projectv1.EnvironmentDefinitionRequiredProperties{} - model.Name = core.StringPtr(modelMap["name"].(string)) if modelMap["description"] != nil && modelMap["description"].(string) != "" { model.Description = core.StringPtr(modelMap["description"].(string)) } + model.Name = core.StringPtr(modelMap["name"].(string)) if modelMap["authorizations"] != nil && len(modelMap["authorizations"].([]interface{})) > 0 { AuthorizationsModel, err := resourceIbmProjectEnvironmentMapToProjectConfigAuth(modelMap["authorizations"].([]interface{})[0].(map[string]interface{})) if err != nil { @@ -369,10 +377,7 @@ func resourceIbmProjectEnvironmentMapToEnvironmentDefinitionRequiredProperties(m model.Authorizations = AuthorizationsModel } if modelMap["inputs"] != nil { - bytes, _ := json.Marshal(modelMap["inputs"].(map[string]interface{})) - newMap := make(map[string]interface{}) - json.Unmarshal(bytes, &newMap) - model.Inputs = newMap + model.Inputs = modelMap["inputs"].(map[string]interface{}) } if modelMap["compliance_profile"] != nil && len(modelMap["compliance_profile"].([]interface{})) > 0 { ComplianceProfileModel, err := resourceIbmProjectEnvironmentMapToProjectComplianceProfile(modelMap["compliance_profile"].([]interface{})[0].(map[string]interface{})) @@ -418,14 +423,14 @@ func resourceIbmProjectEnvironmentMapToProjectComplianceProfile(modelMap map[str return model, nil } -func resourceIbmProjectEnvironmentMapToEnvironmentDefinitionProperties(modelMap map[string]interface{}) (*projectv1.EnvironmentDefinitionProperties, error) { - model := &projectv1.EnvironmentDefinitionProperties{} - if modelMap["name"] 
!= nil && modelMap["name"].(string) != "" { - model.Name = core.StringPtr(modelMap["name"].(string)) - } +func resourceIbmProjectEnvironmentMapToEnvironmentDefinitionPropertiesPatch(modelMap map[string]interface{}) (*projectv1.EnvironmentDefinitionPropertiesPatch, error) { + model := &projectv1.EnvironmentDefinitionPropertiesPatch{} if modelMap["description"] != nil && modelMap["description"].(string) != "" { model.Description = core.StringPtr(modelMap["description"].(string)) } + if modelMap["name"] != nil && modelMap["name"].(string) != "" { + model.Name = core.StringPtr(modelMap["name"].(string)) + } if modelMap["authorizations"] != nil && len(modelMap["authorizations"].([]interface{})) > 0 { AuthorizationsModel, err := resourceIbmProjectEnvironmentMapToProjectConfigAuth(modelMap["authorizations"].([]interface{})[0].(map[string]interface{})) if err != nil { @@ -434,10 +439,7 @@ func resourceIbmProjectEnvironmentMapToEnvironmentDefinitionProperties(modelMap model.Authorizations = AuthorizationsModel } if modelMap["inputs"] != nil { - bytes, _ := json.Marshal(modelMap["inputs"].(map[string]interface{})) - newMap := make(map[string]interface{}) - json.Unmarshal(bytes, &newMap) - model.Inputs = newMap + model.Inputs = modelMap["inputs"].(map[string]interface{}) } if modelMap["compliance_profile"] != nil && len(modelMap["compliance_profile"].([]interface{})) > 0 { ComplianceProfileModel, err := resourceIbmProjectEnvironmentMapToProjectComplianceProfile(modelMap["compliance_profile"].([]interface{})[0].(map[string]interface{})) @@ -451,29 +453,25 @@ func resourceIbmProjectEnvironmentMapToEnvironmentDefinitionProperties(modelMap func resourceIbmProjectEnvironmentEnvironmentDefinitionRequiredPropertiesToMap(model *projectv1.EnvironmentDefinitionRequiredProperties) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) - modelMap["name"] = model.Name if model.Description != nil { modelMap["description"] = model.Description } + modelMap["name"] = model.Name if model.Authorizations != nil { authorizationsMap, err := resourceIbmProjectEnvironmentProjectConfigAuthToMap(model.Authorizations) if err != nil { return modelMap, err } - modelMap["authorizations"] = []map[string]interface{}{authorizationsMap} + if len(authorizationsMap) > 0 { + modelMap["authorizations"] = []map[string]interface{}{authorizationsMap} + } } if model.Inputs != nil { inputs := make(map[string]interface{}) for k, v := range model.Inputs { - bytes, err := json.Marshal(v) - if err != nil { - return modelMap, err - } - inputs[k] = string(bytes) - } - if len(inputs) > 0 { - modelMap["inputs"] = inputs + inputs[k] = fmt.Sprintf("%v", v) } + modelMap["inputs"] = inputs } if model.ComplianceProfile != nil { complianceProfileMap, err := resourceIbmProjectEnvironmentProjectComplianceProfileToMap(model.ComplianceProfile) diff --git a/ibm/service/project/resource_ibm_project_environment_test.go b/ibm/service/project/resource_ibm_project_environment_test.go index 5d33d6db32..6a976e3eb8 100644 --- a/ibm/service/project/resource_ibm_project_environment_test.go +++ b/ibm/service/project/resource_ibm_project_environment_test.go @@ -1,4 +1,4 @@ -// Copyright IBM Corp. 2023 All Rights Reserved. +// Copyright IBM Corp. 2024 All Rights Reserved. 
// Licensed under the Mozilla Public License v2.0 package project_test diff --git a/ibm/service/project/resource_ibm_project_test.go b/ibm/service/project/resource_ibm_project_test.go index 20174656ec..661cbb9d89 100644 --- a/ibm/service/project/resource_ibm_project_test.go +++ b/ibm/service/project/resource_ibm_project_test.go @@ -1,4 +1,4 @@ -// Copyright IBM Corp. 2023 All Rights Reserved. +// Copyright IBM Corp. 2024 All Rights Reserved. // Licensed under the Mozilla Public License v2.0 package project_test diff --git a/ibm/service/resourcecontroller/data_source_ibm_resource_instance.go b/ibm/service/resourcecontroller/data_source_ibm_resource_instance.go index f0dcc93c43..caf7896750 100644 --- a/ibm/service/resourcecontroller/data_source_ibm_resource_instance.go +++ b/ibm/service/resourcecontroller/data_source_ibm_resource_instance.go @@ -4,6 +4,7 @@ package resourcecontroller import ( + "encoding/json" "fmt" "log" "net/url" @@ -84,6 +85,12 @@ func DataSourceIBMResourceInstance() *schema.Resource { Description: "Guid of resource instance", }, + "parameters_json": { + Description: "Parameters asociated with instance in json string", + Type: schema.TypeString, + Computed: true, + }, + flex.ResourceName: { Type: schema.TypeString, Computed: true, @@ -246,6 +253,15 @@ func DataSourceIBMResourceInstanceRead(d *schema.ResourceData, meta interface{}) d.Set(flex.ResourceName, instance.Name) d.Set(flex.ResourceCRN, instance.CRN) d.Set(flex.ResourceStatus, instance.State) + if instance.Parameters != nil { + params, err := json.Marshal(instance.Parameters) + if err != nil { + return fmt.Errorf("[ERROR] Error marshalling instance parameters: %s", err) + } + if err = d.Set("parameters_json", string(params)); err != nil { + return fmt.Errorf("[ERROR] Error setting instance parameters json: %s", err) + } + } rMgtClient, err := meta.(conns.ClientSession).ResourceManagerV2API() if err != nil { return err diff --git a/ibm/service/satellite/data_source_ibm_satellite_host_script.go b/ibm/service/satellite/data_source_ibm_satellite_host_script.go index 48ac821502..f502b07d11 100644 --- a/ibm/service/satellite/data_source_ibm_satellite_host_script.go +++ b/ibm/service/satellite/data_source_ibm_satellite_host_script.go @@ -125,7 +125,7 @@ func dataSourceIBMSatelliteAttachHostScriptRead(d *schema.ResourceData, meta int labels := make(map[string]string) if v, ok := d.GetOk("labels"); ok { l := v.(*schema.Set) - labels = flex.FlattenHostLabels(l.List()) + labels = flex.FlattenKeyValues(l.List()) d.Set("labels", l) } diff --git a/ibm/service/satellite/data_source_ibm_satellite_location.go b/ibm/service/satellite/data_source_ibm_satellite_location.go index f73f227872..c1c8651fcf 100644 --- a/ibm/service/satellite/data_source_ibm_satellite_location.go +++ b/ibm/service/satellite/data_source_ibm_satellite_location.go @@ -138,6 +138,16 @@ func DataSourceIBMSatelliteLocation() *schema.Resource { }, }, }, + "service_subnet": { + Type: schema.TypeString, + Computed: true, + Description: "Custom subnet CIDR to provide private IP addresses for services", + }, + "pod_subnet": { + Type: schema.TypeString, + Computed: true, + Description: "Custom subnet CIDR to provide private IP addresses for pods", + }, }, } } @@ -215,5 +225,12 @@ func dataSourceIBMSatelliteLocationRead(d *schema.ResourceData, meta interface{} } d.Set("tags", tags) + if instance.PodSubnet != nil { + d.Set("pod_subnet", *instance.PodSubnet) + } + if instance.ServiceSubnet != nil { + d.Set("service_subnet", *instance.ServiceSubnet) + } + return nil } diff 
--git a/ibm/service/satellite/resource_ibm_satellite_cluster.go b/ibm/service/satellite/resource_ibm_satellite_cluster.go index 98177e1b0c..27042ec954 100644 --- a/ibm/service/satellite/resource_ibm_satellite_cluster.go +++ b/ibm/service/satellite/resource_ibm_satellite_cluster.go @@ -280,6 +280,13 @@ func ResourceIBMSatelliteCluster() *schema.Resource { Sensitive: true, Description: "The IBM Cloud Identity and Access Management (IAM) service CRN token for the service that creates the cluster.", }, + "calico_ip_autodetection": { + Type: schema.TypeMap, + Optional: true, + Description: "Set IP autodetection to use correct interface for Calico", + Elem: &schema.Schema{Type: schema.TypeString}, + DiffSuppressFunc: flex.ApplyOnce, + }, }, } } @@ -392,7 +399,7 @@ func resourceIBMSatelliteClusterCreate(d *schema.ResourceData, meta interface{}) if v, ok := d.GetOk("host_labels"); ok { hostLabels := make(map[string]string) hl := v.(*schema.Set) - hostLabels = flex.FlattenHostLabels(hl.List()) + hostLabels = flex.FlattenKeyValues(hl.List()) createClusterOptions.Labels = hostLabels } @@ -401,6 +408,14 @@ func resourceIBMSatelliteClusterCreate(d *schema.ResourceData, meta interface{}) createClusterOptions.DefaultWorkerPoolEntitlement = &entitlement } + if m, ok := d.GetOk("calico_ip_autodetection"); ok { + methods := make(map[string]string) + for k, v := range m.(map[string]interface{}) { + methods[k] = v.(string) + } + createClusterOptions.SetCalicoIPAutodetectionMethods(methods) + } + if v, ok := d.GetOk("crn_token"); ok { crnToken := v.(string) createRemoteClusterOptions := &kubernetesserviceapiv1.CreateSatelliteClusterRemoteOptions{} diff --git a/ibm/service/satellite/resource_ibm_satellite_cluster_worker_pool.go b/ibm/service/satellite/resource_ibm_satellite_cluster_worker_pool.go index e02f41ed12..0eb93846c4 100644 --- a/ibm/service/satellite/resource_ibm_satellite_cluster_worker_pool.go +++ b/ibm/service/satellite/resource_ibm_satellite_cluster_worker_pool.go @@ -232,7 +232,7 @@ func resourceIBMSatelliteClusterWorkerPoolCreate(d *schema.ResourceData, meta in hostLabels := make(map[string]string) if v, ok := d.GetOk("host_labels"); ok { hl := v.(*schema.Set) - hostLabels = flex.FlattenHostLabels(hl.List()) + hostLabels = flex.FlattenKeyValues(hl.List()) createWorkerPoolOptions.HostLabels = hostLabels } else { createWorkerPoolOptions.HostLabels = hostLabels diff --git a/ibm/service/satellite/resource_ibm_satellite_host.go b/ibm/service/satellite/resource_ibm_satellite_host.go index 57e161b45f..99882ae3cf 100644 --- a/ibm/service/satellite/resource_ibm_satellite_host.go +++ b/ibm/service/satellite/resource_ibm_satellite_host.go @@ -149,7 +149,7 @@ func resourceIBMSatelliteHostCreate(d *schema.ResourceData, meta interface{}) er labels := make(map[string]string) if _, ok := d.GetOk(hostLabels); ok { l := d.Get(hostLabels).(*schema.Set) - labels = flex.FlattenHostLabels(l.List()) + labels = flex.FlattenKeyValues(l.List()) hostAssignOptions.Labels = labels } else { hostAssignOptions.Labels = labels @@ -268,7 +268,7 @@ func resourceIBMSatelliteHostUpdate(d *schema.ResourceData, meta interface{}) er labels := make(map[string]string) if _, ok := d.GetOk(hostLabels); ok { l := d.Get(hostLabels).(*schema.Set) - labels = flex.FlattenHostLabels(l.List()) + labels = flex.FlattenKeyValues(l.List()) updateHostOptions.Labels = labels } response, err := satClient.UpdateSatelliteHost(updateHostOptions) diff --git a/ibm/service/satellite/resource_ibm_satellite_location.go 
b/ibm/service/satellite/resource_ibm_satellite_location.go index e605de1243..ccb0d40b81 100644 --- a/ibm/service/satellite/resource_ibm_satellite_location.go +++ b/ibm/service/satellite/resource_ibm_satellite_location.go @@ -209,6 +209,20 @@ func ResourceIBMSatelliteLocation() *schema.Resource { Computed: true, Sensitive: true, }, + "service_subnet": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Custom subnet CIDR to provide private IP addresses for services", + DiffSuppressFunc: flex.ApplyOnce, + }, + "pod_subnet": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Custom subnet CIDR to provide private IP addresses for pods", + DiffSuppressFunc: flex.ApplyOnce, + }, }, } } @@ -275,6 +289,16 @@ func resourceIBMSatelliteLocationCreate(d *schema.ResourceData, meta interface{} createSatLocOptions.Headers = pathParamsMap } + if v, ok := d.GetOk("pod_subnet"); ok { + podSubnet := v.(string) + createSatLocOptions.PodSubnet = &podSubnet + } + + if v, ok := d.GetOk("service_subnet"); ok { + serviceSubnet := v.(string) + createSatLocOptions.ServiceSubnet = &serviceSubnet + } + instance, response, err := satClient.CreateSatelliteLocation(createSatLocOptions) if err != nil || instance == nil { return fmt.Errorf("[ERROR] Error Creating Satellite Location: %s\n%s", err, response) @@ -357,6 +381,14 @@ func resourceIBMSatelliteLocationRead(d *schema.ResourceData, meta interface{}) d.Set("ingress_secret", *instance.Ingress.SecretName) } + if instance.PodSubnet != nil { + d.Set("pod_subnet", *instance.PodSubnet) + } + + if instance.ServiceSubnet != nil { + d.Set("service_subnet", *instance.ServiceSubnet) + } + return nil } diff --git a/ibm/service/satellite/resource_ibm_satellite_location_test.go b/ibm/service/satellite/resource_ibm_satellite_location_test.go index 19c534d910..3892f0a03d 100644 --- a/ibm/service/satellite/resource_ibm_satellite_location_test.go +++ b/ibm/service/satellite/resource_ibm_satellite_location_test.go @@ -29,7 +29,7 @@ func TestAccSatelliteLocation_Basic(t *testing.T) { Steps: []resource.TestStep{ { - Config: testAccCheckSatelliteLocationCreate(name, managed_from, coreos_enabled), + Config: testAccCheckSatelliteLocationCreate(name, managed_from, coreos_enabled, "", ""), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckSatelliteLocationExists("ibm_satellite_location.location", instance), resource.TestCheckResourceAttr("ibm_satellite_location.location", "location", name), @@ -54,7 +54,7 @@ func TestAccSatelliteLocation_Import(t *testing.T) { Steps: []resource.TestStep{ { - Config: testAccCheckSatelliteLocationCreate(name, managed_from, coreos_enabled), + Config: testAccCheckSatelliteLocationCreate(name, managed_from, coreos_enabled, "", ""), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckSatelliteLocationExists("ibm_satellite_location.location", instance), resource.TestCheckResourceAttr("ibm_satellite_location.location", "location", name), @@ -70,6 +70,34 @@ func TestAccSatelliteLocation_Import(t *testing.T) { }) } +func TestAccSatelliteLocation_PodAndServiceSubnet(t *testing.T) { + var instance string + name := fmt.Sprintf("tf-satellitelocation-%d", acctest.RandIntRange(10, 100)) + managed_from := "wdc04" + coreos_enabled := "true" + pod_subnet := "10.69.0.0/16" + service_subnet := "192.168.42.0/24" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + + { + Config: 
testAccCheckSatelliteLocationCreate(name, managed_from, coreos_enabled, pod_subnet, service_subnet), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSatelliteLocationExists("ibm_satellite_location.location", instance), + resource.TestCheckResourceAttr("ibm_satellite_location.location", "location", name), + resource.TestCheckResourceAttr("ibm_satellite_location.location", "managed_from", managed_from), + resource.TestCheckResourceAttr("ibm_satellite_location.location", "coreos_enabled", coreos_enabled), + resource.TestCheckResourceAttr("ibm_satellite_location.location", "pod_subnet", pod_subnet), + resource.TestCheckResourceAttr("ibm_satellite_location.location", "service_subnet", service_subnet), + ), + }, + }, + }) +} + func testAccCheckSatelliteLocationExists(n string, instance string) resource.TestCheckFunc { return func(s *terraform.State) error { @@ -126,7 +154,7 @@ func testAccCheckSatelliteLocationDestroy(s *terraform.State) error { return nil } -func testAccCheckSatelliteLocationCreate(name, managed_from string, coreos_enabled string) string { +func testAccCheckSatelliteLocationCreate(name, managed_from string, coreos_enabled string, pod_subnet, service_subnet string) string { return fmt.Sprintf(` data "ibm_resource_group" "res_group" { @@ -141,7 +169,9 @@ func testAccCheckSatelliteLocationCreate(name, managed_from string, coreos_enabl zones = ["us-east-1", "us-east-2", "us-east-3"] resource_group_id = data.ibm_resource_group.res_group.id tags = ["env:dev"] + pod_subnet = "%s" + service_subnet = "%s" } -`, name, managed_from, coreos_enabled) +`, name, managed_from, coreos_enabled, pod_subnet, service_subnet) } diff --git a/ibm/service/scc/data_source_ibm_scc_provider_type.go b/ibm/service/scc/data_source_ibm_scc_provider_type.go index 6e3bfffe0c..b63556cfcb 100644 --- a/ibm/service/scc/data_source_ibm_scc_provider_type.go +++ b/ibm/service/scc/data_source_ibm_scc_provider_type.go @@ -17,7 +17,7 @@ import ( ) func DataSourceIbmSccProviderType() *schema.Resource { - return &schema.Resource{ + return AddSchemaData(&schema.Resource{ ReadContext: dataSourceIbmSccProviderTypeRead, Schema: map[string]*schema.Schema{ @@ -109,7 +109,7 @@ func DataSourceIbmSccProviderType() *schema.Resource { Description: "Time at which resource was updated.", }, }, - } + }) } func dataSourceIbmSccProviderTypeRead(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { @@ -120,6 +120,7 @@ func dataSourceIbmSccProviderTypeRead(context context.Context, d *schema.Resourc getProviderTypeByIdOptions := &securityandcompliancecenterapiv3.GetProviderTypeByIdOptions{} + getProviderTypeByIdOptions.SetInstanceID(d.Get("instance_id").(string)) getProviderTypeByIdOptions.SetProviderTypeID(d.Get("provider_type_id").(string)) providerTypeItem, response, err := securityAndComplianceCenterApIsClient.GetProviderTypeByIDWithContext(context, getProviderTypeByIdOptions) diff --git a/ibm/service/scc/data_source_ibm_scc_provider_type_instance_test.go b/ibm/service/scc/data_source_ibm_scc_provider_type_instance_test.go index f5ae316512..bfdbf88791 100644 --- a/ibm/service/scc/data_source_ibm_scc_provider_type_instance_test.go +++ b/ibm/service/scc/data_source_ibm_scc_provider_type_instance_test.go @@ -20,7 +20,7 @@ func TestAccIbmSccProviderTypeInstanceDataSourceBasic(t *testing.T) { Providers: acc.TestAccProviders, Steps: []resource.TestStep{ { - Config: testAccCheckIbmSccProviderTypeInstanceDataSourceConfigBasic(acc.SccInstanceID, providerTypeInstanceName, 
acc.SccProviderTypeAttributes), + Config: testAccCheckIbmSccProviderTypeInstanceDataSourceConfigBasic(acc.SccInstanceID, providerTypeInstanceName, acc.SccProviderTypeAttributes, acc.SccProviderTypeID), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrSet("data.ibm_scc_provider_type_instance.scc_provider_type_instance_tf", "id"), resource.TestCheckResourceAttrSet("data.ibm_scc_provider_type_instance.scc_provider_type_instance_tf", "provider_type_id"), @@ -39,7 +39,7 @@ func TestAccIbmSccProviderTypeInstanceDataSourceAllArgs(t *testing.T) { Providers: acc.TestAccProviders, Steps: []resource.TestStep{ { - Config: testAccCheckIbmSccProviderTypeInstanceDataSourceConfig(acc.SccInstanceID, providerTypeInstanceName, acc.SccProviderTypeAttributes), + Config: testAccCheckIbmSccProviderTypeInstanceDataSourceConfig(acc.SccInstanceID, providerTypeInstanceName, acc.SccProviderTypeAttributes, acc.SccProviderTypeID), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrSet("data.ibm_scc_provider_type_instance.scc_provider_type_instance_tf", "id"), resource.TestCheckResourceAttrSet("data.ibm_scc_provider_type_instance.scc_provider_type_instance_tf", "provider_type_id"), @@ -55,11 +55,11 @@ func TestAccIbmSccProviderTypeInstanceDataSourceAllArgs(t *testing.T) { }) } -func testAccCheckIbmSccProviderTypeInstanceDataSourceConfigBasic(instanceID string, providerTypeInstanceName string, providerTypeInstanceAttributes string) string { +func testAccCheckIbmSccProviderTypeInstanceDataSourceConfigBasic(instanceID, providerTypeInstanceName, providerTypeInstanceAttributes, providerTypeInstanceID string) string { return fmt.Sprintf(` resource "ibm_scc_provider_type_instance" "scc_provider_type_instance" { instance_id = "%s" - provider_type_id = "afa2476ecfa5f09af248492fe991b4d1" + provider_type_id = "%s" name = "%s" attributes = %s } @@ -69,14 +69,14 @@ func testAccCheckIbmSccProviderTypeInstanceDataSourceConfigBasic(instanceID stri provider_type_id = ibm_scc_provider_type_instance.scc_provider_type_instance.provider_type_id provider_type_instance_id = ibm_scc_provider_type_instance.scc_provider_type_instance.provider_type_instance_id } - `, instanceID, providerTypeInstanceName, providerTypeInstanceAttributes) + `, instanceID, providerTypeInstanceID, providerTypeInstanceName, providerTypeInstanceAttributes) } -func testAccCheckIbmSccProviderTypeInstanceDataSourceConfig(instanceID string, providerTypeInstanceName string, providerTypeInstanceAttributes string) string { +func testAccCheckIbmSccProviderTypeInstanceDataSourceConfig(instanceID, providerTypeInstanceName, providerTypeInstanceAttributes, providerTypeInstanceID string) string { return fmt.Sprintf(` resource "ibm_scc_provider_type_instance" "scc_provider_type_instance" { instance_id = "%s" - provider_type_id = "afa2476ecfa5f09af248492fe991b4d1" + provider_type_id = "%s" name = "%s" attributes = %s } @@ -86,5 +86,5 @@ func testAccCheckIbmSccProviderTypeInstanceDataSourceConfig(instanceID string, p provider_type_id = ibm_scc_provider_type_instance.scc_provider_type_instance.provider_type_id provider_type_instance_id = ibm_scc_provider_type_instance.scc_provider_type_instance.provider_type_instance_id } - `, instanceID, providerTypeInstanceName, providerTypeInstanceAttributes) + `, instanceID, providerTypeInstanceID, providerTypeInstanceName, providerTypeInstanceAttributes) } diff --git a/ibm/service/scc/data_source_ibm_scc_provider_type_test.go b/ibm/service/scc/data_source_ibm_scc_provider_type_test.go index ea2db7e89e..d302b59a7f 
100644 --- a/ibm/service/scc/data_source_ibm_scc_provider_type_test.go +++ b/ibm/service/scc/data_source_ibm_scc_provider_type_test.go @@ -18,7 +18,7 @@ func TestAccIbmSccProviderTypeDataSourceBasic(t *testing.T) { Providers: acc.TestAccProviders, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccCheckIbmSccProviderTypeDataSourceConfigBasic(), + Config: testAccCheckIbmSccProviderTypeDataSourceConfigBasic(acc.SccInstanceID, acc.SccProviderTypeID), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrSet("data.ibm_scc_provider_type.scc_provider_type_instance", "id"), resource.TestCheckResourceAttrSet("data.ibm_scc_provider_type.scc_provider_type_instance", "provider_type_id"), @@ -30,17 +30,17 @@ func TestAccIbmSccProviderTypeDataSourceBasic(t *testing.T) { resource.TestCheckResourceAttrSet("data.ibm_scc_provider_type.scc_provider_type_instance", "mode"), resource.TestCheckResourceAttrSet("data.ibm_scc_provider_type.scc_provider_type_instance", "data_type"), resource.TestCheckResourceAttrSet("data.ibm_scc_provider_type.scc_provider_type_instance", "icon"), - resource.TestCheckResourceAttrSet("data.ibm_scc_provider_type.scc_provider_type_instance", "attributes.%"), ), }, }, }) } -func testAccCheckIbmSccProviderTypeDataSourceConfigBasic() string { +func testAccCheckIbmSccProviderTypeDataSourceConfigBasic(instanceID, providerTypeID string) string { return fmt.Sprintf(` data "ibm_scc_provider_type" "scc_provider_type_instance" { - provider_type_id = "afa2476ecfa5f09af248492fe991b4d1" + instance_id = "%s" + provider_type_id = "%s" } - `) + `, instanceID, providerTypeID) } diff --git a/ibm/service/scc/resource_ibm_scc_instance_settings.go b/ibm/service/scc/resource_ibm_scc_instance_settings.go new file mode 100644 index 0000000000..deecd0f438 --- /dev/null +++ b/ibm/service/scc/resource_ibm_scc_instance_settings.go @@ -0,0 +1,340 @@ +package scc + +import ( + "context" + "errors" + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/validate" + "github.com/IBM/go-sdk-core/v5/core" + "github.com/IBM/scc-go-sdk/v5/securityandcompliancecenterapiv3" +) + +func ResourceIbmSccInstanceSettings() *schema.Resource { + return AddSchemaData(&schema.Resource{ + CreateContext: resourceIbmSccInstanceSettingsCreate, + ReadContext: resourceIbmSccInstanceSettingsRead, + UpdateContext: resourceIbmSccInstanceSettingsUpdate, + DeleteContext: resourceIbmSccInstanceSettingsDelete, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "event_notifications": &schema.Schema{ + Type: schema.TypeList, + MaxItems: 1, + Required: true, + Description: "The Event Notifications settings.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instance_crn": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The Event Notifications instance CRN.", + }, + "updated_on": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The date when the Event Notifications connection was updated.", + }, + "source_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The connected Security and Compliance Center instance CRN.", + }, + }, + }, + }, + "object_storage": &schema.Schema{ + Type: schema.TypeList, + MaxItems: 1, + Required: true, + Description: "The Cloud Object Storage settings.", + Elem: 
&schema.Resource{ + Schema: map[string]*schema.Schema{ + "instance_crn": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The connected Cloud Object Storage instance CRN.", + }, + "bucket": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The connected Cloud Object Storage bucket name.", + }, + "bucket_location": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The connected Cloud Object Storage bucket location.", + }, + "bucket_endpoint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The connected Cloud Object Storage bucket endpoint.", + }, + "updated_on": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The date when the bucket connection was updated.", + }, + }, + }, + }, + }, + }) +} + +func ResourceIbmSccInstanceSettingsValidator() *validate.ResourceValidator { + validateSchema := make([]validate.ValidateSchema, 0) + validateSchema = append(validateSchema, + validate.ValidateSchema{ + Identifier: "instance_id", + ValidateFunctionIdentifier: validate.ValidateRegexpLen, + Type: validate.TypeString, + Optional: true, + Regexp: `^[a-zA-Z0-9 ,\-_]+$`, + MinValueLength: 1, + MaxValueLength: 1024, + }, + ) + + resourceValidator := validate.ResourceValidator{ResourceName: "ibm_scc_instance_settings", Schema: validateSchema} + return &resourceValidator +} + +func resourceIbmSccInstanceSettingsCreate(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + adminClient, err := meta.(conns.ClientSession).SecurityAndComplianceCenterV3() + if err != nil { + return diag.FromErr(err) + } + + updateSettingsOptions := &securityandcompliancecenterapiv3.UpdateSettingsOptions{} + instance_id := d.Get("instance_id").(string) + updateSettingsOptions.SetInstanceID(instance_id) + + var eventNotificationsModel *securityandcompliancecenterapiv3.EventNotifications + if _, ok := d.GetOk("event_notifications"); ok { + eventNotificationsData, err := resourceIbmSccInstanceSettingsMapToEventNotifications(d.Get("event_notifications.0").(map[string]interface{})) + if err != nil { + return diag.FromErr(err) + } + eventNotificationsModel = eventNotificationsData + eventNotificationsModel.SourceName = core.StringPtr("compliance") + eventNotificationsModel.SourceDescription = core.StringPtr("This source is used for integration with IBM Cloud Security and Compliance Center.") + } else { + eventNotificationsModel = &securityandcompliancecenterapiv3.EventNotifications{} + eventNotificationsModel.InstanceCrn = core.StringPtr("") + } + updateSettingsOptions.SetEventNotifications(eventNotificationsModel) + + var objectStorageModel *securityandcompliancecenterapiv3.ObjectStorage + if _, ok := d.GetOk("object_storage"); ok { + objectStorageData, err := resourceIbmSccInstanceSettingsMapToObjectStorage(d.Get("object_storage.0").(map[string]interface{})) + if err != nil { + return diag.FromErr(err) + } + objectStorageModel = objectStorageData + } else { + objectStorageModel = &securityandcompliancecenterapiv3.ObjectStorage{} + objectStorageModel.InstanceCrn = core.StringPtr("") + } + updateSettingsOptions.SetObjectStorage(objectStorageModel) + + _, response, err := adminClient.UpdateSettingsWithContext(context, updateSettingsOptions) + if err != nil { + log.Printf("[DEBUG] UpdateSettingsWithContext failed %s\n%s", err, response) + return diag.FromErr(fmt.Errorf("UpdateSettingsWithContext failed %s\n%s", err, response)) + } + + d.SetId(instance_id) + + return
resourceIbmSccInstanceSettingsRead(context, d, meta) +} + +func resourceIbmSccInstanceSettingsRead(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + adminClient, err := meta.(conns.ClientSession).SecurityAndComplianceCenterV3() + if err != nil { + return diag.FromErr(err) + } + + getSettingsOptions := &securityandcompliancecenterapiv3.GetSettingsOptions{} + instance_id := d.Id() + getSettingsOptions.SetInstanceID(instance_id) + + settings, response, err := adminClient.GetSettingsWithContext(context, getSettingsOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + log.Printf("[DEBUG] GetSettingsWithContext failed %s\n%s", err, response) + return diag.FromErr(fmt.Errorf("GetSettingsWithContext failed %s\n%s", err, response)) + } + + if err = d.Set("instance_id", instance_id); err != nil { + return diag.FromErr(fmt.Errorf("Error setting instance_id: %s", err)) + } + if !core.IsNil(settings.EventNotifications) { + eventNotificationsMap, err := resourceIbmSccInstanceSettingsEventNotificationsToMap(settings.EventNotifications) + if err != nil { + return diag.FromErr(err) + } + if err = d.Set("event_notifications", []map[string]interface{}{eventNotificationsMap}); err != nil { + return diag.FromErr(fmt.Errorf("Error setting event_notifications: %s", err)) + } + } + if !core.IsNil(settings.ObjectStorage) { + objectStorageMap, err := resourceIbmSccInstanceSettingsObjectStorageToMap(settings.ObjectStorage) + if err != nil { + return diag.FromErr(err) + } + if err = d.Set("object_storage", []map[string]interface{}{objectStorageMap}); err != nil { + return diag.FromErr(fmt.Errorf("Error setting object_storage: %s", err)) + } + } + + return nil +} + +func resourceIbmSccInstanceSettingsUpdate(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + adminClient, err := meta.(conns.ClientSession).SecurityAndComplianceCenterV3() + if err != nil { + return diag.FromErr(err) + } + + updateSettingsOptions := &securityandcompliancecenterapiv3.UpdateSettingsOptions{} + instance_id := d.Get("instance_id").(string) + updateSettingsOptions.SetInstanceID(instance_id) + + hasChange := false + + if d.HasChange("event_notifications") { + eventNotifications, err := resourceIbmSccInstanceSettingsMapToEventNotifications(d.Get("event_notifications.0").(map[string]interface{})) + if err != nil { + return diag.FromErr(err) + } + if eventNotifications.InstanceCrn != nil && *eventNotifications.InstanceCrn != "" { + eventNotifications.SourceName = core.StringPtr("compliance") + eventNotifications.SourceDescription = core.StringPtr("This source is used for integration with IBM Cloud Security and Compliance Center.") + } + updateSettingsOptions.SetEventNotifications(eventNotifications) + hasChange = true + } + if d.HasChange("object_storage") { + objectStorage, err := resourceIbmSccInstanceSettingsMapToObjectStorage(d.Get("object_storage.0").(map[string]interface{})) + if err != nil { + return diag.FromErr(err) + } + updateSettingsOptions.SetObjectStorage(objectStorage) + hasChange = true + } + + if hasChange { + _, response, err := adminClient.UpdateSettingsWithContext(context, updateSettingsOptions) + if err != nil { + log.Printf("[DEBUG] UpdateSettingsWithContext failed %s\n%s", err, response) + return diag.FromErr(fmt.Errorf("UpdateSettingsWithContext failed %s\n%s", err, response)) + } + } + + return resourceIbmSccInstanceSettingsRead(context, d, meta) +} + +func 
resourceIbmSccInstanceSettingsDelete(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + + d.SetId("") + + return nil +} + +func resourceIbmSccInstanceSettingsMapToEventNotifications(modelMap map[string]interface{}) (*securityandcompliancecenterapiv3.EventNotifications, error) { + model := &securityandcompliancecenterapiv3.EventNotifications{} + if modelMap["instance_crn"] != nil && modelMap["instance_crn"].(string) != "" { + model.InstanceCrn = core.StringPtr(modelMap["instance_crn"].(string)) + } + if modelMap["updated_on"] != nil { + dateTime, err := core.ParseDateTime(modelMap["updated_on"].(string)) + if err != nil { + return model, err + } + model.UpdatedOn = &dateTime + } + if modelMap["source_id"] != nil && modelMap["source_id"].(string) != "" { + model.SourceID = core.StringPtr(modelMap["source_id"].(string)) + } + return model, nil +} + +func resourceIbmSccInstanceSettingsMapToObjectStorage(modelMap map[string]interface{}) (*securityandcompliancecenterapiv3.ObjectStorage, error) { + model := &securityandcompliancecenterapiv3.ObjectStorage{} + instanceCrnSet := false + if modelMap["instance_crn"] != nil && modelMap["instance_crn"].(string) != "" { + model.InstanceCrn = core.StringPtr(modelMap["instance_crn"].(string)) + instanceCrnSet = true + } + if modelMap["bucket"] != nil && modelMap["bucket"].(string) != "" { + if instanceCrnSet { + model.Bucket = core.StringPtr(modelMap["bucket"].(string)) + } else { + return model, errors.New(`object_storage.instance_crn cannot be empty`) + } + } + if modelMap["bucket_location"] != nil && modelMap["bucket_location"].(string) != "" { + model.BucketLocation = core.StringPtr(modelMap["bucket_location"].(string)) + } + if modelMap["bucket_endpoint"] != nil && modelMap["bucket_endpoint"].(string) != "" { + model.BucketEndpoint = core.StringPtr(modelMap["bucket_endpoint"].(string)) + } + if modelMap["updated_on"] != nil { + dateTime, err := core.ParseDateTime(modelMap["updated_on"].(string)) + if err != nil { + return model, err + } + model.UpdatedOn = &dateTime + } + return model, nil +} + +func resourceIbmSccInstanceSettingsEventNotificationsToMap(model *securityandcompliancecenterapiv3.EventNotifications) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.InstanceCrn != nil { + modelMap["instance_crn"] = model.InstanceCrn + } + if model.UpdatedOn != nil { + modelMap["updated_on"] = model.UpdatedOn.String() + } + if model.SourceID != nil { + modelMap["source_id"] = model.SourceID + } + if model.SourceDescription != nil { + modelMap["source_description"] = model.SourceDescription + } + if model.SourceName != nil { + modelMap["source_name"] = model.SourceName + } + return modelMap, nil +} + +func resourceIbmSccInstanceSettingsObjectStorageToMap(model *securityandcompliancecenterapiv3.ObjectStorage) (map[string]interface{}, error) { + modelMap := make(map[string]interface{}) + if model.InstanceCrn != nil { + modelMap["instance_crn"] = model.InstanceCrn + } + if model.Bucket != nil { + modelMap["bucket"] = model.Bucket + } + if model.BucketLocation != nil { + modelMap["bucket_location"] = model.BucketLocation + } + if model.BucketEndpoint != nil { + modelMap["bucket_endpoint"] = model.BucketEndpoint + } + if model.UpdatedOn != nil { + modelMap["updated_on"] = model.UpdatedOn.String() + } + return modelMap, nil +} diff --git a/ibm/service/scc/resource_ibm_scc_instance_settings_test.go b/ibm/service/scc/resource_ibm_scc_instance_settings_test.go new file mode 100644 index 
0000000000..ba476bf939 --- /dev/null +++ b/ibm/service/scc/resource_ibm_scc_instance_settings_test.go @@ -0,0 +1,139 @@ +// Copyright IBM Corp. 2023 All Rights Reserved. +// Licensed under the Mozilla Public License v2.0 + +package scc_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" + "github.com/IBM/scc-go-sdk/v5/securityandcompliancecenterapiv3" +) + +func TestAccIbmSccInstanceSettingsBasic(t *testing.T) { + var conf securityandcompliancecenterapiv3.Settings + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: testAccCheckIbmSccInstanceSettingsDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIbmSccInstanceSettingsConfigBasic(acc.SccInstanceID), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIbmSccInstanceSettingsExists("ibm_scc_instance_settings.scc_instance_settings_instance", conf), + ), + }, + }, + }) +} + +func TestAccIbmSccInstanceSettingsAllArgs(t *testing.T) { + var conf securityandcompliancecenterapiv3.Settings + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: testAccCheckIbmSccInstanceSettingsDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIbmSccInstanceSettingsConfigBasic(acc.SccInstanceID), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIbmSccInstanceSettingsExists("ibm_scc_instance_settings.scc_instance_settings_instance", conf), + ), + }, + resource.TestStep{ + Config: testAccCheckIbmSccInstanceSettingsConfig(acc.SccInstanceID, acc.SccEventNotificationsCRN, acc.SccObjectStorageCRN, acc.SccObjectStorageBucket), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIbmSccInstanceSettingsExists("ibm_scc_instance_settings.scc_instance_settings_instance", conf), + ), + }, + resource.TestStep{ + ResourceName: "ibm_scc_instance_settings.scc_instance_settings_instance", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCheckIbmSccInstanceSettingsConfigBasic(instanceID string) string { + return fmt.Sprintf(` + resource "ibm_scc_instance_settings" "scc_instance_settings_instance" { + instance_id = "%s" + event_notifications { } + object_storage { } + } + `, instanceID) +} + +func testAccCheckIbmSccInstanceSettingsConfig(instanceID, enInstanceCRN, objStorInstanceCRN, objStorBucket string) string { + return fmt.Sprintf(` + resource "ibm_scc_instance_settings" "scc_instance_settings_instance" { + instance_id = "%s" + event_notifications { + instance_crn = "%s" + } + object_storage { + instance_crn = "%s" + bucket = "%s" + } + } + `, instanceID, enInstanceCRN, objStorInstanceCRN, objStorBucket) +} + +func testAccCheckIbmSccInstanceSettingsExists(n string, obj securityandcompliancecenterapiv3.Settings) resource.TestCheckFunc { + + return func(s *terraform.State) error { + _, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + adminClient, err := acc.TestAccProvider.Meta().(conns.ClientSession).SecurityAndComplianceCenterV3() + if err != nil { + return err + } + + getSettingsOptions := &securityandcompliancecenterapiv3.GetSettingsOptions{} + instanceID := acc.SccInstanceID + 
getSettingsOptions.SetInstanceID(instanceID) + + settings, _, err := adminClient.GetSettings(getSettingsOptions) + if err != nil { + return err + } + + obj = *settings + return nil + } +} + +func testAccCheckIbmSccInstanceSettingsDestroy(s *terraform.State) error { + adminClient, err := acc.TestAccProvider.Meta().(conns.ClientSession).SecurityAndComplianceCenterV3() + if err != nil { + return err + } + for _, rs := range s.RootModule().Resources { + if rs.Type != "ibm_scc_instance_settings" { + continue + } + + getSettingsOptions := &securityandcompliancecenterapiv3.GetSettingsOptions{} + instanceID := acc.SccInstanceID + getSettingsOptions.SetInstanceID(instanceID) + + // Deleting an ibm_scc_instance_settings resource doesn't delete the underlying settings entity + _, response, err := adminClient.GetSettings(getSettingsOptions) + if err != nil || response == nil || response.StatusCode != 200 { + return fmt.Errorf("Error checking whether scc_instance_settings (%s) has been destroyed: %s", rs.Primary.ID, err) + } + } + + return nil +} diff --git a/ibm/service/scc/resource_ibm_scc_provider_type_instance_test.go b/ibm/service/scc/resource_ibm_scc_provider_type_instance_test.go index cbf5170421..bbbe2da180 100644 --- a/ibm/service/scc/resource_ibm_scc_provider_type_instance_test.go +++ b/ibm/service/scc/resource_ibm_scc_provider_type_instance_test.go @@ -28,14 +28,14 @@ func TestAccIbmSccProviderTypeInstanceBasic(t *testing.T) { CheckDestroy: testAccCheckIbmSccProviderTypeInstanceDestroy, Steps: []resource.TestStep{ { - Config: testAccCheckIbmSccProviderTypeInstanceConfigBasic(acc.SccInstanceID, name, acc.SccProviderTypeAttributes), + Config: testAccCheckIbmSccProviderTypeInstanceConfigBasic(acc.SccInstanceID, name, acc.SccProviderTypeAttributes, acc.SccProviderTypeID), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckIbmSccProviderTypeInstanceExists("ibm_scc_provider_type_instance.scc_provider_type_instance_wlp", conf), resource.TestCheckResourceAttr("ibm_scc_provider_type_instance.scc_provider_type_instance_wlp", "name", name), ), }, { - Config: testAccCheckIbmSccProviderTypeInstanceConfigBasic(acc.SccInstanceID, nameUpdate, acc.SccProviderTypeAttributes), + Config: testAccCheckIbmSccProviderTypeInstanceConfigBasic(acc.SccInstanceID, nameUpdate, acc.SccProviderTypeAttributes, acc.SccProviderTypeID), Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr("ibm_scc_provider_type_instance.scc_provider_type_instance_wlp", "name", nameUpdate), ), @@ -55,14 +55,14 @@ func TestAccIbmSccProviderTypeInstanceAllArgs(t *testing.T) { CheckDestroy: testAccCheckIbmSccProviderTypeInstanceDestroy, Steps: []resource.TestStep{ { - Config: testAccCheckIbmSccProviderTypeInstanceConfig(acc.SccInstanceID, name, acc.SccProviderTypeAttributes), + Config: testAccCheckIbmSccProviderTypeInstanceConfig(acc.SccInstanceID, name, acc.SccProviderTypeAttributes, acc.SccProviderTypeID), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckIbmSccProviderTypeInstanceExists("ibm_scc_provider_type_instance.scc_provider_type_instance_wlp", conf), resource.TestCheckResourceAttr("ibm_scc_provider_type_instance.scc_provider_type_instance_wlp", "name", name), ), }, { - Config: testAccCheckIbmSccProviderTypeInstanceConfig(acc.SccInstanceID, nameUpdate, acc.SccProviderTypeAttributes), + Config: testAccCheckIbmSccProviderTypeInstanceConfig(acc.SccInstanceID, nameUpdate, acc.SccProviderTypeAttributes, acc.SccProviderTypeID), Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr("ibm_scc_provider_type_instance.scc_provider_type_instance_wlp",
"name", nameUpdate), ), @@ -76,26 +76,26 @@ func TestAccIbmSccProviderTypeInstanceAllArgs(t *testing.T) { }) } -func testAccCheckIbmSccProviderTypeInstanceConfigBasic(instanceID string, name string, attributes string) string { +func testAccCheckIbmSccProviderTypeInstanceConfigBasic(instanceID string, name string, attributes string, providerTypeID string) string { return fmt.Sprintf(` resource "ibm_scc_provider_type_instance" "scc_provider_type_instance_wlp" { instance_id = "%s" - provider_type_id = "afa2476ecfa5f09af248492fe991b4d1" + provider_type_id = "%s" name = "%s" attributes = %s } - `, instanceID, name, attributes) + `, instanceID, providerTypeID, name, attributes) } -func testAccCheckIbmSccProviderTypeInstanceConfig(instanceID string, name string, attributes string) string { +func testAccCheckIbmSccProviderTypeInstanceConfig(instanceID string, name string, attributes string, providerTypeID string) string { return fmt.Sprintf(` resource "ibm_scc_provider_type_instance" "scc_provider_type_instance_wlp" { instance_id = "%s" - provider_type_id = "afa2476ecfa5f09af248492fe991b4d1" + provider_type_id = "%s" name = "%s" attributes = %s } - `, instanceID, name, attributes) + `, instanceID, providerTypeID, name, attributes) } func testAccCheckIbmSccProviderTypeInstanceExists(n string, obj securityandcompliancecenterapiv3.ProviderTypeInstanceItem) resource.TestCheckFunc { diff --git a/ibm/service/schematics/data_source_ibm_schematics_agent.go b/ibm/service/schematics/data_source_ibm_schematics_agent.go index a5c21acfd5..f11932e05f 100644 --- a/ibm/service/schematics/data_source_ibm_schematics_agent.go +++ b/ibm/service/schematics/data_source_ibm_schematics_agent.go @@ -541,8 +541,7 @@ func dataSourceIbmSchematicsAgentRead(context context.Context, d *schema.Resourc } getAgentDataOptions := &schematicsv1.GetAgentDataOptions{ - XFeatureAgents: core.BoolPtr(true), - Profile: core.StringPtr("detailed"), + Profile: core.StringPtr("detailed"), } getAgentDataOptions.SetAgentID(d.Get("agent_id").(string)) @@ -578,7 +577,11 @@ func dataSourceIbmSchematicsAgentRead(context context.Context, d *schema.Resourc if err = d.Set("agent_location", agentData.AgentLocation); err != nil { return diag.FromErr(fmt.Errorf("Error setting agent_location: %s", err)) } - + if agentData.Tags != nil { + if err = d.Set("tags", agentData.Tags); err != nil { + return diag.FromErr(fmt.Errorf("Error setting tags: %s", err)) + } + } agentInfrastructure := []map[string]interface{}{} if agentData.AgentInfrastructure != nil { modelMap, err := dataSourceIbmSchematicsAgentAgentInfrastructureToMap(agentData.AgentInfrastructure) diff --git a/ibm/service/schematics/data_source_ibm_schematics_agent_deploy.go b/ibm/service/schematics/data_source_ibm_schematics_agent_deploy.go index 7fa062e5b4..ebc1cafde8 100644 --- a/ibm/service/schematics/data_source_ibm_schematics_agent_deploy.go +++ b/ibm/service/schematics/data_source_ibm_schematics_agent_deploy.go @@ -79,8 +79,7 @@ func dataSourceIbmSchematicsAgentDeployRead(context context.Context, d *schema.R } getAgentDataOptions := &schematicsv1.GetAgentDataOptions{ - XFeatureAgents: core.BoolPtr(true), - Profile: core.StringPtr("detailed"), + Profile: core.StringPtr("detailed"), } getAgentDataOptions.SetAgentID(d.Get("agent_id").(string)) diff --git a/ibm/service/schematics/data_source_ibm_schematics_agent_deploy_test.go b/ibm/service/schematics/data_source_ibm_schematics_agent_deploy_test.go index dc89e8d162..1a00e2eef4 100644 --- 
a/ibm/service/schematics/data_source_ibm_schematics_agent_deploy_test.go +++ b/ibm/service/schematics/data_source_ibm_schematics_agent_deploy_test.go @@ -22,25 +22,6 @@ func TestAccIbmSchematicsAgentDeployDataSourceBasic(t *testing.T) { Steps: []resource.TestStep{ resource.TestStep{ Config: testAccCheckIbmSchematicsAgentDeployDataSourceConfigBasic(agentDeployJobAgentID), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet("data.ibm_schematics_agent_deploy.schematics_agent_deploy_instance", "id"), - resource.TestCheckResourceAttrSet("data.ibm_schematics_agent_deploy.schematics_agent_deploy_instance", "agent_id"), - ), - }, - }, - }) -} - -func TestAccIbmSchematicsAgentDeployDataSourceAllArgs(t *testing.T) { - agentDeployJobAgentID := fmt.Sprintf("tf_agent_id_%d", acctest.RandIntRange(10, 100)) - agentDeployJobForce := "false" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { acc.TestAccPreCheck(t) }, - Providers: acc.TestAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckIbmSchematicsAgentDeployDataSourceConfig(agentDeployJobAgentID, agentDeployJobForce), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrSet("data.ibm_schematics_agent_deploy.schematics_agent_deploy_instance", "id"), resource.TestCheckResourceAttrSet("data.ibm_schematics_agent_deploy.schematics_agent_deploy_instance", "agent_id"), @@ -60,25 +41,8 @@ func TestAccIbmSchematicsAgentDeployDataSourceAllArgs(t *testing.T) { func testAccCheckIbmSchematicsAgentDeployDataSourceConfigBasic(agentDeployJobAgentID string) string { return fmt.Sprintf(` - - resource "ibm_schematics_agent_deploy" "schematics_agent_deploy_instance" { - agent_id = "%s" - } data "ibm_schematics_agent_deploy" "schematics_agent_deploy_instance" { - agent_id = ibm_schematics_agent_deploy.schematics_agent_deploy_instance.agent_id - } - `, agentDeployJobAgentID) -} - -func testAccCheckIbmSchematicsAgentDeployDataSourceConfig(agentDeployJobAgentID string, agentDeployJobForce string) string { - return fmt.Sprintf(` - resource "ibm_schematics_agent_deploy" "schematics_agent_deploy_instance" { agent_id = "%s" - force = %s } - - data "ibm_schematics_agent_deploy" "schematics_agent_deploy_instance" { - agent_id = ibm_schematics_agent_deploy.schematics_agent_deploy_instance.agent_id - } - `, agentDeployJobAgentID, agentDeployJobForce) + `, agentDeployJobAgentID) } diff --git a/ibm/service/schematics/data_source_ibm_schematics_agent_health.go b/ibm/service/schematics/data_source_ibm_schematics_agent_health.go index 87ea05609d..637d819706 100644 --- a/ibm/service/schematics/data_source_ibm_schematics_agent_health.go +++ b/ibm/service/schematics/data_source_ibm_schematics_agent_health.go @@ -74,8 +74,7 @@ func dataSourceIbmSchematicsAgentHealthRead(context context.Context, d *schema.R } getAgentDataOptions := &schematicsv1.GetAgentDataOptions{ - XFeatureAgents: core.BoolPtr(true), - Profile: core.StringPtr("detailed"), + Profile: core.StringPtr("detailed"), } getAgentDataOptions.SetAgentID(d.Get("agent_id").(string)) diff --git a/ibm/service/schematics/data_source_ibm_schematics_agent_health_test.go b/ibm/service/schematics/data_source_ibm_schematics_agent_health_test.go index e7351e4e8e..0aae09bed8 100644 --- a/ibm/service/schematics/data_source_ibm_schematics_agent_health_test.go +++ b/ibm/service/schematics/data_source_ibm_schematics_agent_health_test.go @@ -22,25 +22,6 @@ func TestAccIbmSchematicsAgentHealthDataSourceBasic(t *testing.T) { Steps: []resource.TestStep{ 
resource.TestStep{ Config: testAccCheckIbmSchematicsAgentHealthDataSourceConfigBasic(agentHealthJobAgentID), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet("data.ibm_schematics_agent_health.schematics_agent_health_instance", "id"), - resource.TestCheckResourceAttrSet("data.ibm_schematics_agent_health.schematics_agent_health_instance", "agent_id"), - ), - }, - }, - }) -} - -func TestAccIbmSchematicsAgentHealthDataSourceAllArgs(t *testing.T) { - agentHealthJobAgentID := fmt.Sprintf("tf_agent_id_%d", acctest.RandIntRange(10, 100)) - agentHealthJobForce := "false" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { acc.TestAccPreCheck(t) }, - Providers: acc.TestAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckIbmSchematicsAgentHealthDataSourceConfig(agentHealthJobAgentID, agentHealthJobForce), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrSet("data.ibm_schematics_agent_health.schematics_agent_health_instance", "id"), resource.TestCheckResourceAttrSet("data.ibm_schematics_agent_health.schematics_agent_health_instance", "agent_id"), @@ -59,25 +40,8 @@ func TestAccIbmSchematicsAgentHealthDataSourceAllArgs(t *testing.T) { func testAccCheckIbmSchematicsAgentHealthDataSourceConfigBasic(agentHealthJobAgentID string) string { return fmt.Sprintf(` - resource "ibm_schematics_agent_health" "schematics_agent_health_instance" { - agent_id = "%s" - } - data "ibm_schematics_agent_health" "schematics_agent_health_instance" { - agent_id = ibm_schematics_agent_health.schematics_agent_health.agent_id - } - `, agentHealthJobAgentID) -} - -func testAccCheckIbmSchematicsAgentHealthDataSourceConfig(agentHealthJobAgentID string, agentHealthJobForce string) string { - return fmt.Sprintf(` - resource "ibm_schematics_agent_health" "schematics_agent_health_instance" { agent_id = "%s" - force = %s } - - data "ibm_schematics_agent_health" "schematics_agent_health_instance" { - agent_id = ibm_schematics_agent_health.schematics_agent_health.agent_id - } - `, agentHealthJobAgentID, agentHealthJobForce) + `, agentHealthJobAgentID) } diff --git a/ibm/service/schematics/data_source_ibm_schematics_agent_prs.go b/ibm/service/schematics/data_source_ibm_schematics_agent_prs.go index e0c4eb613f..d54b38d38c 100644 --- a/ibm/service/schematics/data_source_ibm_schematics_agent_prs.go +++ b/ibm/service/schematics/data_source_ibm_schematics_agent_prs.go @@ -74,8 +74,7 @@ func dataSourceIbmSchematicsAgentPrsRead(context context.Context, d *schema.Reso } getAgentDataOptions := &schematicsv1.GetAgentDataOptions{ - XFeatureAgents: core.BoolPtr(true), - Profile: core.StringPtr("detailed"), + Profile: core.StringPtr("detailed"), } getAgentDataOptions.SetAgentID(d.Get("agent_id").(string)) diff --git a/ibm/service/schematics/data_source_ibm_schematics_agent_prs_test.go b/ibm/service/schematics/data_source_ibm_schematics_agent_prs_test.go index 3a1e4be7fd..e872a6b1a6 100644 --- a/ibm/service/schematics/data_source_ibm_schematics_agent_prs_test.go +++ b/ibm/service/schematics/data_source_ibm_schematics_agent_prs_test.go @@ -22,25 +22,6 @@ func TestAccIbmSchematicsAgentPrsDataSourceBasic(t *testing.T) { Steps: []resource.TestStep{ resource.TestStep{ Config: testAccCheckIbmSchematicsAgentPrsDataSourceConfigBasic(agentPRSJobAgentID), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet("data.ibm_schematics_agent_prs.schematics_agent_prs_instance", "id"), - 
resource.TestCheckResourceAttrSet("data.ibm_schematics_agent_prs.schematics_agent_prs_instance", "agent_id"), - ), - }, - }, - }) -} - -func TestAccIbmSchematicsAgentPrsDataSourceAllArgs(t *testing.T) { - agentPRSJobAgentID := fmt.Sprintf("tf_agent_id_%d", acctest.RandIntRange(10, 100)) - agentPRSJobForce := "false" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { acc.TestAccPreCheck(t) }, - Providers: acc.TestAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckIbmSchematicsAgentPrsDataSourceConfig(agentPRSJobAgentID, agentPRSJobForce), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrSet("data.ibm_schematics_agent_prs.schematics_agent_prs_instance", "id"), resource.TestCheckResourceAttrSet("data.ibm_schematics_agent_prs.schematics_agent_prs_instance", "agent_id"), @@ -59,25 +40,8 @@ func TestAccIbmSchematicsAgentPrsDataSourceAllArgs(t *testing.T) { func testAccCheckIbmSchematicsAgentPrsDataSourceConfigBasic(agentPRSJobAgentID string) string { return fmt.Sprintf(` - resource "ibm_schematics_agent_prs" "schematics_agent_prs_instance" { - agent_id = "%s" - } - data "ibm_schematics_agent_prs" "schematics_agent_prs_instance" { - agent_id = ibm_schematics_agent_prs.schematics_agent_prs_instance.agent_id - } - `, agentPRSJobAgentID) -} - -func testAccCheckIbmSchematicsAgentPrsDataSourceConfig(agentPRSJobAgentID string, agentPRSJobForce string) string { - return fmt.Sprintf(` - resource "ibm_schematics_agent_prs" "schematics_agent_prs_instance" { agent_id = "%s" - force = %s } - - data "ibm_schematics_agent_prs" "schematics_agent_prs_instance" { - agent_id = ibm_schematics_agent_prs.schematics_agent_prs_instance.agent_id - } - `, agentPRSJobAgentID, agentPRSJobForce) + `, agentPRSJobAgentID) } diff --git a/ibm/service/schematics/data_source_ibm_schematics_agent_test.go b/ibm/service/schematics/data_source_ibm_schematics_agent_test.go index f3ec02934b..c5d5ab56b5 100644 --- a/ibm/service/schematics/data_source_ibm_schematics_agent_test.go +++ b/ibm/service/schematics/data_source_ibm_schematics_agent_test.go @@ -15,7 +15,7 @@ import ( func TestAccIbmSchematicsAgentDataSourceBasic(t *testing.T) { agentDataName := fmt.Sprintf("tf_name_%d", acctest.RandIntRange(10, 100)) - agentDataVersion := "1.0.0-beta2" + agentDataVersion := "1.0.0" agentDataSchematicsLocation := "us-south" agentDataAgentLocation := "eu-de" @@ -42,7 +42,7 @@ func TestAccIbmSchematicsAgentDataSourceBasic(t *testing.T) { func TestAccIbmSchematicsAgentDataSourceAllArgs(t *testing.T) { agentDataName := fmt.Sprintf("tf_name_%d", acctest.RandIntRange(10, 100)) - agentDataVersion := "1.0.0-beta2" + agentDataVersion := "1.0.0" agentDataSchematicsLocation := "us-south" agentDataAgentLocation := "eu-de" agentDataDescription := fmt.Sprintf("tf_description_%d", acctest.RandIntRange(10, 100)) @@ -65,19 +65,12 @@ func TestAccIbmSchematicsAgentDataSourceAllArgs(t *testing.T) { resource.TestCheckResourceAttrSet("data.ibm_schematics_agent.schematics_agent_instance", "agent_location"), resource.TestCheckResourceAttrSet("data.ibm_schematics_agent.schematics_agent_instance", "agent_infrastructure.#"), resource.TestCheckResourceAttrSet("data.ibm_schematics_agent.schematics_agent_instance", "agent_metadata.#"), - resource.TestCheckResourceAttr("data.ibm_schematics_agent.schematics_agent_instance", "agent_metadata.0.name", agentDataName), - resource.TestCheckResourceAttrSet("data.ibm_schematics_agent.schematics_agent_instance", "agent_inputs.#"), - 
resource.TestCheckResourceAttr("data.ibm_schematics_agent.schematics_agent_instance", "agent_inputs.0.name", agentDataName), - resource.TestCheckResourceAttrSet("data.ibm_schematics_agent.schematics_agent_instance", "agent_inputs.0.value"), - resource.TestCheckResourceAttrSet("data.ibm_schematics_agent.schematics_agent_instance", "agent_inputs.0.use_default"), - resource.TestCheckResourceAttrSet("data.ibm_schematics_agent.schematics_agent_instance", "agent_inputs.0.link"), resource.TestCheckResourceAttrSet("data.ibm_schematics_agent.schematics_agent_instance", "user_state.#"), resource.TestCheckResourceAttrSet("data.ibm_schematics_agent.schematics_agent_instance", "agent_crn"), resource.TestCheckResourceAttrSet("data.ibm_schematics_agent.schematics_agent_instance", "id"), resource.TestCheckResourceAttrSet("data.ibm_schematics_agent.schematics_agent_instance", "created_at"), resource.TestCheckResourceAttrSet("data.ibm_schematics_agent.schematics_agent_instance", "creation_by"), resource.TestCheckResourceAttrSet("data.ibm_schematics_agent.schematics_agent_instance", "updated_at"), - resource.TestCheckResourceAttrSet("data.ibm_schematics_agent.schematics_agent_instance", "updated_by"), resource.TestCheckResourceAttrSet("data.ibm_schematics_agent.schematics_agent_instance", "system_state.#"), resource.TestCheckResourceAttrSet("data.ibm_schematics_agent.schematics_agent_instance", "agent_kpi.#"), resource.TestCheckResourceAttrSet("data.ibm_schematics_agent.schematics_agent_instance", "recent_prs_job.#"), @@ -130,49 +123,13 @@ func testAccCheckIbmSchematicsAgentDataSourceConfig(agentDataName string, agentD cos_bucket_region = "cos_bucket_region" } description = "%s" - tags = "FIXME" + tags = ["agent-tag"] agent_metadata { name = "purpose" value = ["git", "terraform", "ansible"] } - agent_inputs { - name = "name" - value = "value" - use_default = true - metadata { - type = "boolean" - aliases = [ "aliases" ] - description = "description" - cloud_data_type = "cloud_data_type" - default_value = "default_value" - link_status = "normal" - secure = true - immutable = true - hidden = true - required = true - options = [ "options" ] - min_value = 1 - max_value = 1 - min_length = 1 - max_length = 1 - matches = "matches" - position = 1 - group_by = "group_by" - source = "source" - } - link = "link" - } user_state { state = "enable" - set_by = "set_by" - set_at = "2021-01-31T09:44:12Z" - } - agent_kpi { - availability_indicator = "available" - lifecycle_indicator = "consistent" - percent_usage_indicator = "percent_usage_indicator" - application_indicators = [ null ] - infra_indicators = [ null ] } } diff --git a/ibm/service/schematics/data_source_ibm_schematics_job.go b/ibm/service/schematics/data_source_ibm_schematics_job.go index 64f35170d5..663e3fa14b 100644 --- a/ibm/service/schematics/data_source_ibm_schematics_job.go +++ b/ibm/service/schematics/data_source_ibm_schematics_job.go @@ -4212,7 +4212,7 @@ func dataSourceJobLogSummaryToMap(logSummaryItem schematicsv1.JobLogSummary) (lo return logSummaryMap } -func dataSourceJobLogSummaryLogErrorsToMap(logErrorsItem schematicsv1.JobLogSummaryLogErrors) (logErrorsMap map[string]interface{}) { +func dataSourceJobLogSummaryLogErrorsToMap(logErrorsItem schematicsv1.JobLogSummaryLogErrorsItem) (logErrorsMap map[string]interface{}) { logErrorsMap = map[string]interface{}{} if logErrorsItem.ErrorCode != nil { diff --git a/ibm/service/schematics/data_source_ibm_schematics_output.go b/ibm/service/schematics/data_source_ibm_schematics_output.go index 
ec81390106..0e46b0c9c9 100644 --- a/ibm/service/schematics/data_source_ibm_schematics_output.go +++ b/ibm/service/schematics/data_source_ibm_schematics_output.go @@ -123,37 +123,3 @@ func dataSourceIBMSchematicsOutputRead(d *schema.ResourceData, meta interface{}) func dataSourceIBMSchematicsOutputID(d *schema.ResourceData) string { return time.Now().UTC().String() } - -func dataSourceOutputValuesListFlattenOutputValues(result []schematicsv1.OutputValuesInner) (outputValues interface{}) { - for _, outputValuesItem := range result { - outputValues = dataSourceOutputValuesListOutputValuesToMap(outputValuesItem) - } - - return outputValues -} - -func dataSourceOutputValuesListOutputValuesToMap(outputValuesItem schematicsv1.OutputValuesInner) (outputValuesMap map[string]interface{}) { - outputValuesMap = map[string]interface{}{} - - if outputValuesItem.Folder != nil { - outputValuesMap["folder"] = outputValuesItem.Folder - } - if outputValuesItem.ID != nil { - outputValuesMap["id"] = outputValuesItem.ID - } - - m := []flex.Map{} - - for _, outputValues := range outputValuesItem.OutputValues { - m = append(m, flex.Flatten(outputValues)) - } - - if outputValuesItem.OutputValues != nil { - outputValuesMap["output_values"] = m - } - if outputValuesItem.ValueType != nil { - outputValuesMap["value_type"] = outputValuesItem.ValueType - } - - return outputValuesMap -} diff --git a/ibm/service/schematics/data_source_ibm_schematics_policy.go b/ibm/service/schematics/data_source_ibm_schematics_policy.go index 51b2c40256..5bab24dfe2 100644 --- a/ibm/service/schematics/data_source_ibm_schematics_policy.go +++ b/ibm/service/schematics/data_source_ibm_schematics_policy.go @@ -315,6 +315,11 @@ func dataSourceIbmSchematicsPolicyRead(context context.Context, d *schema.Resour return diag.FromErr(fmt.Errorf("Error setting kind: %s", err)) } + if policy.Tags != nil { + if err = d.Set("tags", policy.Tags); err != nil { + return diag.FromErr(fmt.Errorf("Error setting tags: %s", err)) + } + } target := []map[string]interface{}{} if policy.Target != nil { modelMap, err := dataSourceIbmSchematicsPolicyPolicyObjectsToMap(policy.Target) diff --git a/ibm/service/schematics/data_source_ibm_schematics_policy_test.go b/ibm/service/schematics/data_source_ibm_schematics_policy_test.go index a31976aa94..d9075ac22d 100644 --- a/ibm/service/schematics/data_source_ibm_schematics_policy_test.go +++ b/ibm/service/schematics/data_source_ibm_schematics_policy_test.go @@ -34,7 +34,7 @@ func TestAccIbmSchematicsPolicyDataSourceBasic(t *testing.T) { func TestAccIbmSchematicsPolicyDataSourceAllArgs(t *testing.T) { policyName := fmt.Sprintf("tf_name_%d", acctest.RandIntRange(10, 100)) policyDescription := fmt.Sprintf("tf_description_%d", acctest.RandIntRange(10, 100)) - policyResourceGroup := fmt.Sprintf("tf_resource_group_%d", acctest.RandIntRange(10, 100)) + policyResourceGroup := "Default" policyLocation := "us-south" policyKind := "agent_assignment_policy" @@ -57,11 +57,7 @@ func TestAccIbmSchematicsPolicyDataSourceAllArgs(t *testing.T) { resource.TestCheckResourceAttrSet("data.ibm_schematics_policy.schematics_policy_instance", "target.#"), resource.TestCheckResourceAttrSet("data.ibm_schematics_policy.schematics_policy_instance", "parameter.#"), resource.TestCheckResourceAttrSet("data.ibm_schematics_policy.schematics_policy_instance", "id"), - resource.TestCheckResourceAttrSet("data.ibm_schematics_policy.schematics_policy_instance", "crn"), resource.TestCheckResourceAttrSet("data.ibm_schematics_policy.schematics_policy_instance", 
"account"), - resource.TestCheckResourceAttrSet("data.ibm_schematics_policy.schematics_policy_instance", "scoped_resources.#"), - resource.TestCheckResourceAttr("data.ibm_schematics_policy.schematics_policy_instance", "scoped_resources.0.kind", policyKind), - resource.TestCheckResourceAttrSet("data.ibm_schematics_policy.schematics_policy_instance", "scoped_resources.0.id"), resource.TestCheckResourceAttrSet("data.ibm_schematics_policy.schematics_policy_instance", "created_at"), resource.TestCheckResourceAttrSet("data.ibm_schematics_policy.schematics_policy_instance", "created_by"), resource.TestCheckResourceAttrSet("data.ibm_schematics_policy.schematics_policy_instance", "updated_at"), @@ -90,40 +86,24 @@ func testAccCheckIbmSchematicsPolicyDataSourceConfig(policyName string, policyDe name = "%s" description = "%s" resource_group = "%s" - tags = "FIXME" + tags = ["policy-tag"] location = "%s" - state { - state = "draft" - set_by = "set_by" - set_at = "2021-01-31T09:44:12Z" - } kind = "%s" target { selector_kind = "ids" selector_ids = [ "selector_ids" ] - selector_scope { - kind = "workspace" - tags = [ "tags" ] - resource_groups = [ "resource_groups" ] - locations = [ "us-south" ] - } } parameter { agent_assignment_policy_parameter { - selector_kind = "ids" - selector_ids = [ "selector_ids" ] + selector_kind = "scoped" selector_scope { kind = "workspace" tags = [ "tags" ] - resource_groups = [ "resource_groups" ] + resource_groups = [ "Default" ] locations = [ "us-south" ] } } } - scoped_resources { - kind = "workspace" - id = "id" - } } data "ibm_schematics_policy" "schematics_policy_instance" { diff --git a/ibm/service/schematics/data_source_ibm_schematics_workspace.go b/ibm/service/schematics/data_source_ibm_schematics_workspace.go index bdd9f28d1f..7e59232d2f 100644 --- a/ibm/service/schematics/data_source_ibm_schematics_workspace.go +++ b/ibm/service/schematics/data_source_ibm_schematics_workspace.go @@ -5,6 +5,7 @@ package schematics import ( "context" + "encoding/json" "fmt" "log" @@ -86,30 +87,10 @@ func DataSourceIBMSchematicsWorkspace() *schema.Resource { Description: "The version of the software template that you chose to install from the IBM Cloud catalog.", }, "service_extensions": { - Type: schema.TypeList, + Type: schema.TypeString, Computed: true, - Description: "List of service data", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Computed: true, - Description: "Name of the Service Data.", - }, - "value": { - Type: schema.TypeString, - Computed: true, - Description: "Value of the Service Data.", - }, - "type": { - Type: schema.TypeString, - Computed: true, - Description: "Type of the value string, int, bool.", - }, - }, - }, - }, - }}}, + Description: "Service extensions defined as string of json", + }}}}, "created_at": { Type: schema.TypeString, Computed: true, @@ -880,30 +861,16 @@ func dataSourceWorkspaceResponseCatalogRefToMap(catalogRefItem schematicsv1.Cata catalogRefMap["offering_version"] = catalogRefItem.OfferingVersion } if catalogRefItem.ServiceExtensions != nil { - serviceExtensionsList := []map[string]interface{}{} - for _, serviceExtensionsItem := range catalogRefItem.ServiceExtensions { - serviceExtensionsList = append(serviceExtensionsList, dataSourceWorkspaceResponseCatalogRefServiceExtensionsToMap(serviceExtensionsItem)) + serviceExtensionsByte, err := json.MarshalIndent(catalogRefItem.ServiceExtensions, "", "") + if err != nil { + log.Printf("[DEBUG] Marshelling of service extensions failed %s", 
err) } - catalogRefMap["service_extensions"] = serviceExtensionsList + serviceExtensionsJSON := string(serviceExtensionsByte[:]) + catalogRefMap["service_extensions"] = serviceExtensionsJSON } return catalogRefMap } -func dataSourceWorkspaceResponseCatalogRefServiceExtensionsToMap(serviceExtensionsItem schematicsv1.ServiceExtensions) (serviceExtensionMap map[string]interface{}) { - serviceExtensionMap = map[string]interface{}{} - - if serviceExtensionsItem.Name != nil { - serviceExtensionMap["name"] = *serviceExtensionsItem.Name - } - if serviceExtensionsItem.Type != nil { - serviceExtensionMap["type"] = serviceExtensionsItem.Type - } - if serviceExtensionsItem.Value != nil { - serviceExtensionMap["value"] = *serviceExtensionsItem.Value - } - return serviceExtensionMap -} - func dataSourceWorkspaceResponseFlattenRuntimeData(result []schematicsv1.TemplateRunTimeDataResponse) (runtimeData []map[string]interface{}) { for _, runtimeDataItem := range result { runtimeData = append(runtimeData, dataSourceWorkspaceResponseRuntimeDataToMap(runtimeDataItem)) diff --git a/ibm/service/schematics/resource_ibm_schematics_agent.go b/ibm/service/schematics/resource_ibm_schematics_agent.go index edfbf33540..36f440553f 100644 --- a/ibm/service/schematics/resource_ibm_schematics_agent.go +++ b/ibm/service/schematics/resource_ibm_schematics_agent.go @@ -40,6 +40,7 @@ func ResourceIbmSchematicsAgent() *schema.Resource { "resource_group": &schema.Schema{ Type: schema.TypeString, Required: true, + ForceNew: true, Description: "The resource-group name for the agent. By default, agent will be registered in Default Resource Group.", }, "version": &schema.Schema{ @@ -132,6 +133,7 @@ func ResourceIbmSchematicsAgent() *schema.Resource { "agent_inputs": &schema.Schema{ Type: schema.TypeList, Optional: true, + ForceNew: true, Description: "Additional input variables for the agent.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -270,6 +272,7 @@ func ResourceIbmSchematicsAgent() *schema.Resource { MaxItems: 1, Optional: true, Computed: true, + ForceNew: true, Description: "User defined status of the agent.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -294,40 +297,33 @@ func ResourceIbmSchematicsAgent() *schema.Resource { }, "agent_kpi": &schema.Schema{ Type: schema.TypeList, - MaxItems: 1, - Optional: true, Computed: true, Description: "Schematics Agent key performance indicators.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "availability_indicator": &schema.Schema{ Type: schema.TypeString, - Optional: true, Computed: true, Description: "Overall availability indicator reported by the agent.", }, "lifecycle_indicator": &schema.Schema{ Type: schema.TypeString, - Optional: true, Computed: true, Description: "Overall lifecycle indicator reported by the agents.", }, "percent_usage_indicator": &schema.Schema{ Type: schema.TypeString, - Optional: true, Computed: true, Description: "Percentage usage of the agent resources.", }, "application_indicators": &schema.Schema{ Type: schema.TypeList, - Optional: true, Computed: true, Description: "Agent application key performance indicators.", Elem: &schema.Schema{Type: schema.TypeMap}, }, "infra_indicators": &schema.Schema{ Type: schema.TypeList, - Optional: true, Computed: true, Description: "Agent infrastructure key performance indicators.", Elem: &schema.Schema{Type: schema.TypeMap}, @@ -559,9 +555,7 @@ func resourceIbmSchematicsAgentCreate(context context.Context, d *schema.Resourc return diag.FromErr(err) } - 
createAgentDataOptions := &schematicsv1.CreateAgentDataOptions{ - XFeatureAgents: core.BoolPtr(true), - } + createAgentDataOptions := &schematicsv1.CreateAgentDataOptions{} createAgentDataOptions.SetName(d.Get("name").(string)) createAgentDataOptions.SetResourceGroup(d.Get("resource_group").(string)) @@ -577,7 +571,7 @@ func resourceIbmSchematicsAgentCreate(context context.Context, d *schema.Resourc createAgentDataOptions.SetDescription(d.Get("description").(string)) } if _, ok := d.GetOk("tags"); ok { - createAgentDataOptions.SetTags(d.Get("tags").([]string)) + createAgentDataOptions.SetTags(flex.ExpandStringList(d.Get("tags").([]interface{}))) } if _, ok := d.GetOk("agent_metadata"); ok { var agentMetadata []schematicsv1.AgentMetadataInfo @@ -610,13 +604,6 @@ func resourceIbmSchematicsAgentCreate(context context.Context, d *schema.Resourc } createAgentDataOptions.SetUserState(userStateModel) } - if _, ok := d.GetOk("agent_kpi"); ok { - agentKpiModel, err := resourceIbmSchematicsAgentMapToAgentKPIData(d.Get("agent_kpi.0").(map[string]interface{})) - if err != nil { - return diag.FromErr(err) - } - createAgentDataOptions.SetAgentKpi(agentKpiModel) - } agentData, response, err := schematicsClient.CreateAgentDataWithContext(context, createAgentDataOptions) if err != nil { @@ -636,8 +623,7 @@ func resourceIbmSchematicsAgentRead(context context.Context, d *schema.ResourceD } getAgentDataOptions := &schematicsv1.GetAgentDataOptions{ - XFeatureAgents: core.BoolPtr(true), - Profile: core.StringPtr("detailed"), + Profile: core.StringPtr("detailed"), } getAgentDataOptions.SetAgentID(d.Id()) @@ -787,20 +773,87 @@ func resourceIbmSchematicsAgentUpdate(context context.Context, d *schema.Resourc return diag.FromErr(err) } - updateAgentDataOptions := &schematicsv1.UpdateAgentDataOptions{ - XFeatureAgents: core.BoolPtr(true), + updateAgentDataOptions := &schematicsv1.UpdateAgentDataOptions{} + session, err := meta.(conns.ClientSession).BluemixSession() + if err != nil { + return diag.FromErr(err) + } + iamRefreshToken := session.Config.IAMRefreshToken + ff := map[string]string{ + "refresh_token": iamRefreshToken, } + updateAgentDataOptions.Headers = ff updateAgentDataOptions.SetAgentID(d.Id()) hasChange := false - if d.HasChange("name") || d.HasChange("resource_group") || d.HasChange("version") || d.HasChange("schematics_location") || d.HasChange("agent_location") || d.HasChange("agent_infrastructure") { - updateAgentDataOptions.SetName(d.Get("name").(string)) - updateAgentDataOptions.SetResourceGroup(d.Get("resource_group").(string)) - updateAgentDataOptions.SetVersion(d.Get("version").(string)) + getAgentDataOptions := &schematicsv1.GetAgentDataOptions{ + Profile: core.StringPtr("detailed"), + } + + getAgentDataOptions.SetAgentID(d.Id()) + + agentData, response, err := schematicsClient.GetAgentDataWithContext(context, getAgentDataOptions) + if err != nil { + if response != nil && response.StatusCode == 404 { + d.SetId("") + return nil + } + log.Printf("[DEBUG] GetAgentDataWithContext failed %s\n%s", err, response) + return diag.FromErr(fmt.Errorf("GetAgentDataWithContext failed %s\n%s", err, response)) + } + if agentData.Name != nil { + updateAgentDataOptions.Name = agentData.Name + } + if agentData.ResourceGroup != nil { + updateAgentDataOptions.ResourceGroup = agentData.ResourceGroup + } + if agentData.Version != nil { + updateAgentDataOptions.Version = agentData.Version + } + if agentData.SchematicsLocation != nil { + updateAgentDataOptions.SchematicsLocation = agentData.SchematicsLocation + } + if 
agentData.AgentLocation != nil { + updateAgentDataOptions.AgentLocation = agentData.AgentLocation + } + if agentData.AgentInfrastructure != nil { + updateAgentDataOptions.AgentInfrastructure = agentData.AgentInfrastructure + } + if agentData.Description != nil { + updateAgentDataOptions.Description = agentData.Description + } + if agentData.Tags != nil { + updateAgentDataOptions.Tags = agentData.Tags + } + if agentData.AgentMetadata != nil { + updateAgentDataOptions.AgentMetadata = agentData.AgentMetadata + } + if agentData.AgentInputs != nil { + updateAgentDataOptions.AgentInputs = agentData.AgentInputs + } + if agentData.UserState != nil { + updateAgentDataOptions.UserState = agentData.UserState + } + if agentData.AgentKpi != nil { + updateAgentDataOptions.AgentKpi = agentData.AgentKpi + } + + if d.HasChange("schematics_location") || d.HasChange("agent_location") { updateAgentDataOptions.SetSchematicsLocation(d.Get("schematics_location").(string)) updateAgentDataOptions.SetAgentLocation(d.Get("agent_location").(string)) + hasChange = true + } + if d.HasChange("name") { + updateAgentDataOptions.SetName(d.Get("name").(string)) + hasChange = true + } + if d.HasChange("version") { + updateAgentDataOptions.SetVersion(d.Get("version").(string)) + hasChange = true + } + if d.HasChange("agent_infrastructure") { agentInfrastructure, err := resourceIbmSchematicsAgentMapToAgentInfrastructure(d.Get("agent_infrastructure.0").(map[string]interface{})) if err != nil { return diag.FromErr(err) @@ -813,7 +866,7 @@ func resourceIbmSchematicsAgentUpdate(context context.Context, d *schema.Resourc hasChange = true } if d.HasChange("tags") { - updateAgentDataOptions.SetTags(d.Get("tags").([]string)) + updateAgentDataOptions.SetTags(flex.ExpandStringList(d.Get("tags").([]interface{}))) hasChange = true } if d.HasChange("agent_metadata") { @@ -829,36 +882,6 @@ func resourceIbmSchematicsAgentUpdate(context context.Context, d *schema.Resourc updateAgentDataOptions.SetAgentMetadata(agentMetadata) hasChange = true } - if d.HasChange("agent_inputs") { - // TODO: handle AgentInputs of type TypeList -- not primitive, not model - var agentInputs []schematicsv1.VariableData - for _, e := range d.Get("agent_inputs").([]interface{}) { - value := e.(map[string]interface{}) - agentInputsItem, err := resourceIbmSchematicsAgentMapToVariableData(value) - if err != nil { - return diag.FromErr(err) - } - agentInputs = append(agentInputs, *agentInputsItem) - } - updateAgentDataOptions.SetAgentInputs(agentInputs) - hasChange = true - } - if d.HasChange("user_state") { - userState, err := resourceIbmSchematicsAgentMapToAgentUserState(d.Get("user_state.0").(map[string]interface{})) - if err != nil { - return diag.FromErr(err) - } - updateAgentDataOptions.SetUserState(userState) - hasChange = true - } - if d.HasChange("agent_kpi") { - agentKpi, err := resourceIbmSchematicsAgentMapToAgentKPIData(d.Get("agent_kpi.0").(map[string]interface{})) - if err != nil { - return diag.FromErr(err) - } - updateAgentDataOptions.SetAgentKpi(agentKpi) - hasChange = true - } if hasChange { _, response, err := schematicsClient.UpdateAgentDataWithContext(context, updateAgentDataOptions) @@ -877,9 +900,16 @@ func resourceIbmSchematicsAgentDelete(context context.Context, d *schema.Resourc return diag.FromErr(err) } - deleteAgentDataOptions := &schematicsv1.DeleteAgentDataOptions{ - XFeatureAgents: core.BoolPtr(true), + deleteAgentDataOptions := &schematicsv1.DeleteAgentDataOptions{} + session, err := meta.(conns.ClientSession).BluemixSession() + if err != 
nil { + return diag.FromErr(err) } + iamRefreshToken := session.Config.IAMRefreshToken + ff := map[string]string{ + "refresh_token": iamRefreshToken, + } + deleteAgentDataOptions.Headers = ff deleteAgentDataOptions.SetAgentID(d.Id()) @@ -1040,34 +1070,6 @@ func resourceIbmSchematicsAgentMapToAgentUserState(modelMap map[string]interface return model, nil } -func resourceIbmSchematicsAgentMapToAgentKPIData(modelMap map[string]interface{}) (*schematicsv1.AgentKPIData, error) { - model := &schematicsv1.AgentKPIData{} - if modelMap["availability_indicator"] != nil && modelMap["availability_indicator"].(string) != "" { - model.AvailabilityIndicator = core.StringPtr(modelMap["availability_indicator"].(string)) - } - if modelMap["lifecycle_indicator"] != nil && modelMap["lifecycle_indicator"].(string) != "" { - model.LifecycleIndicator = core.StringPtr(modelMap["lifecycle_indicator"].(string)) - } - if modelMap["percent_usage_indicator"] != nil && modelMap["percent_usage_indicator"].(string) != "" { - model.PercentUsageIndicator = core.StringPtr(modelMap["percent_usage_indicator"].(string)) - } - if modelMap["application_indicators"] != nil { - applicationIndicators := []interface{}{} - for _, applicationIndicatorsItem := range modelMap["application_indicators"].([]interface{}) { - applicationIndicators = append(applicationIndicators, applicationIndicatorsItem) - } - model.ApplicationIndicators = applicationIndicators - } - if modelMap["infra_indicators"] != nil { - infraIndicators := []interface{}{} - for _, infraIndicatorsItem := range modelMap["infra_indicators"].([]interface{}) { - infraIndicators = append(infraIndicators, infraIndicatorsItem) - } - model.InfraIndicators = infraIndicators - } - return model, nil -} - func resourceIbmSchematicsAgentAgentInfrastructureToMap(model *schematicsv1.AgentInfrastructure) (map[string]interface{}, error) { modelMap := make(map[string]interface{}) if model.InfraType != nil { diff --git a/ibm/service/schematics/resource_ibm_schematics_agent_deploy.go b/ibm/service/schematics/resource_ibm_schematics_agent_deploy.go index ec9564c686..5610a01ef6 100644 --- a/ibm/service/schematics/resource_ibm_schematics_agent_deploy.go +++ b/ibm/service/schematics/resource_ibm_schematics_agent_deploy.go @@ -27,9 +27,9 @@ func ResourceIbmSchematicsAgentDeploy() *schema.Resource { DeleteContext: resourceIbmSchematicsAgentDeployDelete, Importer: &schema.ResourceImporter{}, Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Update: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), + Create: schema.DefaultTimeout(30 * time.Minute), + Update: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), }, Schema: map[string]*schema.Schema{ @@ -102,9 +102,8 @@ func resourceIbmSchematicsAgentDeployCreate(context context.Context, d *schema.R deployAgentJobOptions := &schematicsv1.DeployAgentJobOptions{} ff := map[string]string{ - "X-Feature-Agents": "true", - "Authorization": iamAccessToken, - "refresh_token": iamRefreshToken, + "Authorization": iamAccessToken, + "refresh_token": iamRefreshToken, } deployAgentJobOptions.Headers = ff deployAgentJobOptions.SetAgentID(d.Get("agent_id").(string)) @@ -120,7 +119,6 @@ func resourceIbmSchematicsAgentDeployCreate(context context.Context, d *schema.R d.SetId(fmt.Sprintf("%s/%s", *deployAgentJobOptions.AgentID, *agentDeployJob.JobID)) log.Printf("[INFO] Agent : %s", *deployAgentJobOptions.AgentID) - d.Set("status_message", 
*agentDeployJob.StatusMessage) _, err = isWaitForAgentAvailable(context, schematicsClient, *deployAgentJobOptions.AgentID, d.Timeout(schema.TimeoutCreate)) if err != nil { @@ -131,17 +129,21 @@ func resourceIbmSchematicsAgentDeployCreate(context context.Context, d *schema.R } const ( - agentProvisioningTriggered = "Triggered deployment" - agentProvisioningDone = "success" - agentProvisioningPending = "PENDING" - agentProvisioninFailed = "Job Failed" + agentProvisioningStatusCodeJobCancelled = "job_cancelled" + agentProvisioningStatusCodeJobFailed = "job_failed" + agentProvisioningStatusCodeJobFinished = "job_finished" + agentProvisioningStatusCodeJobInProgress = "job_in_progress" + agentProvisioningStatusCodeJobPending = "job_pending" + agentProvisioningStatusCodeJobReadyToExecute = "job_ready_to_execute" + agentProvisioningStatusCodeJobStopInProgress = "job_stop_in_progress" + agentProvisioningStatusCodeJobStopped = "job_stopped" ) func isWaitForAgentAvailable(context context.Context, schematicsClient *schematicsv1.SchematicsV1, id string, timeout time.Duration) (interface{}, error) { log.Printf("Waiting for agent (%s) to be available.", id) stateConf := &resource.StateChangeConf{ - Pending: []string{"retry", agentProvisioningPending, agentProvisioningTriggered}, - Target: []string{agentProvisioningDone, agentProvisioninFailed, ""}, + Pending: []string{"retry", agentProvisioningStatusCodeJobInProgress, agentProvisioningStatusCodeJobPending, agentProvisioningStatusCodeJobReadyToExecute, agentProvisioningStatusCodeJobStopInProgress}, + Target: []string{agentProvisioningStatusCodeJobFinished, agentProvisioningStatusCodeJobFailed, agentProvisioningStatusCodeJobCancelled, agentProvisioningStatusCodeJobStopped, ""}, Refresh: agentRefreshFunc(schematicsClient, id), Timeout: timeout, Delay: 10 * time.Second, @@ -152,19 +154,18 @@ func isWaitForAgentAvailable(context context.Context, schematicsClient *schemati func agentRefreshFunc(schematicsClient *schematicsv1.SchematicsV1, id string) resource.StateRefreshFunc { return func() (interface{}, string, error) { getAgentDataOptions := &schematicsv1.GetAgentDataOptions{ - AgentID: core.StringPtr(id), - XFeatureAgents: core.BoolPtr(true), - Profile: core.StringPtr("detailed"), + AgentID: core.StringPtr(id), + Profile: core.StringPtr("detailed"), } agent, response, err := schematicsClient.GetAgentData(getAgentDataOptions) if err != nil { return nil, "", fmt.Errorf("[ERROR] Error Getting Agent: %s\n%s", err, response) } - if *agent.RecentDeployJob.StatusMessage == agentProvisioninFailed || *agent.RecentDeployJob.StatusMessage == agentProvisioningDone { - return agent, agentProvisioningDone, nil + if agent.RecentDeployJob.StatusCode != nil { + return agent, *agent.RecentDeployJob.StatusCode, nil } - return agent, agentProvisioningPending, nil + return agent, agentProvisioningStatusCodeJobPending, nil } } @@ -180,8 +181,7 @@ func resourceIbmSchematicsAgentDeployRead(context context.Context, d *schema.Res } getAgentDataOptions := &schematicsv1.GetAgentDataOptions{ - XFeatureAgents: core.BoolPtr(true), - Profile: core.StringPtr("detailed"), + Profile: core.StringPtr("detailed"), } getAgentDataOptions.SetAgentID(parts[0]) @@ -238,9 +238,8 @@ func resourceIbmSchematicsAgentDeployUpdate(context context.Context, d *schema.R iamRefreshToken := session.Config.IAMRefreshToken deployAgentJobOptions := &schematicsv1.DeployAgentJobOptions{} ff := map[string]string{ - "X-Feature-Agents": "true", - "Authorization": iamAccessToken, - "refresh_token": iamRefreshToken, + 
"Authorization": iamAccessToken, + "refresh_token": iamRefreshToken, } deployAgentJobOptions.Headers = ff @@ -269,7 +268,6 @@ func resourceIbmSchematicsAgentDeployUpdate(context context.Context, d *schema.R return diag.FromErr(fmt.Errorf("DeployAgentJobWithContext failed %s\n%s", err, response)) } d.SetId(fmt.Sprintf("%s/%s", *deployAgentJobOptions.AgentID, *agentDeployJob.JobID)) - d.Set("status_message", *agentDeployJob.StatusMessage) _, err = isWaitForAgentAvailable(context, schematicsClient, parts[0], d.Timeout(schema.TimeoutUpdate)) if err != nil { diff --git a/ibm/service/schematics/resource_ibm_schematics_agent_deploy_test.go b/ibm/service/schematics/resource_ibm_schematics_agent_deploy_test.go index bd73dc2fcf..0ff719cb81 100644 --- a/ibm/service/schematics/resource_ibm_schematics_agent_deploy_test.go +++ b/ibm/service/schematics/resource_ibm_schematics_agent_deploy_test.go @@ -38,41 +38,6 @@ func TestAccIbmSchematicsAgentDeployBasic(t *testing.T) { }) } -func TestAccIbmSchematicsAgentDeployAllArgs(t *testing.T) { - var conf *schematicsv1.AgentDataRecentDeployJob - agentID := fmt.Sprintf("tf_agent_id_%d", acctest.RandIntRange(10, 100)) - force := "false" - forceUpdate := "true" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { acc.TestAccPreCheck(t) }, - Providers: acc.TestAccProviders, - CheckDestroy: testAccCheckIbmSchematicsAgentDeployDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckIbmSchematicsAgentDeployConfig(agentID, force), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckIbmSchematicsAgentDeployExists("ibm_schematics_agent_deploy.schematics_agent_deploy_instance", conf), - resource.TestCheckResourceAttr("ibm_schematics_agent_deploy.schematics_agent_deploy_instance", "agent_id", agentID), - resource.TestCheckResourceAttr("ibm_schematics_agent_deploy.schematics_agent_deploy_instance", "force", force), - ), - }, - resource.TestStep{ - Config: testAccCheckIbmSchematicsAgentDeployConfig(agentID, forceUpdate), - Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr("ibm_schematics_agent_deploy.schematics_agent_deploy_instance", "agent_id", agentID), - resource.TestCheckResourceAttr("ibm_schematics_agent_deploy.schematics_agent_deploy_instance", "force", forceUpdate), - ), - }, - resource.TestStep{ - ResourceName: "ibm_schematics_agent_deploy.schematics_agent_deploy_instance", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - func testAccCheckIbmSchematicsAgentDeployConfigBasic(agentID string) string { return fmt.Sprintf(` @@ -81,17 +46,6 @@ func testAccCheckIbmSchematicsAgentDeployConfigBasic(agentID string) string { } `, agentID) } - -func testAccCheckIbmSchematicsAgentDeployConfig(agentID string, force string) string { - return fmt.Sprintf(` - - resource "ibm_schematics_agent_deploy" "schematics_agent_deploy_instance" { - agent_id = "%s" - force = %s - } - `, agentID, force) -} - func testAccCheckIbmSchematicsAgentDeployExists(n string, obj *schematicsv1.AgentDataRecentDeployJob) resource.TestCheckFunc { return func(s *terraform.State) error { @@ -106,8 +60,7 @@ func testAccCheckIbmSchematicsAgentDeployExists(n string, obj *schematicsv1.Agen } getAgentDataOptions := &schematicsv1.GetAgentDataOptions{ - XFeatureAgents: core.BoolPtr(true), - Profile: core.StringPtr("detailed"), + Profile: core.StringPtr("detailed"), } parts, err := flex.SepIdParts(rs.Primary.ID, "/") @@ -138,8 +91,7 @@ func testAccCheckIbmSchematicsAgentDeployDestroy(s *terraform.State) error { } 
getAgentDataOptions := &schematicsv1.GetAgentDataOptions{ - XFeatureAgents: core.BoolPtr(true), - Profile: core.StringPtr("detailed"), + Profile: core.StringPtr("detailed"), } parts, err := flex.SepIdParts(rs.Primary.ID, "/") diff --git a/ibm/service/schematics/resource_ibm_schematics_agent_health.go b/ibm/service/schematics/resource_ibm_schematics_agent_health.go index 01f9e5c34b..a0765c75d9 100644 --- a/ibm/service/schematics/resource_ibm_schematics_agent_health.go +++ b/ibm/service/schematics/resource_ibm_schematics_agent_health.go @@ -90,9 +90,8 @@ func resourceIbmSchematicsAgentHealthCreate(context context.Context, d *schema.R healthCheckAgentJobOptions := &schematicsv1.HealthCheckAgentJobOptions{} ff := map[string]string{ - "X-Feature-Agents": "true", - "Authorization": iamAccessToken, - "refresh_token": iamRefreshToken, + "Authorization": iamAccessToken, + "refresh_token": iamRefreshToken, } healthCheckAgentJobOptions.Headers = ff @@ -124,8 +123,7 @@ func resourceIbmSchematicsAgentHealthRead(context context.Context, d *schema.Res } getAgentDataOptions := &schematicsv1.GetAgentDataOptions{ - XFeatureAgents: core.BoolPtr(true), - Profile: core.StringPtr("detailed"), + Profile: core.StringPtr("detailed"), } getAgentDataOptions.SetAgentID(parts[0]) @@ -186,9 +184,8 @@ func resourceIbmSchematicsAgentHealthUpdate(context context.Context, d *schema.R healthCheckAgentJobOptions := &schematicsv1.HealthCheckAgentJobOptions{} ff := map[string]string{ - "X-Feature-Agents": "true", - "Authorization": iamAccessToken, - "refresh_token": iamRefreshToken, + "Authorization": iamAccessToken, + "refresh_token": iamRefreshToken, } healthCheckAgentJobOptions.Headers = ff diff --git a/ibm/service/schematics/resource_ibm_schematics_agent_health_test.go b/ibm/service/schematics/resource_ibm_schematics_agent_health_test.go index 3b20dc5dd5..9ab319b352 100644 --- a/ibm/service/schematics/resource_ibm_schematics_agent_health_test.go +++ b/ibm/service/schematics/resource_ibm_schematics_agent_health_test.go @@ -38,41 +38,6 @@ func TestAccIbmSchematicsAgentHealthBasic(t *testing.T) { }) } -func TestAccIbmSchematicsAgentHealthAllArgs(t *testing.T) { - var conf *schematicsv1.AgentDataRecentHealthJob - agentID := fmt.Sprintf("tf_agent_id_%d", acctest.RandIntRange(10, 100)) - force := "false" - forceUpdate := "true" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { acc.TestAccPreCheck(t) }, - Providers: acc.TestAccProviders, - CheckDestroy: testAccCheckIbmSchematicsAgentHealthDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckIbmSchematicsAgentHealthConfig(agentID, force), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckIbmSchematicsAgentHealthExists("ibm_schematics_agent_health.schematics_agent_health_instance", conf), - resource.TestCheckResourceAttr("ibm_schematics_agent_health.schematics_agent_health_instance", "agent_id", agentID), - resource.TestCheckResourceAttr("ibm_schematics_agent_health.schematics_agent_health_instance", "force", force), - ), - }, - resource.TestStep{ - Config: testAccCheckIbmSchematicsAgentHealthConfig(agentID, forceUpdate), - Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr("ibm_schematics_agent_health.schematics_agent_health_instance", "agent_id", agentID), - resource.TestCheckResourceAttr("ibm_schematics_agent_health.schematics_agent_health_instance", "force", forceUpdate), - ), - }, - resource.TestStep{ - ResourceName: "ibm_schematics_agent_health.schematics_agent_health_instance", - ImportState: 
true, - ImportStateVerify: true, - }, - }, - }) -} - func testAccCheckIbmSchematicsAgentHealthConfigBasic(agentID string) string { return fmt.Sprintf(` @@ -82,16 +47,6 @@ func testAccCheckIbmSchematicsAgentHealthConfigBasic(agentID string) string { `, agentID) } -func testAccCheckIbmSchematicsAgentHealthConfig(agentID string, force string) string { - return fmt.Sprintf(` - - resource "ibm_schematics_agent_health" "schematics_agent_health_instance" { - agent_id = "%s" - force = %s - } - `, agentID, force) -} - func testAccCheckIbmSchematicsAgentHealthExists(n string, obj *schematicsv1.AgentDataRecentHealthJob) resource.TestCheckFunc { return func(s *terraform.State) error { @@ -106,8 +61,7 @@ func testAccCheckIbmSchematicsAgentHealthExists(n string, obj *schematicsv1.Agen } getAgentDataOptions := &schematicsv1.GetAgentDataOptions{ - XFeatureAgents: core.BoolPtr(true), - Profile: core.StringPtr("detailed"), + Profile: core.StringPtr("detailed"), } parts, err := flex.SepIdParts(rs.Primary.ID, "/") @@ -138,8 +92,7 @@ func testAccCheckIbmSchematicsAgentHealthDestroy(s *terraform.State) error { } getAgentDataOptions := &schematicsv1.GetAgentDataOptions{ - XFeatureAgents: core.BoolPtr(true), - Profile: core.StringPtr("detailed"), + Profile: core.StringPtr("detailed"), } parts, err := flex.SepIdParts(rs.Primary.ID, "/") diff --git a/ibm/service/schematics/resource_ibm_schematics_agent_prs.go b/ibm/service/schematics/resource_ibm_schematics_agent_prs.go index a672152b16..73032e4bc3 100644 --- a/ibm/service/schematics/resource_ibm_schematics_agent_prs.go +++ b/ibm/service/schematics/resource_ibm_schematics_agent_prs.go @@ -90,9 +90,8 @@ func resourceIbmSchematicsAgentPrsCreate(context context.Context, d *schema.Reso prsAgentJobOptions := &schematicsv1.PrsAgentJobOptions{} ff := map[string]string{ - "X-Feature-Agents": "true", - "Authorization": iamAccessToken, - "refresh_token": iamRefreshToken, + "Authorization": iamAccessToken, + "refresh_token": iamRefreshToken, } prsAgentJobOptions.Headers = ff prsAgentJobOptions.SetAgentID(d.Get("agent_id").(string)) @@ -123,8 +122,7 @@ func resourceIbmSchematicsAgentPrsRead(context context.Context, d *schema.Resour } getAgentDataOptions := &schematicsv1.GetAgentDataOptions{ - XFeatureAgents: core.BoolPtr(true), - Profile: core.StringPtr("detailed"), + Profile: core.StringPtr("detailed"), } getAgentDataOptions.SetAgentID(parts[0]) @@ -183,9 +181,8 @@ func resourceIbmSchematicsAgentPrsUpdate(context context.Context, d *schema.Reso prsAgentJobOptions := &schematicsv1.PrsAgentJobOptions{} ff := map[string]string{ - "X-Feature-Agents": "true", - "Authorization": iamAccessToken, - "refresh_token": iamRefreshToken, + "Authorization": iamAccessToken, + "refresh_token": iamRefreshToken, } prsAgentJobOptions.Headers = ff diff --git a/ibm/service/schematics/resource_ibm_schematics_agent_prs_test.go b/ibm/service/schematics/resource_ibm_schematics_agent_prs_test.go index 66894e0125..1f06d8db2b 100644 --- a/ibm/service/schematics/resource_ibm_schematics_agent_prs_test.go +++ b/ibm/service/schematics/resource_ibm_schematics_agent_prs_test.go @@ -38,41 +38,6 @@ func TestAccIbmSchematicsAgentPrsBasic(t *testing.T) { }) } -func TestAccIbmSchematicsAgentPrsAllArgs(t *testing.T) { - var conf *schematicsv1.AgentDataRecentPrsJob - agentID := fmt.Sprintf("tf_agent_id_%d", acctest.RandIntRange(10, 100)) - force := "false" - forceUpdate := "true" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { acc.TestAccPreCheck(t) }, - Providers: acc.TestAccProviders, - CheckDestroy: 
testAccCheckIbmSchematicsAgentPrsDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckIbmSchematicsAgentPrsConfig(agentID, force), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckIbmSchematicsAgentPrsExists("ibm_schematics_agent_prs.schematics_agent_prs_instance", conf), - resource.TestCheckResourceAttr("ibm_schematics_agent_prs.schematics_agent_prs_instance", "agent_id", agentID), - resource.TestCheckResourceAttr("ibm_schematics_agent_prs.schematics_agent_prs_instance", "force", force), - ), - }, - resource.TestStep{ - Config: testAccCheckIbmSchematicsAgentPrsConfig(agentID, forceUpdate), - Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr("ibm_schematics_agent_prs.schematics_agent_prs_instance", "agent_id", agentID), - resource.TestCheckResourceAttr("ibm_schematics_agent_prs.schematics_agent_prs_instance", "force", forceUpdate), - ), - }, - resource.TestStep{ - ResourceName: "ibm_schematics_agent_prs.schematics_agent_prs_instance", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - func testAccCheckIbmSchematicsAgentPrsConfigBasic(agentID string) string { return fmt.Sprintf(` resource "ibm_schematics_agent_prs" "schematics_agent_prs_instance" { @@ -81,16 +46,6 @@ func testAccCheckIbmSchematicsAgentPrsConfigBasic(agentID string) string { `, agentID) } -func testAccCheckIbmSchematicsAgentPrsConfig(agentID string, force string) string { - return fmt.Sprintf(` - - resource "ibm_schematics_agent_prs" "schematics_agent_prs_instance" { - agent_id = "%s" - force = %s - } - `, agentID, force) -} - func testAccCheckIbmSchematicsAgentPrsExists(n string, obj *schematicsv1.AgentDataRecentPrsJob) resource.TestCheckFunc { return func(s *terraform.State) error { @@ -105,8 +60,7 @@ func testAccCheckIbmSchematicsAgentPrsExists(n string, obj *schematicsv1.AgentDa } getAgentDataOptions := &schematicsv1.GetAgentDataOptions{ - XFeatureAgents: core.BoolPtr(true), - Profile: core.StringPtr("detailed"), + Profile: core.StringPtr("detailed"), } parts, err := flex.SepIdParts(rs.Primary.ID, "/") @@ -137,8 +91,7 @@ func testAccCheckIbmSchematicsAgentPrsDestroy(s *terraform.State) error { } getAgentDataOptions := &schematicsv1.GetAgentDataOptions{ - XFeatureAgents: core.BoolPtr(true), - Profile: core.StringPtr("detailed"), + Profile: core.StringPtr("detailed"), } parts, err := flex.SepIdParts(rs.Primary.ID, "/") diff --git a/ibm/service/schematics/resource_ibm_schematics_agent_test.go b/ibm/service/schematics/resource_ibm_schematics_agent_test.go index b4fcbcc098..f5a4c6e646 100644 --- a/ibm/service/schematics/resource_ibm_schematics_agent_test.go +++ b/ibm/service/schematics/resource_ibm_schematics_agent_test.go @@ -13,18 +13,17 @@ import ( acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" - "github.com/IBM/go-sdk-core/v5/core" "github.com/IBM/schematics-go-sdk/schematicsv1" ) func TestAccIbmSchematicsAgentBasic(t *testing.T) { var conf schematicsv1.AgentData name := fmt.Sprintf("tf_name_%d", acctest.RandIntRange(10, 100)) - version := "1.0.0-beta2" + version := "1.0.0-prega" schematicsLocation := "us-south" agentLocation := "eu-de" nameUpdate := fmt.Sprintf("tf_name_%d", acctest.RandIntRange(10, 100)) - versionUpdate := "1.0.0-beta2" + versionUpdate := "1.0.0" schematicsLocationUpdate := "us-east" agentLocationUpdate := "eu-gb" @@ -59,11 +58,11 @@ func TestAccIbmSchematicsAgentBasic(t *testing.T) { func TestAccIbmSchematicsAgentAllArgs(t *testing.T) { var 
conf schematicsv1.AgentData name := fmt.Sprintf("tf_name_%d", acctest.RandIntRange(10, 100)) - version := "1.0.0-beta2" + version := "1.0.0-prega" schematicsLocation := "us-south" agentLocation := "eu-de" nameUpdate := fmt.Sprintf("tf_name_%d", acctest.RandIntRange(10, 100)) - versionUpdate := "1.0.0-beta2" + versionUpdate := "1.0.0" schematicsLocationUpdate := "us-east" agentLocationUpdate := "eu-gb" description := fmt.Sprintf("tf_description_%d", acctest.RandIntRange(10, 100)) @@ -130,7 +129,7 @@ func testAccCheckIbmSchematicsAgentConfig(name string, version string, schematic resource "ibm_schematics_agent" "schematics_agent_instance" { name = "%s" - resource_group = "default" + resource_group = "Default" version = "%s" schematics_location = "%s" agent_location = "%s" @@ -143,50 +142,11 @@ func testAccCheckIbmSchematicsAgentConfig(name string, version string, schematic cos_bucket_region = "cos_bucket_region" } description = "%s" - tags = "FIXME" + tags = ["tag-agent"] agent_metadata { name = "purpose" value = ["git", "terraform", "ansible"] } - agent_inputs { - name = "name" - value = "value" - use_default = true - metadata { - type = "boolean" - aliases = [ "aliases" ] - description = "description" - cloud_data_type = "cloud_data_type" - default_value = "default_value" - link_status = "normal" - secure = true - immutable = true - hidden = true - required = true - options = [ "options" ] - min_value = 1 - max_value = 1 - min_length = 1 - max_length = 1 - matches = "matches" - position = 1 - group_by = "group_by" - source = "source" - } - link = "link" - } - user_state { - state = "enable" - set_by = "set_by" - set_at = "2021-01-31T09:44:12Z" - } - agent_kpi { - availability_indicator = "available" - lifecycle_indicator = "consistent" - percent_usage_indicator = "percent_usage_indicator" - application_indicators = [ null ] - infra_indicators = [ null ] - } } `, name, version, schematicsLocation, agentLocation, description) } @@ -204,9 +164,7 @@ func testAccCheckIbmSchematicsAgentExists(n string, obj schematicsv1.AgentData) return err } - getAgentDataOptions := &schematicsv1.GetAgentDataOptions{ - XFeatureAgents: core.BoolPtr(true), - } + getAgentDataOptions := &schematicsv1.GetAgentDataOptions{} getAgentDataOptions.SetAgentID(rs.Primary.ID) @@ -230,9 +188,7 @@ func testAccCheckIbmSchematicsAgentDestroy(s *terraform.State) error { continue } - getAgentDataOptions := &schematicsv1.GetAgentDataOptions{ - XFeatureAgents: core.BoolPtr(true), - } + getAgentDataOptions := &schematicsv1.GetAgentDataOptions{} getAgentDataOptions.SetAgentID(rs.Primary.ID) diff --git a/ibm/service/schematics/resource_ibm_schematics_job.go b/ibm/service/schematics/resource_ibm_schematics_job.go index 4893b8b7ae..b8927e5101 100644 --- a/ibm/service/schematics/resource_ibm_schematics_job.go +++ b/ibm/service/schematics/resource_ibm_schematics_job.go @@ -3701,7 +3701,7 @@ func resourceIBMSchematicsJobMapToJobLogSummary(jobLogSummaryMap map[string]inte jobLogSummary.ElapsedTime = core.Float64Ptr(jobLogSummaryMap["elapsed_time"].(float64)) } if jobLogSummaryMap["log_errors"] != nil { - logErrors := []schematicsv1.JobLogSummaryLogErrors{} + logErrors := []schematicsv1.JobLogSummaryLogErrorsItem{} for _, logErrorsItem := range jobLogSummaryMap["log_errors"].([]interface{}) { logErrorsItemModel := resourceIBMSchematicsJobMapToJobLogSummaryLogErrors(logErrorsItem.(map[string]interface{})) logErrors = append(logErrors, logErrorsItemModel) @@ -3732,8 +3732,8 @@ func resourceIBMSchematicsJobMapToJobLogSummary(jobLogSummaryMap 
map[string]inte return jobLogSummary } -func resourceIBMSchematicsJobMapToJobLogSummaryLogErrors(jobLogSummaryLogErrorsMap map[string]interface{}) schematicsv1.JobLogSummaryLogErrors { - jobLogSummaryLogErrors := schematicsv1.JobLogSummaryLogErrors{} +func resourceIBMSchematicsJobMapToJobLogSummaryLogErrors(jobLogSummaryLogErrorsMap map[string]interface{}) schematicsv1.JobLogSummaryLogErrorsItem { + jobLogSummaryLogErrors := schematicsv1.JobLogSummaryLogErrorsItem{} if jobLogSummaryLogErrorsMap["error_code"] != nil { jobLogSummaryLogErrors.ErrorCode = core.StringPtr(jobLogSummaryLogErrorsMap["error_code"].(string)) @@ -4803,7 +4803,7 @@ func resourceIBMSchematicsJobJobLogSummaryToMap(jobLogSummary schematicsv1.JobLo return jobLogSummaryMap } -func resourceIBMSchematicsJobJobLogSummaryLogErrorsToMap(jobLogSummaryLogErrors schematicsv1.JobLogSummaryLogErrors) map[string]interface{} { +func resourceIBMSchematicsJobJobLogSummaryLogErrorsToMap(jobLogSummaryLogErrors schematicsv1.JobLogSummaryLogErrorsItem) map[string]interface{} { jobLogSummaryLogErrorsMap := map[string]interface{}{} if jobLogSummaryLogErrors.ErrorCode != nil { diff --git a/ibm/service/schematics/resource_ibm_schematics_policy.go b/ibm/service/schematics/resource_ibm_schematics_policy.go index 66f2d6cea5..6d065fcfab 100644 --- a/ibm/service/schematics/resource_ibm_schematics_policy.go +++ b/ibm/service/schematics/resource_ibm_schematics_policy.go @@ -38,10 +38,11 @@ func ResourceIbmSchematicsPolicy() *schema.Resource { Description: "The description of Schematics customization policy.", }, "resource_group": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "The resource group name for the policy. By default, Policy will be created in `default` Resource Group.", + Type: schema.TypeString, + Optional: true, + Computed: true, + DiffSuppressFunc: flex.ApplyOnce, + Description: "The resource group name for the policy. By default, Policy will be created in `default` Resource Group.", }, "tags": &schema.Schema{ Type: schema.TypeList, @@ -53,6 +54,7 @@ func ResourceIbmSchematicsPolicy() *schema.Resource { Type: schema.TypeString, Optional: true, Computed: true, + ForceNew: true, Description: "List of locations supported by IBM Cloud Schematics service. While creating your workspace or action, choose the right region, since it cannot be changed. 
Note, this does not limit the location of the IBM Cloud resources, provisioned using Schematics.", }, "state": &schema.Schema{ @@ -303,7 +305,7 @@ func resourceIbmSchematicsPolicyCreate(context context.Context, d *schema.Resour createPolicyOptions.SetResourceGroup(d.Get("resource_group").(string)) } if _, ok := d.GetOk("tags"); ok { - createPolicyOptions.SetTags(d.Get("tags").([]string)) + createPolicyOptions.SetTags(flex.ExpandStringList(d.Get("tags").([]interface{}))) } if _, ok := d.GetOk("location"); ok { createPolicyOptions.SetLocation(d.Get("location").(string)) @@ -480,11 +482,7 @@ func resourceIbmSchematicsPolicyUpdate(context context.Context, d *schema.Resour hasChange = true } if d.HasChange("tags") { - // TODO: handle Tags of type TypeList -- not primitive, not model - hasChange = true - } - if d.HasChange("location") { - updatePolicyOptions.SetLocation(d.Get("location").(string)) + updatePolicyOptions.SetTags(flex.ExpandStringList(d.Get("tags").([]interface{}))) hasChange = true } if d.HasChange("state") { diff --git a/ibm/service/schematics/resource_ibm_schematics_policy_test.go b/ibm/service/schematics/resource_ibm_schematics_policy_test.go index dbae396bd5..92807c354f 100644 --- a/ibm/service/schematics/resource_ibm_schematics_policy_test.go +++ b/ibm/service/schematics/resource_ibm_schematics_policy_test.go @@ -42,13 +42,11 @@ func TestAccIbmSchematicsPolicyAllArgs(t *testing.T) { var conf schematicsv1.Policy name := fmt.Sprintf("tf_name_%d", acctest.RandIntRange(10, 100)) description := fmt.Sprintf("tf_description_%d", acctest.RandIntRange(10, 100)) - resourceGroup := fmt.Sprintf("tf_resource_group_%d", acctest.RandIntRange(10, 100)) + resourceGroup := "Default" location := "us-south" kind := "agent_assignment_policy" nameUpdate := fmt.Sprintf("tf_name_%d", acctest.RandIntRange(10, 100)) descriptionUpdate := fmt.Sprintf("tf_description_%d", acctest.RandIntRange(10, 100)) - resourceGroupUpdate := fmt.Sprintf("tf_resource_group_%d", acctest.RandIntRange(10, 100)) - locationUpdate := "eu-de" kindUpdate := "agent_assignment_policy" resource.Test(t, resource.TestCase{ @@ -62,18 +60,15 @@ func TestAccIbmSchematicsPolicyAllArgs(t *testing.T) { testAccCheckIbmSchematicsPolicyExists("ibm_schematics_policy.schematics_policy_instance", conf), resource.TestCheckResourceAttr("ibm_schematics_policy.schematics_policy_instance", "name", name), resource.TestCheckResourceAttr("ibm_schematics_policy.schematics_policy_instance", "description", description), - resource.TestCheckResourceAttr("ibm_schematics_policy.schematics_policy_instance", "resource_group", resourceGroup), resource.TestCheckResourceAttr("ibm_schematics_policy.schematics_policy_instance", "location", location), resource.TestCheckResourceAttr("ibm_schematics_policy.schematics_policy_instance", "kind", kind), ), }, resource.TestStep{ - Config: testAccCheckIbmSchematicsPolicyConfig(nameUpdate, descriptionUpdate, resourceGroupUpdate, locationUpdate, kindUpdate), + Config: testAccCheckIbmSchematicsPolicyConfig(nameUpdate, descriptionUpdate, resourceGroup, location, kindUpdate), Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr("ibm_schematics_policy.schematics_policy_instance", "name", nameUpdate), resource.TestCheckResourceAttr("ibm_schematics_policy.schematics_policy_instance", "description", descriptionUpdate), - resource.TestCheckResourceAttr("ibm_schematics_policy.schematics_policy_instance", "resource_group", resourceGroupUpdate), - 
resource.TestCheckResourceAttr("ibm_schematics_policy.schematics_policy_instance", "location", locationUpdate), resource.TestCheckResourceAttr("ibm_schematics_policy.schematics_policy_instance", "kind", kindUpdate), ), }, @@ -103,40 +98,24 @@ func testAccCheckIbmSchematicsPolicyConfig(name string, description string, reso name = "%s" description = "%s" resource_group = "%s" - tags = "FIXME" + tags = ["policy-tag"] location = "%s" - state { - state = "draft" - set_by = "set_by" - set_at = "2021-01-31T09:44:12Z" - } kind = "%s" target { selector_kind = "ids" selector_ids = [ "selector_ids" ] - selector_scope { - kind = "workspace" - tags = [ "tags" ] - resource_groups = [ "resource_groups" ] - locations = [ "us-south" ] - } } parameter { agent_assignment_policy_parameter { - selector_kind = "ids" - selector_ids = [ "selector_ids" ] + selector_kind = "scoped" selector_scope { kind = "workspace" tags = [ "tags" ] - resource_groups = [ "resource_groups" ] + resource_groups = [ "Default" ] locations = [ "us-south" ] } } } - scoped_resources { - kind = "workspace" - id = "id" - } } `, name, description, resourceGroup, location, kind) } diff --git a/ibm/service/secretsmanager/data_source_ibm_secrets_manager_secret.go b/ibm/service/secretsmanager/data_source_ibm_secrets_manager_secret.go index f130b9aef3..27dcd545e8 100644 --- a/ibm/service/secretsmanager/data_source_ibm_secrets_manager_secret.go +++ b/ibm/service/secretsmanager/data_source_ibm_secrets_manager_secret.go @@ -20,8 +20,10 @@ import ( func DataSourceIBMSecretsManagerSecret() *schema.Resource { return &schema.Resource{ - ReadContext: dataSourceIBMSecretsManagerSecretRead, - DeprecationMessage: "Data Source Removal: Data Source ibm_secrets_manager_secret is deprecated and will be removed. Use ibm_sm_<secret_type>_secret for managing secret of a specific type.", + ReadContext: func(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + return diag.Errorf("Data Source Removal: Data Source ibm_secrets_manager_secret is removed. Use ibm_sm_<secret_type>_secret for managing secret of a specific type") + }, + DeprecationMessage: "Data Source Removal: Data Source ibm_secrets_manager_secret is removed. Use ibm_sm_<secret_type>_secret for managing secret of a specific type.", Schema: map[string]*schema.Schema{ "instance_id": { Type: schema.TypeString, diff --git a/ibm/service/secretsmanager/data_source_ibm_secrets_manager_secrets.go b/ibm/service/secretsmanager/data_source_ibm_secrets_manager_secrets.go index a67ebf0f1f..17c7f09f8a 100644 --- a/ibm/service/secretsmanager/data_source_ibm_secrets_manager_secrets.go +++ b/ibm/service/secretsmanager/data_source_ibm_secrets_manager_secrets.go @@ -19,8 +19,10 @@ func DataSourceIBMSecretsManagerSecrets() *schema.Resource { return &schema.Resource{ - ReadContext: dataSourceIBMSecretsManagerSecretsRead, - DeprecationMessage: "Data Source Removal: Data Source ibm_secrets_manager_secrets is deprecated and will be removed. Use ibm_sm_secrets for listing secrets", + ReadContext: func(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + return diag.Errorf("Data Source Removal: Data Source ibm_secrets_manager_secrets is removed. Use ibm_sm_secrets for listing secrets") + }, + DeprecationMessage: "Data Source Removal: Data Source ibm_secrets_manager_secrets is removed.
Use ibm_sm_secrets for listing secrets", Schema: map[string]*schema.Schema{ "instance_id": { Type: schema.TypeString, diff --git a/ibm/service/secretsmanager/data_source_ibm_sm_iam_credentials_secret.go b/ibm/service/secretsmanager/data_source_ibm_sm_iam_credentials_secret.go index 75ad6a8306..bce495303b 100644 --- a/ibm/service/secretsmanager/data_source_ibm_sm_iam_credentials_secret.go +++ b/ibm/service/secretsmanager/data_source_ibm_sm_iam_credentials_secret.go @@ -169,11 +169,6 @@ func DataSourceIbmSmIamCredentialsSecret() *schema.Resource { Computed: true, Description: "The units for the secret rotation time interval.", }, - "rotate_keys": &schema.Schema{ - Type: schema.TypeBool, - Computed: true, - Description: "Determines whether Secrets Manager rotates the private key for your public certificate automatically.Default is `false`. If it is set to `true`, the service generates and stores a new private key for your rotated certificate.", - }, }, }, }, @@ -330,9 +325,6 @@ func dataSourceIbmSmIamCredentialsSecretRotationPolicyToMap(model secretsmanager if model.Unit != nil { modelMap["unit"] = *model.Unit } - if model.RotateKeys != nil { - modelMap["rotate_keys"] = *model.RotateKeys - } return modelMap, nil } else { return nil, fmt.Errorf("Unrecognized secretsmanagerv2.RotationPolicyIntf subtype encountered") diff --git a/ibm/service/secretsmanager/data_source_ibm_sm_iam_credentials_secret_metadata.go b/ibm/service/secretsmanager/data_source_ibm_sm_iam_credentials_secret_metadata.go index d1124752b9..f58be6333d 100644 --- a/ibm/service/secretsmanager/data_source_ibm_sm_iam_credentials_secret_metadata.go +++ b/ibm/service/secretsmanager/data_source_ibm_sm_iam_credentials_secret_metadata.go @@ -161,11 +161,6 @@ func DataSourceIbmSmIamCredentialsSecretMetadata() *schema.Resource { Computed: true, Description: "The units for the secret rotation time interval.", }, - "rotate_keys": &schema.Schema{ - Type: schema.TypeBool, - Computed: true, - Description: "Determines whether Secrets Manager rotates the private key for your public certificate automatically.Default is `false`. If it is set to `true`, the service generates and stores a new private key for your rotated certificate.", - }, }, }, }, @@ -325,9 +320,6 @@ func dataSourceIbmSmIamCredentialsSecretMetadataRotationPolicyToMap(model secret if model.Unit != nil { modelMap["unit"] = *model.Unit } - if model.RotateKeys != nil { - modelMap["rotate_keys"] = *model.RotateKeys - } return modelMap, nil } else { return nil, fmt.Errorf("Unrecognized secretsmanagerv2.RotationPolicyIntf subtype encountered") diff --git a/ibm/service/secretsmanager/data_source_ibm_sm_private_certificate.go b/ibm/service/secretsmanager/data_source_ibm_sm_private_certificate.go index 31043b4a78..59ce4a1fa8 100644 --- a/ibm/service/secretsmanager/data_source_ibm_sm_private_certificate.go +++ b/ibm/service/secretsmanager/data_source_ibm_sm_private_certificate.go @@ -184,11 +184,6 @@ func DataSourceIbmSmPrivateCertificate() *schema.Resource { Computed: true, Description: "The units for the secret rotation time interval.", }, - "rotate_keys": &schema.Schema{ - Type: schema.TypeBool, - Computed: true, - Description: "Determines whether Secrets Manager rotates the private key for your public certificate automatically.Default is `false`. 
If it is set to `true`, the service generates and stores a new private key for your rotated certificate.", - }, }, }, }, @@ -436,9 +431,6 @@ func dataSourceIbmSmPrivateCertificateRotationPolicyToMap(model secretsmanagerv2 if model.Unit != nil { modelMap["unit"] = *model.Unit } - if model.RotateKeys != nil { - modelMap["rotate_keys"] = *model.RotateKeys - } return modelMap, nil } else { return nil, fmt.Errorf("Unrecognized secretsmanagerv2.RotationPolicyIntf subtype encountered") diff --git a/ibm/service/secretsmanager/data_source_ibm_sm_private_certificate_metadata.go b/ibm/service/secretsmanager/data_source_ibm_sm_private_certificate_metadata.go index 1585e1a831..a1afb4a638 100644 --- a/ibm/service/secretsmanager/data_source_ibm_sm_private_certificate_metadata.go +++ b/ibm/service/secretsmanager/data_source_ibm_sm_private_certificate_metadata.go @@ -176,11 +176,6 @@ func DataSourceIbmSmPrivateCertificateMetadata() *schema.Resource { Computed: true, Description: "The units for the secret rotation time interval.", }, - "rotate_keys": &schema.Schema{ - Type: schema.TypeBool, - Computed: true, - Description: "Determines whether Secrets Manager rotates the private key for your public certificate automatically.Default is `false`. If it is set to `true`, the service generates and stores a new private key for your rotated certificate.", - }, }, }, }, @@ -403,9 +398,6 @@ func dataSourceIbmSmPrivateCertificateMetadataRotationPolicyToMap(model secretsm if model.Unit != nil { modelMap["unit"] = *model.Unit } - if model.RotateKeys != nil { - modelMap["rotate_keys"] = *model.RotateKeys - } return modelMap, nil } else { return nil, fmt.Errorf("Unrecognized secretsmanagerv2.RotationPolicyIntf subtype encountered") diff --git a/ibm/service/secretsmanager/data_source_ibm_sm_username_password_secret.go b/ibm/service/secretsmanager/data_source_ibm_sm_username_password_secret.go index 5a9fa84aff..a1119f17e0 100644 --- a/ibm/service/secretsmanager/data_source_ibm_sm_username_password_secret.go +++ b/ibm/service/secretsmanager/data_source_ibm_sm_username_password_secret.go @@ -136,11 +136,6 @@ func DataSourceIbmSmUsernamePasswordSecret() *schema.Resource { Computed: true, Description: "The units for the secret rotation time interval.", }, - "rotate_keys": &schema.Schema{ - Type: schema.TypeBool, - Computed: true, - Description: "Determines whether Secrets Manager rotates the private key for your public certificate automatically.Default is `false`. 
If it is set to `true`, the service generates and stores a new private key for your rotated certificate.", - }, }, }, }, @@ -296,9 +291,6 @@ func dataSourceIbmSmUsernamePasswordSecretRotationPolicyToMap(model secretsmanag if model.Unit != nil { modelMap["unit"] = *model.Unit } - if model.RotateKeys != nil { - modelMap["rotate_keys"] = *model.RotateKeys - } return modelMap, nil } else { return nil, fmt.Errorf("Unrecognized secretsmanagerv2.RotationPolicyIntf subtype encountered") diff --git a/ibm/service/secretsmanager/data_source_ibm_sm_username_password_secret_metadata.go b/ibm/service/secretsmanager/data_source_ibm_sm_username_password_secret_metadata.go index 35d24ef67b..6eada6eb8b 100644 --- a/ibm/service/secretsmanager/data_source_ibm_sm_username_password_secret_metadata.go +++ b/ibm/service/secretsmanager/data_source_ibm_sm_username_password_secret_metadata.go @@ -128,11 +128,6 @@ func DataSourceIbmSmUsernamePasswordSecretMetadata() *schema.Resource { Computed: true, Description: "The units for the secret rotation time interval.", }, - "rotate_keys": &schema.Schema{ - Type: schema.TypeBool, - Computed: true, - Description: "Determines whether Secrets Manager rotates the private key for your public certificate automatically.Default is `false`. If it is set to `true`, the service generates and stores a new private key for your rotated certificate.", - }, }, }, }, @@ -282,9 +277,6 @@ func dataSourceIbmSmUsernamePasswordSecretMetadataRotationPolicyToMap(model secr if model.Unit != nil { modelMap["unit"] = *model.Unit } - if model.RotateKeys != nil { - modelMap["rotate_keys"] = *model.RotateKeys - } return modelMap, nil } else { return nil, fmt.Errorf("Unrecognized secretsmanagerv2.RotationPolicyIntf subtype encountered") diff --git a/ibm/service/secretsmanager/resource_ibm_sm_arbitrary_secret.go b/ibm/service/secretsmanager/resource_ibm_sm_arbitrary_secret.go index 6c5b4f3661..b4b4e2bd7f 100644 --- a/ibm/service/secretsmanager/resource_ibm_sm_arbitrary_secret.go +++ b/ibm/service/secretsmanager/resource_ibm_sm_arbitrary_secret.go @@ -84,7 +84,6 @@ func ResourceIbmSmArbitrarySecret() *schema.Resource { "version_custom_metadata": &schema.Schema{ Type: schema.TypeMap, Optional: true, - Computed: true, Description: "The secret version metadata that a user can customize.", Elem: &schema.Schema{Type: schema.TypeString}, }, @@ -422,7 +421,7 @@ func resourceIbmSmArbitrarySecretUpdate(context context.Context, d *schema.Resou // Apply change to version_custom_metadata in current version secretVersionMetadataPatchModel := new(secretsmanagerv2.SecretVersionMetadataPatch) secretVersionMetadataPatchModel.VersionCustomMetadata = d.Get("version_custom_metadata").(map[string]interface{}) - secretVersionMetadataPatchModelAsPatch, _ := secretVersionMetadataPatchModel.AsPatch() + secretVersionMetadataPatchModelAsPatch, _ := secretVersionMetadataAsPatchFunction(secretVersionMetadataPatchModel) updateSecretVersionOptions := &secretsmanagerv2.UpdateSecretVersionMetadataOptions{} updateSecretVersionOptions.SetSecretID(secretId) diff --git a/ibm/service/secretsmanager/resource_ibm_sm_iam_credentials_secret.go b/ibm/service/secretsmanager/resource_ibm_sm_iam_credentials_secret.go index e47da2cee7..e729325efb 100644 --- a/ibm/service/secretsmanager/resource_ibm_sm_iam_credentials_secret.go +++ b/ibm/service/secretsmanager/resource_ibm_sm_iam_credentials_secret.go @@ -113,12 +113,6 @@ func ResourceIbmSmIamCredentialsSecret() *schema.Resource { Description: "The units for the secret rotation time interval.", 
DiffSuppressFunc: rotationAttributesDiffSuppress, }, - "rotate_keys": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Computed: true, - Description: "Determines whether Secrets Manager rotates the private key for your public certificate automatically.Default is `false`. If it is set to `true`, the service generates and stores a new private key for your rotated certificate.", - }, }, }, }, @@ -132,7 +126,6 @@ func ResourceIbmSmIamCredentialsSecret() *schema.Resource { "version_custom_metadata": &schema.Schema{ Type: schema.TypeMap, Optional: true, - ForceNew: true, Description: "The secret version metadata that a user can customize.", Elem: &schema.Schema{Type: schema.TypeString}, }, @@ -414,6 +407,24 @@ func resourceIbmSmIamCredentialsSecretRead(context context.Context, d *schema.Re return diag.FromErr(fmt.Errorf("Error setting signing_algorithm: %s", err)) } + // Call get version metadata API to get the current version_custom_metadata + getVersionMetdataOptions := &secretsmanagerv2.GetSecretVersionMetadataOptions{} + getVersionMetdataOptions.SetSecretID(secretId) + getVersionMetdataOptions.SetID("current") + + versionMetadataIntf, response, err := secretsManagerClient.GetSecretVersionMetadataWithContext(context, getVersionMetdataOptions) + if err != nil { + log.Printf("[DEBUG] GetSecretVersionMetadataWithContext failed %s\n%s", err, response) + return diag.FromErr(fmt.Errorf("GetSecretVersionMetadataWithContext failed %s\n%s", err, response)) + } + + versionMetadata := versionMetadataIntf.(*secretsmanagerv2.IAMCredentialsSecretVersionMetadata) + if versionMetadata.VersionCustomMetadata != nil { + if err = d.Set("version_custom_metadata", versionMetadata.VersionCustomMetadata); err != nil { + return diag.FromErr(fmt.Errorf("Error setting version_custom_metadata: %s", err)) + } + } + return nil } @@ -481,6 +492,27 @@ func resourceIbmSmIamCredentialsSecretUpdate(context context.Context, d *schema. 
} } + if d.HasChange("version_custom_metadata") { + // Apply change to version_custom_metadata in current version + secretVersionMetadataPatchModel := new(secretsmanagerv2.SecretVersionMetadataPatch) + secretVersionMetadataPatchModel.VersionCustomMetadata = d.Get("version_custom_metadata").(map[string]interface{}) + secretVersionMetadataPatchModelAsPatch, _ := secretVersionMetadataAsPatchFunction(secretVersionMetadataPatchModel) + + updateSecretVersionOptions := &secretsmanagerv2.UpdateSecretVersionMetadataOptions{} + updateSecretVersionOptions.SetSecretID(secretId) + updateSecretVersionOptions.SetID("current") + updateSecretVersionOptions.SetSecretVersionMetadataPatch(secretVersionMetadataPatchModelAsPatch) + _, response, err := secretsManagerClient.UpdateSecretVersionMetadataWithContext(context, updateSecretVersionOptions) + if err != nil { + if hasChange { + // Call the read function to update the Terraform state with the change already applied to the metadata + resourceIbmSmIamCredentialsSecretRead(context, d, meta) + } + log.Printf("[DEBUG] UpdateSecretVersionMetadataWithContext failed %s\n%s", err, response) + return diag.FromErr(fmt.Errorf("UpdateSecretVersionMetadataWithContext failed %s\n%s", err, response)) + } + } + return resourceIbmSmIamCredentialsSecretRead(context, d, meta) } @@ -576,9 +608,6 @@ func resourceIbmSmIamCredentialsSecretMapToRotationPolicy(modelMap map[string]in if modelMap["unit"] != nil && modelMap["unit"].(string) != "" { model.Unit = core.StringPtr(modelMap["unit"].(string)) } - if modelMap["rotate_keys"] != nil { - model.RotateKeys = core.BoolPtr(modelMap["rotate_keys"].(bool)) - } return model, nil } @@ -594,8 +623,5 @@ func resourceIbmSmIamCredentialsSecretRotationPolicyToMap(modelIntf secretsmanag if model.Unit != nil { modelMap["unit"] = model.Unit } - if model.RotateKeys != nil { - modelMap["rotate_keys"] = model.RotateKeys - } return modelMap, nil } diff --git a/ibm/service/secretsmanager/resource_ibm_sm_imported_certificate.go b/ibm/service/secretsmanager/resource_ibm_sm_imported_certificate.go index b87a9cfdc5..aae9c5503c 100644 --- a/ibm/service/secretsmanager/resource_ibm_sm_imported_certificate.go +++ b/ibm/service/secretsmanager/resource_ibm_sm_imported_certificate.go @@ -78,7 +78,6 @@ func ResourceIbmSmImportedCertificate() *schema.Resource { "version_custom_metadata": &schema.Schema{ Type: schema.TypeMap, Optional: true, - Computed: true, Description: "The secret version metadata that a user can customize.", Elem: &schema.Schema{Type: schema.TypeString}, }, @@ -537,7 +536,7 @@ func resourceIbmSmImportedCertificateUpdate(context context.Context, d *schema.R // Apply change to version_custom_metadata in current version secretVersionMetadataPatchModel := new(secretsmanagerv2.SecretVersionMetadataPatch) secretVersionMetadataPatchModel.VersionCustomMetadata = d.Get("version_custom_metadata").(map[string]interface{}) - secretVersionMetadataPatchModelAsPatch, _ := secretVersionMetadataPatchModel.AsPatch() + secretVersionMetadataPatchModelAsPatch, _ := secretVersionMetadataAsPatchFunction(secretVersionMetadataPatchModel) updateSecretVersionOptions := &secretsmanagerv2.UpdateSecretVersionMetadataOptions{} updateSecretVersionOptions.SetSecretID(secretId) diff --git a/ibm/service/secretsmanager/resource_ibm_sm_kv_secret.go b/ibm/service/secretsmanager/resource_ibm_sm_kv_secret.go index f474f7cb04..f32d9ee23a 100644 --- a/ibm/service/secretsmanager/resource_ibm_sm_kv_secret.go +++ b/ibm/service/secretsmanager/resource_ibm_sm_kv_secret.go @@ -75,7 +75,6 @@ 
func ResourceIbmSmKvSecret() *schema.Resource { "version_custom_metadata": &schema.Schema{ Type: schema.TypeMap, Optional: true, - Computed: true, Description: "The secret version metadata that a user can customize.", Elem: &schema.Schema{Type: schema.TypeString}, }, @@ -396,7 +395,7 @@ func resourceIbmSmKvSecretUpdate(context context.Context, d *schema.ResourceData // Apply change to version_custom_metadata in current version secretVersionMetadataPatchModel := new(secretsmanagerv2.SecretVersionMetadataPatch) secretVersionMetadataPatchModel.VersionCustomMetadata = d.Get("version_custom_metadata").(map[string]interface{}) - secretVersionMetadataPatchModelAsPatch, _ := secretVersionMetadataPatchModel.AsPatch() + secretVersionMetadataPatchModelAsPatch, _ := secretVersionMetadataAsPatchFunction(secretVersionMetadataPatchModel) updateSecretVersionOptions := &secretsmanagerv2.UpdateSecretVersionMetadataOptions{} updateSecretVersionOptions.SetSecretID(secretId) diff --git a/ibm/service/secretsmanager/resource_ibm_sm_private_certificate.go b/ibm/service/secretsmanager/resource_ibm_sm_private_certificate.go index dfbbc39e69..19bd04e50c 100644 --- a/ibm/service/secretsmanager/resource_ibm_sm_private_certificate.go +++ b/ibm/service/secretsmanager/resource_ibm_sm_private_certificate.go @@ -168,7 +168,6 @@ func ResourceIbmSmPrivateCertificate() *schema.Resource { }, "version_custom_metadata": &schema.Schema{ Type: schema.TypeMap, - ForceNew: true, Optional: true, Description: "The secret version metadata that a user can customize.", Elem: &schema.Schema{Type: schema.TypeString}, @@ -245,9 +244,8 @@ func ResourceIbmSmPrivateCertificate() *schema.Resource { }, "key_algorithm": &schema.Schema{ Type: schema.TypeString, - Optional: true, + Computed: true, ForceNew: true, - Default: "RSA2048", Description: "The identifier for the cryptographic algorithm to be used to generate the public key that is associated with the certificate.The algorithm that you select determines the encryption algorithm (`RSA` or `ECDSA`) and key size to be used to generate keys and sign certificates. For longer living certificates, it is recommended to use longer keys to provide more encryption protection. 
Allowed values: RSA2048, RSA4096, EC256, EC384.", }, "next_rotation_date": &schema.Schema{ @@ -546,6 +544,24 @@ func resourceIbmSmPrivateCertificateRead(context context.Context, d *schema.Reso return diag.FromErr(fmt.Errorf("Error setting ca_chain: %s", err)) } } + + // Call get version metadata API to get the current version_custom_metadata + getVersionMetdataOptions := &secretsmanagerv2.GetSecretVersionMetadataOptions{} + getVersionMetdataOptions.SetSecretID(secretId) + getVersionMetdataOptions.SetID("current") + + versionMetadataIntf, response, err := secretsManagerClient.GetSecretVersionMetadataWithContext(context, getVersionMetdataOptions) + if err != nil { + log.Printf("[DEBUG] GetSecretVersionMetadataWithContext failed %s\n%s", err, response) + return diag.FromErr(fmt.Errorf("GetSecretVersionMetadataWithContext failed %s\n%s", err, response)) + } + + versionMetadata := versionMetadataIntf.(*secretsmanagerv2.PrivateCertificateVersionMetadata) + if versionMetadata.VersionCustomMetadata != nil { + if err = d.Set("version_custom_metadata", versionMetadata.VersionCustomMetadata); err != nil { + return diag.FromErr(fmt.Errorf("Error setting version_custom_metadata: %s", err)) + } + } return nil } @@ -609,6 +625,27 @@ func resourceIbmSmPrivateCertificateUpdate(context context.Context, d *schema.Re } } + if d.HasChange("version_custom_metadata") { + // Apply change to version_custom_metadata in current version + secretVersionMetadataPatchModel := new(secretsmanagerv2.SecretVersionMetadataPatch) + secretVersionMetadataPatchModel.VersionCustomMetadata = d.Get("version_custom_metadata").(map[string]interface{}) + secretVersionMetadataPatchModelAsPatch, _ := secretVersionMetadataAsPatchFunction(secretVersionMetadataPatchModel) + + updateSecretVersionOptions := &secretsmanagerv2.UpdateSecretVersionMetadataOptions{} + updateSecretVersionOptions.SetSecretID(secretId) + updateSecretVersionOptions.SetID("current") + updateSecretVersionOptions.SetSecretVersionMetadataPatch(secretVersionMetadataPatchModelAsPatch) + _, response, err := secretsManagerClient.UpdateSecretVersionMetadataWithContext(context, updateSecretVersionOptions) + if err != nil { + if hasChange { + // Call the read function to update the Terraform state with the change already applied to the metadata + resourceIbmSmPrivateCertificateRead(context, d, meta) + } + log.Printf("[DEBUG] UpdateSecretVersionMetadataWithContext failed %s\n%s", err, response) + return diag.FromErr(fmt.Errorf("UpdateSecretVersionMetadataWithContext failed %s\n%s", err, response)) + } + } + return resourceIbmSmPrivateCertificateRead(context, d, meta) } diff --git a/ibm/service/secretsmanager/resource_ibm_sm_public_certificate.go b/ibm/service/secretsmanager/resource_ibm_sm_public_certificate.go index fb268ad8ed..8cbd049687 100644 --- a/ibm/service/secretsmanager/resource_ibm_sm_public_certificate.go +++ b/ibm/service/secretsmanager/resource_ibm_sm_public_certificate.go @@ -140,7 +140,6 @@ func ResourceIbmSmPublicCertificate() *schema.Resource { }, "version_custom_metadata": &schema.Schema{ Type: schema.TypeMap, - ForceNew: true, Optional: true, Description: "The secret version metadata that a user can customize.", Elem: &schema.Schema{Type: schema.TypeString}, @@ -636,6 +635,27 @@ func resourceIbmSmPublicCertificateRead(context context.Context, d *schema.Resou if err = d.Set("private_key", secret.PrivateKey); err != nil { return diag.FromErr(fmt.Errorf("Error setting private_key: %s", err)) } + + if *secret.StateDescription == "active" { + // Call get version 
metadata API to get the current version_custom_metadata + getVersionMetdataOptions := &secretsmanagerv2.GetSecretVersionMetadataOptions{} + getVersionMetdataOptions.SetSecretID(secretId) + getVersionMetdataOptions.SetID("current") + + versionMetadataIntf, response, err := secretsManagerClient.GetSecretVersionMetadataWithContext(context, getVersionMetdataOptions) + if err != nil { + log.Printf("[DEBUG] GetSecretVersionMetadataWithContext failed %s\n%s", err, response) + return diag.FromErr(fmt.Errorf("GetSecretVersionMetadataWithContext failed %s\n%s", err, response)) + } + + versionMetadata := versionMetadataIntf.(*secretsmanagerv2.PublicCertificateVersionMetadata) + if versionMetadata.VersionCustomMetadata != nil { + if err = d.Set("version_custom_metadata", versionMetadata.VersionCustomMetadata); err != nil { + return diag.FromErr(fmt.Errorf("Error setting version_custom_metadata: %s", err)) + } + } + } + if d.Get("dns").(string) == "akamai" && d.Get("state_description").(string) == "pre_activation" { err := setChallengesWithAkamaiAndValidateManualDns(context, d, meta, secret, secretsManagerClient) if err != nil { @@ -706,6 +726,27 @@ func resourceIbmSmPublicCertificateUpdate(context context.Context, d *schema.Res } } + if d.HasChange("version_custom_metadata") { + // Apply change to version_custom_metadata in current version + secretVersionMetadataPatchModel := new(secretsmanagerv2.SecretVersionMetadataPatch) + secretVersionMetadataPatchModel.VersionCustomMetadata = d.Get("version_custom_metadata").(map[string]interface{}) + secretVersionMetadataPatchModelAsPatch, _ := secretVersionMetadataAsPatchFunction(secretVersionMetadataPatchModel) + + updateSecretVersionOptions := &secretsmanagerv2.UpdateSecretVersionMetadataOptions{} + updateSecretVersionOptions.SetSecretID(secretId) + updateSecretVersionOptions.SetID("current") + updateSecretVersionOptions.SetSecretVersionMetadataPatch(secretVersionMetadataPatchModelAsPatch) + _, response, err := secretsManagerClient.UpdateSecretVersionMetadataWithContext(context, updateSecretVersionOptions) + if err != nil { + if hasChange { + // Call the read function to update the Terraform state with the change already applied to the metadata + resourceIbmSmPublicCertificateRead(context, d, meta) + } + log.Printf("[DEBUG] UpdateSecretVersionMetadataWithContext failed %s\n%s", err, response) + return diag.FromErr(fmt.Errorf("UpdateSecretVersionMetadataWithContext failed %s\n%s", err, response)) + } + } + return resourceIbmSmPublicCertificateRead(context, d, meta) } diff --git a/ibm/service/secretsmanager/resource_ibm_sm_public_certificate_configuration_ca_lets_encrypt.go b/ibm/service/secretsmanager/resource_ibm_sm_public_certificate_configuration_ca_lets_encrypt.go index 1e5620511c..4d23c48659 100644 --- a/ibm/service/secretsmanager/resource_ibm_sm_public_certificate_configuration_ca_lets_encrypt.go +++ b/ibm/service/secretsmanager/resource_ibm_sm_public_certificate_configuration_ca_lets_encrypt.go @@ -36,6 +36,11 @@ func ResourceIbmSmPublicCertificateConfigurationCALetsEncrypt() *schema.Resource Required: true, Description: "The configuration of the Let's Encrypt CA environment.", }, + "lets_encrypt_preferred_chain": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Prefer the chain with an issuer matching this Subject Common Name.", + }, "lets_encrypt_private_key": &schema.Schema{ Type: schema.TypeString, Required: true, @@ -140,6 +145,9 @@ func resourceIbmSmPublicCertificateConfigurationCALetsEncryptRead(context contex if err = 
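For public certificates the read path only fetches the current version metadata once the secret is `active`, presumably because a pre-activation certificate has no issued version yet. A sketch of that lookup, under the same client and import-path assumptions as above:

```go
package sketch

import (
	"context"
	"fmt"

	"github.com/IBM/secrets-manager-go-sdk/v2/secretsmanagerv2"
)

// readCurrentVersionCustomMetadata fetches the metadata of the "current" version
// and returns its version_custom_metadata map (nil when none is set).
func readCurrentVersionCustomMetadata(ctx context.Context, client *secretsmanagerv2.SecretsManagerV2, secretID string) (map[string]interface{}, error) {
	opts := &secretsmanagerv2.GetSecretVersionMetadataOptions{}
	opts.SetSecretID(secretID)
	opts.SetID("current")

	versionMetadataIntf, response, err := client.GetSecretVersionMetadataWithContext(ctx, opts)
	if err != nil {
		return nil, fmt.Errorf("GetSecretVersionMetadataWithContext failed %s\n%s", err, response)
	}
	// The concrete type depends on the secret type; public certificates assert
	// *PublicCertificateVersionMetadata, private certificates and username/password
	// secrets use their own *VersionMetadata structs.
	if vm, ok := versionMetadataIntf.(*secretsmanagerv2.PublicCertificateVersionMetadata); ok {
		return vm.VersionCustomMetadata, nil
	}
	return nil, nil
}
```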
d.Set("lets_encrypt_environment", configuration.LetsEncryptEnvironment); err != nil { return diag.FromErr(fmt.Errorf("Error setting lets_encrypt_environment: %s", err)) } + if err = d.Set("lets_encrypt_preferred_chain", configuration.LetsEncryptPreferredChain); err != nil { + return diag.FromErr(fmt.Errorf("Error setting lets_encrypt_preferred_chain: %s", err)) + } if err = d.Set("lets_encrypt_private_key", configuration.LetsEncryptPrivateKey); err != nil { return diag.FromErr(fmt.Errorf("Error setting lets_encrypt_private_key: %s", err)) } @@ -173,6 +181,11 @@ func resourceIbmSmPublicCertificateConfigurationCALetsEncryptUpdate(context cont hasChange = true } + if d.HasChange("lets_encrypt_preferred_chain") { + patchVals.LetsEncryptPreferredChain = core.StringPtr(d.Get("lets_encrypt_preferred_chain").(string)) + hasChange = true + } + patchVals.LetsEncryptEnvironment = core.StringPtr(d.Get("lets_encrypt_environment").(string)) if d.HasChange("lets_encrypt_environment") { hasChange = true @@ -222,15 +235,15 @@ func resourceIbmSmPublicCertificateConfigurationCALetsEncryptMapToConfigurationP model.ConfigType = core.StringPtr("public_cert_configuration_ca_lets_encrypt") - //if _, ok := d.GetOk("config_type"); ok { - // model.ConfigType = core.StringPtr(d.Get("config_type").(string)) - //} if _, ok := d.GetOk("name"); ok { model.Name = core.StringPtr(d.Get("name").(string)) } if _, ok := d.GetOk("lets_encrypt_environment"); ok { model.LetsEncryptEnvironment = core.StringPtr(d.Get("lets_encrypt_environment").(string)) } + if _, ok := d.GetOk("lets_encrypt_preferred_chain"); ok { + model.LetsEncryptPreferredChain = core.StringPtr(d.Get("lets_encrypt_preferred_chain").(string)) + } if _, ok := d.GetOk("lets_encrypt_private_key"); ok { model.LetsEncryptPrivateKey = core.StringPtr(formatCertificate(d.Get("lets_encrypt_private_key").(string))) } diff --git a/ibm/service/secretsmanager/resource_ibm_sm_service_credentilas_secret.go b/ibm/service/secretsmanager/resource_ibm_sm_service_credentials_secret.go similarity index 99% rename from ibm/service/secretsmanager/resource_ibm_sm_service_credentilas_secret.go rename to ibm/service/secretsmanager/resource_ibm_sm_service_credentials_secret.go index 7d5e211b0d..8beb6980d2 100644 --- a/ibm/service/secretsmanager/resource_ibm_sm_service_credentilas_secret.go +++ b/ibm/service/secretsmanager/resource_ibm_sm_service_credentials_secret.go @@ -66,7 +66,6 @@ func ResourceIbmSmServiceCredentialsSecret() *schema.Resource { "version_custom_metadata": &schema.Schema{ Type: schema.TypeMap, Optional: true, - Computed: true, Description: "The secret version metadata that a user can customize.", Elem: &schema.Schema{Type: schema.TypeString}, }, @@ -537,7 +536,7 @@ func resourceIbmSmServiceCredentialsSecretUpdate(context context.Context, d *sch // Apply change to version_custom_metadata in current version secretVersionMetadataPatchModel := new(secretsmanagerv2.SecretVersionMetadataPatch) secretVersionMetadataPatchModel.VersionCustomMetadata = d.Get("version_custom_metadata").(map[string]interface{}) - secretVersionMetadataPatchModelAsPatch, _ := secretVersionMetadataPatchModel.AsPatch() + secretVersionMetadataPatchModelAsPatch, _ := secretVersionMetadataAsPatchFunction(secretVersionMetadataPatchModel) updateSecretVersionOptions := &secretsmanagerv2.UpdateSecretVersionMetadataOptions{} updateSecretVersionOptions.SetSecretID(secretId) diff --git a/ibm/service/secretsmanager/resource_ibm_sm_service_credentilas_secret_test.go 
b/ibm/service/secretsmanager/resource_ibm_sm_service_credentials_secret_test.go similarity index 100% rename from ibm/service/secretsmanager/resource_ibm_sm_service_credentilas_secret_test.go rename to ibm/service/secretsmanager/resource_ibm_sm_service_credentials_secret_test.go diff --git a/ibm/service/secretsmanager/resource_ibm_sm_username_password_secret.go b/ibm/service/secretsmanager/resource_ibm_sm_username_password_secret.go index a5d48f33ac..5975f94dac 100644 --- a/ibm/service/secretsmanager/resource_ibm_sm_username_password_secret.go +++ b/ibm/service/secretsmanager/resource_ibm_sm_username_password_secret.go @@ -81,7 +81,6 @@ func ResourceIbmSmUsernamePasswordSecret() *schema.Resource { "version_custom_metadata": &schema.Schema{ Type: schema.TypeMap, Optional: true, - ForceNew: true, Description: "The secret version metadata that a user can customize.", Elem: &schema.Schema{Type: schema.TypeString}, }, @@ -124,7 +123,6 @@ func ResourceIbmSmUsernamePasswordSecret() *schema.Resource { "password": &schema.Schema{ Type: schema.TypeString, Required: true, - ForceNew: true, Sensitive: true, Description: "The password that is assigned to the secret.", }, @@ -356,6 +354,24 @@ func resourceIbmSmUsernamePasswordSecretRead(context context.Context, d *schema. return diag.FromErr(fmt.Errorf("Error setting password: %s", err)) } + // Call get version metadata API to get the current version_custom_metadata + getVersionMetdataOptions := &secretsmanagerv2.GetSecretVersionMetadataOptions{} + getVersionMetdataOptions.SetSecretID(secretId) + getVersionMetdataOptions.SetID("current") + + versionMetadataIntf, response, err := secretsManagerClient.GetSecretVersionMetadataWithContext(context, getVersionMetdataOptions) + if err != nil { + log.Printf("[DEBUG] GetSecretVersionMetadataWithContext failed %s\n%s", err, response) + return diag.FromErr(fmt.Errorf("GetSecretVersionMetadataWithContext failed %s\n%s", err, response)) + } + + versionMetadata := versionMetadataIntf.(*secretsmanagerv2.UsernamePasswordSecretVersionMetadata) + if versionMetadata.VersionCustomMetadata != nil { + if err = d.Set("version_custom_metadata", versionMetadata.VersionCustomMetadata); err != nil { + return diag.FromErr(fmt.Errorf("Error setting version_custom_metadata: %s", err)) + } + } + return nil } @@ -434,6 +450,51 @@ func resourceIbmSmUsernamePasswordSecretUpdate(context context.Context, d *schem } } + // Apply change in payload (if changed) + if d.HasChange("password") { + versionModel := &secretsmanagerv2.UsernamePasswordSecretVersionPrototype{} + versionModel.Password = core.StringPtr(d.Get("password").(string)) + if _, ok := d.GetOk("version_custom_metadata"); ok { + versionModel.VersionCustomMetadata = d.Get("version_custom_metadata").(map[string]interface{}) + } + if _, ok := d.GetOk("custom_metadata"); ok { + versionModel.CustomMetadata = d.Get("custom_metadata").(map[string]interface{}) + } + + createSecretVersionOptions := &secretsmanagerv2.CreateSecretVersionOptions{} + createSecretVersionOptions.SetSecretID(secretId) + createSecretVersionOptions.SetSecretVersionPrototype(versionModel) + _, response, err := secretsManagerClient.CreateSecretVersionWithContext(context, createSecretVersionOptions) + if err != nil { + if hasChange { + // Before returning an error, call the read function to update the Terraform state with the change + // that was already applied to the metadata + resourceIbmSmUsernamePasswordSecretRead(context, d, meta) + } + log.Printf("[DEBUG] CreateSecretVersionWithContext failed %s\n%s", err, 
response) + return diag.FromErr(fmt.Errorf("CreateSecretVersionWithContext failed %s\n%s", err, response)) + } + } else if d.HasChange("version_custom_metadata") { + // Apply change to version_custom_metadata in current version + secretVersionMetadataPatchModel := new(secretsmanagerv2.SecretVersionMetadataPatch) + secretVersionMetadataPatchModel.VersionCustomMetadata = d.Get("version_custom_metadata").(map[string]interface{}) + secretVersionMetadataPatchModelAsPatch, _ := secretVersionMetadataAsPatchFunction(secretVersionMetadataPatchModel) + + updateSecretVersionOptions := &secretsmanagerv2.UpdateSecretVersionMetadataOptions{} + updateSecretVersionOptions.SetSecretID(secretId) + updateSecretVersionOptions.SetID("current") + updateSecretVersionOptions.SetSecretVersionMetadataPatch(secretVersionMetadataPatchModelAsPatch) + _, response, err := secretsManagerClient.UpdateSecretVersionMetadataWithContext(context, updateSecretVersionOptions) + if err != nil { + if hasChange { + // Call the read function to update the Terraform state with the change already applied to the metadata + resourceIbmSmUsernamePasswordSecretRead(context, d, meta) + } + log.Printf("[DEBUG] UpdateSecretVersionMetadataWithContext failed %s\n%s", err, response) + return diag.FromErr(fmt.Errorf("UpdateSecretVersionMetadataWithContext failed %s\n%s", err, response)) + } + } + return resourceIbmSmUsernamePasswordSecretRead(context, d, meta) } diff --git a/ibm/service/secretsmanager/utils.go b/ibm/service/secretsmanager/utils.go index 07a23380be..da8e661425 100644 --- a/ibm/service/secretsmanager/utils.go +++ b/ibm/service/secretsmanager/utils.go @@ -2,6 +2,7 @@ package secretsmanager import ( "context" + "encoding/json" "fmt" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" "github.com/IBM/go-sdk-core/v5/core" @@ -182,3 +183,13 @@ func getSecretByIdOrByName(context context.Context, d *schema.ResourceData, meta return nil, "", "", diag.FromErr(fmt.Errorf("Missing required arguments. 
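When the password itself changes, the username/password resource now creates a new secret version rather than patching the current one; changes to `version_custom_metadata` alone still go through the metadata patch path. A sketch of the version-creation call, with the same client and import assumptions as above:

```go
package sketch

import (
	"context"
	"fmt"

	"github.com/IBM/go-sdk-core/v5/core"
	"github.com/IBM/secrets-manager-go-sdk/v2/secretsmanagerv2"
)

// rotateUsernamePasswordSecret creates a new version carrying the new password,
// optionally propagating version_custom_metadata to the new version.
func rotateUsernamePasswordSecret(ctx context.Context, client *secretsmanagerv2.SecretsManagerV2, secretID, newPassword string, versionMeta map[string]interface{}) error {
	versionModel := &secretsmanagerv2.UsernamePasswordSecretVersionPrototype{
		Password: core.StringPtr(newPassword),
	}
	if len(versionMeta) > 0 {
		versionModel.VersionCustomMetadata = versionMeta
	}

	opts := &secretsmanagerv2.CreateSecretVersionOptions{}
	opts.SetSecretID(secretID)
	opts.SetSecretVersionPrototype(versionModel)

	if _, response, err := client.CreateSecretVersionWithContext(ctx, opts); err != nil {
		return fmt.Errorf("CreateSecretVersionWithContext failed %s\n%s", err, response)
	}
	return nil
}
```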
Please make sure that either \"secret_id\" or \"name\" and \"secret_group_name\" are provided\n")) } + +func secretVersionMetadataAsPatchFunction(secretVersionMetadataPatch *secretsmanagerv2.SecretVersionMetadataPatch) (_patch map[string]interface{}, err error) { + jsonData, err := json.Marshal(struct { + VersionCustomMetadata map[string]interface{} `json:"version_custom_metadata"` + }{VersionCustomMetadata: secretVersionMetadataPatch.VersionCustomMetadata}) + if err == nil { + err = json.Unmarshal(jsonData, &_patch) + } + return +} diff --git a/ibm/service/transitgateway/data_source_ibm_tg_gateway.go b/ibm/service/transitgateway/data_source_ibm_tg_gateway.go index 132aae79d6..acedf31ef5 100644 --- a/ibm/service/transitgateway/data_source_ibm_tg_gateway.go +++ b/ibm/service/transitgateway/data_source_ibm_tg_gateway.go @@ -7,9 +7,11 @@ import ( "fmt" "log" - "github.com/IBM-Cloud/terraform-provider-ibm/ibm/validate" "github.com/IBM/networking-go-sdk/transitgatewayapisv1" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" + "github.com/IBM-Cloud/terraform-provider-ibm/ibm/validate" ) const ( @@ -195,94 +197,103 @@ func dataSourceIBMTransitGatewayConnectionsRead(d *schema.ResourceData, meta int if err != nil { return err } + startSub := "" listTransitGatewayConnectionsOptions := &transitgatewayapisv1.ListTransitGatewayConnectionsOptions{} tgGatewayId := d.Id() log.Println("tgGatewayId: ", tgGatewayId) listTransitGatewayConnectionsOptions.SetTransitGatewayID(tgGatewayId) - listTGConnections, response, err := client.ListTransitGatewayConnections(listTransitGatewayConnectionsOptions) - if err != nil { - return fmt.Errorf("[ERROR] Error while listing transit gateway connections %s\n%s", err, response) - } connections := make([]map[string]interface{}, 0) + for { - for _, instance := range listTGConnections.Connections { - tgConn := map[string]interface{}{} - - if instance.ID != nil { - tgConn[ID] = *instance.ID + if startSub != "" { + listTransitGatewayConnectionsOptions.Start = &startSub } - if instance.Name != nil { - tgConn[tgConnName] = *instance.Name - } - if instance.NetworkType != nil { - tgConn[tgNetworkType] = *instance.NetworkType + listTGConnections, response, err := client.ListTransitGatewayConnections(listTransitGatewayConnectionsOptions) + if err != nil { + return fmt.Errorf("[ERROR] Error while listing transit gateway connections %s\n%s", err, response) } + for _, instance := range listTGConnections.Connections { + tgConn := map[string]interface{}{} - if instance.NetworkID != nil { - tgConn[tgNetworkId] = *instance.NetworkID - } - if instance.NetworkAccountID != nil { - tgConn[tgNetworkAccountID] = *instance.NetworkAccountID - } + if instance.ID != nil { + tgConn[ID] = *instance.ID + } + if instance.Name != nil { + tgConn[tgConnName] = *instance.Name + } + if instance.NetworkType != nil { + tgConn[tgNetworkType] = *instance.NetworkType + } - if instance.BaseConnectionID != nil { - tgConn[tgBaseConnectionId] = *instance.BaseConnectionID - } + if instance.NetworkID != nil { + tgConn[tgNetworkId] = *instance.NetworkID + } + if instance.NetworkAccountID != nil { + tgConn[tgNetworkAccountID] = *instance.NetworkAccountID + } - if instance.BaseNetworkType != nil { - tgConn[tgBaseNetworkType] = *instance.BaseNetworkType - } + if instance.BaseConnectionID != nil { + tgConn[tgBaseConnectionId] = *instance.BaseConnectionID + } - if instance.LocalBgpAsn != nil { - tgConn[tgLocalBgpAsn] = *instance.LocalBgpAsn - } + if 
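The new `secretVersionMetadataAsPatchFunction` in utils.go replaces the SDK's generated `AsPatch()`, likely so that the `version_custom_metadata` key is always present in the patch body even when the map is empty. Its effect is just a struct-to-JSON-to-map round trip, which a short standalone example makes concrete:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// asPatch mirrors the marshal/unmarshal round trip from utils.go:
// it always emits a "version_custom_metadata" key, even for an empty map.
func asPatch(versionCustomMetadata map[string]interface{}) (map[string]interface{}, error) {
	jsonData, err := json.Marshal(struct {
		VersionCustomMetadata map[string]interface{} `json:"version_custom_metadata"`
	}{VersionCustomMetadata: versionCustomMetadata})
	if err != nil {
		return nil, err
	}
	var patch map[string]interface{}
	err = json.Unmarshal(jsonData, &patch)
	return patch, err
}

func main() {
	patch, _ := asPatch(map[string]interface{}{"owner": "team-a"})
	fmt.Println(patch) // map[version_custom_metadata:map[owner:team-a]]
}
```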
instance.BaseNetworkType != nil { + tgConn[tgBaseNetworkType] = *instance.BaseNetworkType + } - if instance.LocalGatewayIp != nil { - tgConn[tgLocalGatewayIp] = *instance.LocalGatewayIp - } + if instance.LocalBgpAsn != nil { + tgConn[tgLocalBgpAsn] = *instance.LocalBgpAsn + } - if instance.LocalTunnelIp != nil { - tgConn[tgLocalTunnelIp] = *instance.LocalTunnelIp - } + if instance.LocalGatewayIp != nil { + tgConn[tgLocalGatewayIp] = *instance.LocalGatewayIp + } - if instance.RemoteBgpAsn != nil { - tgConn[tgRemoteBgpAsn] = *instance.RemoteBgpAsn - } + if instance.LocalTunnelIp != nil { + tgConn[tgLocalTunnelIp] = *instance.LocalTunnelIp + } - if instance.RemoteGatewayIp != nil { - tgConn[tgRemoteGatewayIp] = *instance.RemoteGatewayIp - } + if instance.RemoteBgpAsn != nil { + tgConn[tgRemoteBgpAsn] = *instance.RemoteBgpAsn + } - if instance.RemoteTunnelIp != nil { - tgConn[tgRemoteTunnelIp] = *instance.RemoteTunnelIp - } + if instance.RemoteGatewayIp != nil { + tgConn[tgRemoteGatewayIp] = *instance.RemoteGatewayIp + } - if instance.Zone != nil { - tgConn[tgZone] = *instance.Zone.Name - } + if instance.RemoteTunnelIp != nil { + tgConn[tgRemoteTunnelIp] = *instance.RemoteTunnelIp + } - if instance.Mtu != nil { - tgConn[tgMtu] = *instance.Mtu - } + if instance.Zone != nil { + tgConn[tgZone] = *instance.Zone.Name + } - if instance.CreatedAt != nil { - tgConn[tgConectionCreatedAt] = instance.CreatedAt.String() + if instance.Mtu != nil { + tgConn[tgMtu] = *instance.Mtu + } - } - if instance.UpdatedAt != nil { - tgConn[tgUpdatedAt] = instance.UpdatedAt.String() + if instance.CreatedAt != nil { + tgConn[tgConectionCreatedAt] = instance.CreatedAt.String() - } - if instance.Status != nil { - tgConn[tgConnectionStatus] = *instance.Status - } + } + if instance.UpdatedAt != nil { + tgConn[tgUpdatedAt] = instance.UpdatedAt.String() - connections = append(connections, tgConn) + } + if instance.Status != nil { + tgConn[tgConnectionStatus] = *instance.Status + } + + connections = append(connections, tgConn) + } + startSub = flex.GetNext(listTGConnections.Next) + if startSub == "" { + break + } } d.Set(tgConnections, connections) - return nil } diff --git a/ibm/service/vpc/data_source_ibm_is_bare_metal_server.go b/ibm/service/vpc/data_source_ibm_is_bare_metal_server.go index 52ebee951c..949e730849 100644 --- a/ibm/service/vpc/data_source_ibm_is_bare_metal_server.go +++ b/ibm/service/vpc/data_source_ibm_is_bare_metal_server.go @@ -470,9 +470,11 @@ func dataSourceIBMISBareMetalServerRead(context context.Context, d *schema.Resou d.SetId(*bms.ID) d.Set(isBareMetalServerBandwidth, bms.Bandwidth) - bmsBootTargetIntf := bms.BootTarget.(*vpcv1.BareMetalServerBootTarget) - bmsBootTarget := bmsBootTargetIntf.ID - d.Set(isBareMetalServerBootTarget, bmsBootTarget) + if bms.BootTarget != nil { + bmsBootTargetIntf := bms.BootTarget.(*vpcv1.BareMetalServerBootTarget) + bmsBootTarget := bmsBootTargetIntf.ID + d.Set(isBareMetalServerBootTarget, bmsBootTarget) + } // set keys and image using initialization diff --git a/ibm/service/vpc/data_source_ibm_is_bare_metal_servers.go b/ibm/service/vpc/data_source_ibm_is_bare_metal_servers.go index d3ad3b6409..aa4c12f339 100644 --- a/ibm/service/vpc/data_source_ibm_is_bare_metal_servers.go +++ b/ibm/service/vpc/data_source_ibm_is_bare_metal_servers.go @@ -521,9 +521,11 @@ func dataSourceIBMISBareMetalServersRead(context context.Context, d *schema.Reso } l["id"] = *bms.ID l[isBareMetalServerBandwidth] = *bms.Bandwidth - bmsBootTargetIntf := 
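The transit gateway connections data source now pages through results instead of reading only the first page; the existing `flex.GetNext` helper derives the next `start` token from the collection's `Next` link. The shape of that loop, reduced to its essentials:

```go
package sketch

import (
	"fmt"

	"github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex"
	"github.com/IBM/networking-go-sdk/transitgatewayapisv1"
)

// listAllConnectionNames pages through ListTransitGatewayConnections until the
// collection no longer returns a next start token.
func listAllConnectionNames(client *transitgatewayapisv1.TransitGatewayApisV1, gatewayID string) ([]string, error) {
	opts := &transitgatewayapisv1.ListTransitGatewayConnectionsOptions{}
	opts.SetTransitGatewayID(gatewayID)

	var names []string
	start := ""
	for {
		if start != "" {
			opts.Start = &start
		}
		page, response, err := client.ListTransitGatewayConnections(opts)
		if err != nil {
			return nil, fmt.Errorf("[ERROR] Error while listing transit gateway connections %s\n%s", err, response)
		}
		for _, conn := range page.Connections {
			if conn.Name != nil {
				names = append(names, *conn.Name)
			}
		}
		start = flex.GetNext(page.Next)
		if start == "" {
			break
		}
	}
	return names, nil
}
```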
bms.BootTarget.(*vpcv1.BareMetalServerBootTarget) - bmsBootTarget := bmsBootTargetIntf.ID - l[isBareMetalServerBootTarget] = bmsBootTarget + if bms.BootTarget != nil { + bmsBootTargetIntf := bms.BootTarget.(*vpcv1.BareMetalServerBootTarget) + bmsBootTarget := bmsBootTargetIntf.ID + l[isBareMetalServerBootTarget] = bmsBootTarget + } cpuList := make([]map[string]interface{}, 0) if bms.Cpu != nil { currentCPU := map[string]interface{}{} diff --git a/ibm/service/vpc/data_source_ibm_is_image.go b/ibm/service/vpc/data_source_ibm_is_image.go index 760014aff7..61248d9c81 100644 --- a/ibm/service/vpc/data_source_ibm_is_image.go +++ b/ibm/service/vpc/data_source_ibm_is_image.go @@ -48,13 +48,107 @@ func DataSourceIBMISImage() *schema.Resource { ValidateFunc: validate.ValidateAllowedStringValues([]string{"public", "private"}), Description: "Whether the image is publicly visible or private to the account", }, - + "resource_group": { + Type: schema.TypeList, + Computed: true, + Description: "The resource group for this IPsec policy.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "href": { + Type: schema.TypeString, + Computed: true, + Description: "The URL for this resource group.", + }, + "id": { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this resource group.", + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: "The user-defined name for this resource group.", + }, + }, + }, + }, "status": { Type: schema.TypeString, Computed: true, Description: "The status of this image", }, - + "status_reasons": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The reasons for the current status (if any).", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "code": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "A snake case string succinctly identifying the status reason.", + }, + "message": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "An explanation of the status reason.", + }, + "more_info": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Link to documentation about this status reason.", + }, + }, + }, + }, + "operating_system": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "architecture": { + Type: schema.TypeString, + Computed: true, + Description: "The operating system architecture", + }, + "dedicated_host_only": { + Type: schema.TypeBool, + Computed: true, + Description: "Images with this operating system can only be used on dedicated hosts or dedicated host groups", + }, + "display_name": { + Type: schema.TypeString, + Computed: true, + Description: "A unique, display-friendly name for the operating system", + }, + "family": { + Type: schema.TypeString, + Computed: true, + Description: "The software family for this operating system", + }, + "href": { + Type: schema.TypeString, + Computed: true, + Description: "The URL for this operating system", + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: "The globally unique name for this operating system", + }, + "vendor": { + Type: schema.TypeString, + Computed: true, + Description: "The vendor of the operating system", + }, + "version": { + Type: schema.TypeString, + Computed: true, + Description: "The major release version of this operating system", + }, + }, + }, + }, "os": { Type: schema.TypeString, Computed: true, @@ -207,6 +301,9 @@ func imageGetByName(d 
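The boot target fix in both bare metal server data sources (and the resource further below) is the same nil guard: asserting a nil interface would panic, so the type assertion is only performed when `BootTarget` is present. Isolated, the pattern looks like this (vpc-go-sdk import path assumed):

```go
package sketch

import "github.com/IBM/vpc-go-sdk/vpcv1"

// bootTargetID returns the boot target ID, or nil when the server has no boot
// target; without the guard the interface type assertion would panic.
func bootTargetID(bms *vpcv1.BareMetalServer) *string {
	if bms.BootTarget == nil {
		return nil
	}
	target := bms.BootTarget.(*vpcv1.BareMetalServerBootTarget)
	return target.ID
}
```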
*schema.ResourceData, meta interface{}, name, visibility s if *image.Status == "deprecated" { fmt.Printf("[WARN] Given image %s is deprecated and soon will be obsolete.", name) } + if len(image.StatusReasons) > 0 { + d.Set("status_reasons", dataSourceIBMIsImageFlattenStatusReasons(image.StatusReasons)) + } d.Set("name", *image.Name) accesstags, err := flex.GetGlobalTagsUsingCRN(meta, *image.CRN, "", isImageAccessTagType) if err != nil { @@ -215,6 +312,19 @@ func imageGetByName(d *schema.ResourceData, meta interface{}, name, visibility s } d.Set(isImageAccessTags, accesstags) d.Set("visibility", *image.Visibility) + + if image.OperatingSystem != nil { + operatingSystemList := []map[string]interface{}{} + operatingSystemMap := dataSourceIBMISImageOperatingSystemToMap(*image.OperatingSystem) + operatingSystemList = append(operatingSystemList, operatingSystemMap) + d.Set("operating_system", operatingSystemList) + } + if image.ResourceGroup != nil { + resourceGroupList := []map[string]interface{}{} + resourceGroupMap := dataSourceImageResourceGroupToMap(*image.ResourceGroup) + resourceGroupList = append(resourceGroupList, resourceGroupMap) + d.Set("resource_group", resourceGroupList) + } d.Set("os", *image.OperatingSystem.Name) d.Set("architecture", *image.OperatingSystem.Architecture) d.Set("crn", *image.CRN) @@ -271,8 +381,23 @@ func imageGetById(d *schema.ResourceData, meta interface{}, identifier string) e if *image.Status == "deprecated" { fmt.Printf("[WARN] Given image %s is deprecated and soon will be obsolete.", name) } + if len(image.StatusReasons) > 0 { + d.Set("status_reasons", dataSourceIBMIsImageFlattenStatusReasons(image.StatusReasons)) + } d.Set("name", *image.Name) d.Set("visibility", *image.Visibility) + if image.OperatingSystem != nil { + operatingSystemList := []map[string]interface{}{} + operatingSystemMap := dataSourceIBMISImageOperatingSystemToMap(*image.OperatingSystem) + operatingSystemList = append(operatingSystemList, operatingSystemMap) + d.Set("operating_system", operatingSystemList) + } + if image.ResourceGroup != nil { + resourceGroupList := []map[string]interface{}{} + resourceGroupMap := dataSourceImageResourceGroupToMap(*image.ResourceGroup) + resourceGroupList = append(resourceGroupList, resourceGroupMap) + d.Set("resource_group", resourceGroupList) + } d.Set("os", *image.OperatingSystem.Name) d.Set("architecture", *image.OperatingSystem.Architecture) d.Set("crn", *image.CRN) @@ -297,6 +422,36 @@ func imageGetById(d *schema.ResourceData, meta interface{}, identifier string) e return nil } +func dataSourceIBMISImageOperatingSystemToMap(operatingSystemItem vpcv1.OperatingSystem) (operatingSystemMap map[string]interface{}) { + operatingSystemMap = map[string]interface{}{} + + if operatingSystemItem.Architecture != nil { + operatingSystemMap["architecture"] = operatingSystemItem.Architecture + } + if operatingSystemItem.DedicatedHostOnly != nil { + operatingSystemMap["dedicated_host_only"] = operatingSystemItem.DedicatedHostOnly + } + if operatingSystemItem.DisplayName != nil { + operatingSystemMap["display_name"] = operatingSystemItem.DisplayName + } + if operatingSystemItem.Family != nil { + operatingSystemMap["family"] = operatingSystemItem.Family + } + if operatingSystemItem.Href != nil { + operatingSystemMap["href"] = operatingSystemItem.Href + } + if operatingSystemItem.Name != nil { + operatingSystemMap["name"] = operatingSystemItem.Name + } + if operatingSystemItem.Vendor != nil { + operatingSystemMap["vendor"] = operatingSystemItem.Vendor + } + if 
operatingSystemItem.Version != nil { + operatingSystemMap["version"] = operatingSystemItem.Version + } + return operatingSystemMap +} + func dataSourceImageCollectionCatalogOfferingToMap(imageCatalogOfferingItem vpcv1.ImageCatalogOffering) (imageCatalogOfferingMap map[string]interface{}) { imageCatalogOfferingMap = map[string]interface{}{} if imageCatalogOfferingItem.Managed != nil { @@ -320,3 +475,40 @@ func dataSourceImageCollectionCatalogOfferingToMap(imageCatalogOfferingItem vpcv return imageCatalogOfferingMap } + +func dataSourceIBMIsImageFlattenStatusReasons(result []vpcv1.ImageStatusReason) (statusReasons []map[string]interface{}) { + for _, statusReasonsItem := range result { + statusReasons = append(statusReasons, dataSourceIBMIsImageStatusReasonToMap(&statusReasonsItem)) + } + + return statusReasons +} + +func dataSourceIBMIsImageStatusReasonToMap(model *vpcv1.ImageStatusReason) map[string]interface{} { + modelMap := make(map[string]interface{}) + if model.Code != nil { + modelMap["code"] = *model.Code + } + if model.Message != nil { + modelMap["message"] = *model.Message + } + if model.MoreInfo != nil { + modelMap["more_info"] = *model.MoreInfo + } + return modelMap +} +func dataSourceImageResourceGroupToMap(resourceGroupItem vpcv1.ResourceGroupReference) (resourceGroupMap map[string]interface{}) { + resourceGroupMap = map[string]interface{}{} + + if resourceGroupItem.Href != nil { + resourceGroupMap["href"] = resourceGroupItem.Href + } + if resourceGroupItem.ID != nil { + resourceGroupMap["id"] = resourceGroupItem.ID + } + if resourceGroupItem.Name != nil { + resourceGroupMap["name"] = resourceGroupItem.Name + } + + return resourceGroupMap +} diff --git a/ibm/service/vpc/data_source_ibm_is_image_test.go b/ibm/service/vpc/data_source_ibm_is_image_test.go index 9c7c126b5e..425a160dc5 100644 --- a/ibm/service/vpc/data_source_ibm_is_image_test.go +++ b/ibm/service/vpc/data_source_ibm_is_image_test.go @@ -34,6 +34,33 @@ func TestAccIBMISImageDataSource_basic(t *testing.T) { }, }) } +func TestAccIBMISImageDataSource_All(t *testing.T) { + resName := "data.ibm_is_image.test1" + imageName := fmt.Sprintf("tfimage-name-%d", acctest.RandIntRange(10, 100)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMISImageDataSourceAllConfig(imageName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet(resName, "operating_system.0.name"), + resource.TestCheckResourceAttrSet(resName, "operating_system.0.dedicated_host_only"), + resource.TestCheckResourceAttrSet(resName, "operating_system.0.display_name"), + resource.TestCheckResourceAttrSet(resName, "operating_system.0.family"), + resource.TestCheckResourceAttrSet(resName, "operating_system.0.href"), + resource.TestCheckResourceAttrSet(resName, "operating_system.0.vendor"), + resource.TestCheckResourceAttrSet(resName, "operating_system.0.version"), + resource.TestCheckResourceAttrSet(resName, "operating_system.0.architecture"), + resource.TestCheckResourceAttrSet(resName, "status"), + resource.TestCheckResourceAttrSet(resName, "resource_group.0.id"), + resource.TestCheckResourceAttrSet(resName, "resource_group.0.name"), + ), + }, + }, + }) +} func TestAccIBMISImageDataSource_ilc(t *testing.T) { resName := "data.ibm_is_image.test1" imageName := fmt.Sprintf("tfimage-name-%d", acctest.RandIntRange(10, 100)) @@ -144,6 +171,16 @@ func testAccCheckIBMISImageDataSourceConfig(imageName string) string { 
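Both image data sources reuse these flatteners to expose `operating_system`, `resource_group`, and `status_reasons` as nested blocks; the single SDK reference is always wrapped in a one-element list so the `TypeList` schema shape is satisfied. A condensed sketch, placed in the same `vpc` package so it can call the flatteners added in this change:

```go
package vpc

import (
	"github.com/IBM/vpc-go-sdk/vpcv1"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// setImageNestedBlocks shows how the OperatingSystem and ResourceGroup references
// are wrapped in one-element lists before being written to state.
func setImageNestedBlocks(d *schema.ResourceData, image *vpcv1.Image) error {
	if image.OperatingSystem != nil {
		osMap := dataSourceIBMISImageOperatingSystemToMap(*image.OperatingSystem)
		if err := d.Set("operating_system", []map[string]interface{}{osMap}); err != nil {
			return err
		}
	}
	if image.ResourceGroup != nil {
		rgMap := dataSourceImageResourceGroupToMap(*image.ResourceGroup)
		if err := d.Set("resource_group", []map[string]interface{}{rgMap}); err != nil {
			return err
		}
	}
	if len(image.StatusReasons) > 0 {
		if err := d.Set("status_reasons", dataSourceIBMIsImageFlattenStatusReasons(image.StatusReasons)); err != nil {
			return err
		}
	}
	return nil
}
```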
}`, acc.Image_cos_url, imageName, acc.Image_operating_system) } +func testAccCheckIBMISImageDataSourceAllConfig(imageName string) string { + return fmt.Sprintf(` + data "ibm_is_images" "test1" { + status = "available" + } + data "ibm_is_image" "test1" { + name = data.ibm_is_images.test1.images.0.name + }`) +} + func testAccCheckIBMISImageDataSourceConfigIlc(imageName string) string { return fmt.Sprintf(` resource "ibm_is_image" "isExampleImage" { diff --git a/ibm/service/vpc/data_source_ibm_is_images.go b/ibm/service/vpc/data_source_ibm_is_images.go index e1d22caf44..bd9c059110 100644 --- a/ibm/service/vpc/data_source_ibm_is_images.go +++ b/ibm/service/vpc/data_source_ibm_is_images.go @@ -74,11 +74,83 @@ func DataSourceIBMISImages() *schema.Resource { Computed: true, Description: "The status of this image", }, + "status_reasons": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The reasons for the current status (if any).", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "code": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "A snake case string succinctly identifying the status reason.", + }, + "message": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "An explanation of the status reason.", + }, + "more_info": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "Link to documentation about this status reason.", + }, + }, + }, + }, "visibility": { Type: schema.TypeString, Computed: true, Description: "Whether the image is publicly visible or private to the account", }, + "operating_system": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "architecture": { + Type: schema.TypeString, + Computed: true, + Description: "The operating system architecture", + }, + "dedicated_host_only": { + Type: schema.TypeBool, + Computed: true, + Description: "Images with this operating system can only be used on dedicated hosts or dedicated host groups", + }, + "display_name": { + Type: schema.TypeString, + Computed: true, + Description: "A unique, display-friendly name for the operating system", + }, + "family": { + Type: schema.TypeString, + Computed: true, + Description: "The software family for this operating system", + }, + "href": { + Type: schema.TypeString, + Computed: true, + Description: "The URL for this operating system", + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: "The globally unique name for this operating system", + }, + "vendor": { + Type: schema.TypeString, + Computed: true, + Description: "The vendor of the operating system", + }, + "version": { + Type: schema.TypeString, + Computed: true, + Description: "The major release version of this operating system", + }, + }, + }, + }, "os": { Type: schema.TypeString, Computed: true, @@ -89,6 +161,30 @@ func DataSourceIBMISImages() *schema.Resource { Computed: true, Description: "The operating system architecture", }, + "resource_group": { + Type: schema.TypeList, + Computed: true, + Description: "The resource group for this IPsec policy.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "href": { + Type: schema.TypeString, + Computed: true, + Description: "The URL for this resource group.", + }, + "id": { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for this resource group.", + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: "The user-defined name for 
this resource group.", + }, + }, + }, + }, "crn": { Type: schema.TypeString, Computed: true, @@ -283,6 +379,21 @@ func imageList(d *schema.ResourceData, meta interface{}) error { "os": *image.OperatingSystem.Name, "architecture": *image.OperatingSystem.Architecture, } + if len(image.StatusReasons) > 0 { + l["status_reasons"] = dataSourceIBMIsImageFlattenStatusReasons(image.StatusReasons) + } + if image.ResourceGroup != nil { + resourceGroupList := []map[string]interface{}{} + resourceGroupMap := dataSourceImageResourceGroupToMap(*image.ResourceGroup) + resourceGroupList = append(resourceGroupList, resourceGroupMap) + l["resource_group"] = resourceGroupList + } + if image.OperatingSystem != nil { + operatingSystemList := []map[string]interface{}{} + operatingSystemMap := dataSourceIBMISImageOperatingSystemToMap(*image.OperatingSystem) + operatingSystemList = append(operatingSystemList, operatingSystemMap) + l["operating_system"] = operatingSystemList + } if image.File != nil && image.File.Checksums != nil { l[isImageCheckSum] = *image.File.Checksums.Sha256 } diff --git a/ibm/service/vpc/data_source_ibm_is_images_test.go b/ibm/service/vpc/data_source_ibm_is_images_test.go index 2bbbbda009..08c30998de 100644 --- a/ibm/service/vpc/data_source_ibm_is_images_test.go +++ b/ibm/service/vpc/data_source_ibm_is_images_test.go @@ -9,6 +9,7 @@ import ( acc "github.com/IBM-Cloud/terraform-provider-ibm/ibm/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) @@ -30,6 +31,33 @@ func TestAccIBMISImagesDataSource_basic(t *testing.T) { }, }) } +func TestAccIBMISImagesDataSource_All(t *testing.T) { + resName := "data.ibm_is_images.test1" + imageName := fmt.Sprintf("tfimage-name-%d", acctest.RandIntRange(10, 100)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMISImagesDataSourceAllConfig(imageName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet(resName, "images.0.operating_system.0.name"), + resource.TestCheckResourceAttrSet(resName, "images.0.operating_system.0.dedicated_host_only"), + resource.TestCheckResourceAttrSet(resName, "images.0.operating_system.0.display_name"), + resource.TestCheckResourceAttrSet(resName, "images.0.operating_system.0.family"), + resource.TestCheckResourceAttrSet(resName, "images.0.operating_system.0.href"), + resource.TestCheckResourceAttrSet(resName, "images.0.operating_system.0.vendor"), + resource.TestCheckResourceAttrSet(resName, "images.0.operating_system.0.version"), + resource.TestCheckResourceAttrSet(resName, "images.0.operating_system.0.architecture"), + resource.TestCheckResourceAttrSet(resName, "images.0.status"), + resource.TestCheckResourceAttrSet(resName, "images.0.resource_group.0.id"), + resource.TestCheckResourceAttrSet(resName, "images.0.resource_group.0.name"), + ), + }, + }, + }) +} func TestAccIBMISImagesDataSource_catalog(t *testing.T) { resName := "data.ibm_is_images.test1" @@ -92,6 +120,12 @@ func testAccCheckIBMISImagesDataSourceConfig() string { data "ibm_is_images" "test1" { }`) } +func testAccCheckIBMISImagesDataSourceAllConfig(imageName string) string { + return fmt.Sprintf(` + data "ibm_is_images" "test1" { + status = "available" + }`) +} func testAccCheckIBMISCatalogImagesDataSourceConfig() string { // status filter defaults to empty return fmt.Sprintf(` diff --git 
a/ibm/service/vpc/data_source_ibm_is_share.go b/ibm/service/vpc/data_source_ibm_is_share.go index b1671d68f6..e5bd0c5559 100644 --- a/ibm/service/vpc/data_source_ibm_is_share.go +++ b/ibm/service/vpc/data_source_ibm_is_share.go @@ -64,6 +64,30 @@ func DataSourceIbmIsShare() *schema.Resource { Computed: true, Description: "The maximum input/output operation performance bandwidth per second for the file share.", }, + "latest_sync": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Information about the latest synchronization for this file share.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "completed_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The completed date and time of last synchronization between the replica share and its source.", + }, + "data_transferred": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The data transferred (in bytes) in the last synchronization between the replica and its source.", + }, + "started_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The start date and time of last synchronization between the replica share and its source.", + }, + }, + }, + }, "latest_job": &schema.Schema{ Type: schema.TypeList, Computed: true, @@ -410,7 +434,17 @@ func dataSourceIbmIsShareRead(context context.Context, d *schema.ResourceData, m if err = d.Set("iops", share.Iops); err != nil { return diag.FromErr(fmt.Errorf("Error setting iops: %s", err)) } - + latest_syncs := []map[string]interface{}{} + if share.LatestSync != nil { + latest_sync := make(map[string]interface{}) + latest_sync["completed_at"] = flex.DateTimeToString(share.LatestSync.CompletedAt) + if share.LatestSync.DataTransferred != nil { + latest_sync["data_transferred"] = *share.LatestSync.DataTransferred + } + latest_sync["started_at"] = flex.DateTimeToString(share.LatestSync.CompletedAt) + latest_syncs = append(latest_syncs, latest_sync) + } + d.Set("latest_sync", latest_syncs) if share.LatestJob != nil { err = d.Set("latest_job", dataSourceShareFlattenLatestJob(*share.LatestJob)) if err != nil { diff --git a/ibm/service/vpc/data_source_ibm_is_shares.go b/ibm/service/vpc/data_source_ibm_is_shares.go index dbeffabe24..f4829e0671 100644 --- a/ibm/service/vpc/data_source_ibm_is_shares.go +++ b/ibm/service/vpc/data_source_ibm_is_shares.go @@ -78,6 +78,30 @@ func DataSourceIbmIsShares() *schema.Resource { Computed: true, Description: "The maximum input/output operation performance bandwidth per second for the file share.", }, + "latest_sync": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Information about the latest synchronization for this file share.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "completed_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The completed date and time of last synchronization between the replica share and its source.", + }, + "data_transferred": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The data transferred (in bytes) in the last synchronization between the replica and its source.", + }, + "started_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The start date and time of last synchronization between the replica share and its source.", + }, + }, + }, + }, "latest_job": &schema.Schema{ Type: schema.TypeList, Computed: true, @@ -455,6 +479,17 @@ func dataSourceShareCollectionSharesToMap(meta interface{}, 
sharesItem vpcv1.Sha if sharesItem.Iops != nil { sharesMap["iops"] = sharesItem.Iops } + latest_syncs := []map[string]interface{}{} + if sharesItem.LatestSync != nil { + latest_sync := make(map[string]interface{}) + latest_sync["completed_at"] = flex.DateTimeToString(sharesItem.LatestSync.CompletedAt) + if sharesItem.LatestSync.DataTransferred != nil { + latest_sync["data_transferred"] = *sharesItem.LatestSync.DataTransferred + } + latest_sync["started_at"] = flex.DateTimeToString(sharesItem.LatestSync.CompletedAt) + latest_syncs = append(latest_syncs, latest_sync) + } + sharesMap["latest_sync"] = latest_syncs if sharesItem.LifecycleState != nil { sharesMap["lifecycle_state"] = sharesItem.LifecycleState } diff --git a/ibm/service/vpc/data_source_ibm_is_vpc_routing_table.go b/ibm/service/vpc/data_source_ibm_is_vpc_routing_table.go index c3023bba21..7293a719fd 100644 --- a/ibm/service/vpc/data_source_ibm_is_vpc_routing_table.go +++ b/ibm/service/vpc/data_source_ibm_is_vpc_routing_table.go @@ -60,6 +60,14 @@ func DataSourceIBMIBMIsVPCRoutingTable() *schema.Resource { Description: "The routing table identifier.", }, + "advertise_routes_to": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The ingress sources to advertise routes to. Routes in the table with `advertise` enabled will be advertised to these sources.The enumerated values for this property are expected to expand in the future. When processing this property, check for and log unknown values. Optionally halt processing and surface the error, or bypass the resource on which the unexpected property value was encountered.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, rtCreateAt: &schema.Schema{ Type: schema.TypeString, Computed: true, @@ -297,6 +305,9 @@ func dataSourceIBMIBMIsVPCRoutingTableRead(context context.Context, d *schema.Re return diag.FromErr(fmt.Errorf("[ERROR] Error setting route_vpc_zone_ingress: %s", err)) } + if err = d.Set("advertise_routes_to", routingTable.AdvertiseRoutesTo); err != nil { + return diag.FromErr(fmt.Errorf("[ERROR] Error setting value of advertise_routes_to: %s", err)) + } routes := []map[string]interface{}{} if routingTable.Routes != nil { for _, modelItem := range routingTable.Routes { diff --git a/ibm/service/vpc/data_source_ibm_is_vpc_routing_table_route.go b/ibm/service/vpc/data_source_ibm_is_vpc_routing_table_route.go index 2798f27aa8..da18a18571 100644 --- a/ibm/service/vpc/data_source_ibm_is_vpc_routing_table_route.go +++ b/ibm/service/vpc/data_source_ibm_is_vpc_routing_table_route.go @@ -57,6 +57,11 @@ func DataSourceIBMIBMIsVPCRoutingTableRoute() *schema.Resource { Computed: true, Description: "The action to perform with a packet matching the route:- `delegate`: delegate to the system's built-in routes- `delegate_vpc`: delegate to the system's built-in routes, ignoring Internet-bound routes- `deliver`: deliver the packet to the specified `next_hop`- `drop`: drop the packet.", }, + "advertise": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Indicates whether this route will be advertised to the ingress sources specified by the `advertise_routes_to` routing table property.", + }, rtCreateAt: &schema.Schema{ Type: schema.TypeString, Computed: true, @@ -271,6 +276,10 @@ func dataSourceIBMIBMIsVPCRoutingTableRouteRead(context context.Context, d *sche return diag.FromErr(fmt.Errorf("[ERROR] Error setting action: %s", err)) } + if err = d.Set("advertise", route.Advertise); err != nil { + return diag.FromErr(fmt.Errorf("[ERROR] 
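The new `latest_sync` block is flattened the same way in the share and shares data sources. A sketch of that flattener in isolation; the `ShareLatestSync` and `StartedAt` names are assumptions about the SDK model (the data source code in this change populates both timestamps from `CompletedAt`):

```go
package vpc

import (
	"github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex"
	"github.com/IBM/vpc-go-sdk/vpcv1"
)

// flattenShareLatestSync wraps the share's latest synchronization details in the
// one-element list shape expected by the latest_sync schema block.
func flattenShareLatestSync(latestSync *vpcv1.ShareLatestSync) []map[string]interface{} {
	if latestSync == nil {
		return []map[string]interface{}{}
	}
	sync := map[string]interface{}{
		"completed_at": flex.DateTimeToString(latestSync.CompletedAt),
	}
	if latestSync.DataTransferred != nil {
		sync["data_transferred"] = *latestSync.DataTransferred
	}
	// StartedAt is an assumed SDK field name for the start of the last sync.
	sync["started_at"] = flex.DateTimeToString(latestSync.StartedAt)
	return []map[string]interface{}{sync}
}
```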
Error setting advertise: %s", err)) + } + if err = d.Set(rtCreateAt, flex.DateTimeToString(route.CreatedAt)); err != nil { return diag.FromErr(fmt.Errorf("[ERROR] Error setting created_at: %s", err)) } diff --git a/ibm/service/vpc/data_source_ibm_is_vpc_routing_table_routes.go b/ibm/service/vpc/data_source_ibm_is_vpc_routing_table_routes.go index a1a5ac299a..6e723e5db6 100644 --- a/ibm/service/vpc/data_source_ibm_is_vpc_routing_table_routes.go +++ b/ibm/service/vpc/data_source_ibm_is_vpc_routing_table_routes.go @@ -126,6 +126,11 @@ func DataSourceIBMISVPCRoutingTableRoutes() *schema.Resource { Computed: true, Description: "Routing Table Route Action", }, + "advertise": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Description: "Indicates whether this route will be advertised to the ingress sources specified by the `advertise_routes_to` routing table property.", + }, isRoutingTableRouteDestination: { Type: schema.TypeString, Computed: true, @@ -216,6 +221,12 @@ func dataSourceIBMISVPCRoutingTableRoutesList(d *schema.ResourceData, meta inter if instance.LifecycleState != nil { route[isRoutingTableRouteLifecycleState] = *instance.LifecycleState } + if instance.Action != nil { + route[isRoutingTableRouteAction] = *instance.Action + } + if instance.Advertise != nil { + route["advertise"] = *instance.Advertise + } if instance.Destination != nil { route[isRoutingTableRouteDestination] = *instance.Destination } diff --git a/ibm/service/vpc/data_source_ibm_is_vpc_routing_tables.go b/ibm/service/vpc/data_source_ibm_is_vpc_routing_tables.go index b157c50f04..ffec0e8c74 100644 --- a/ibm/service/vpc/data_source_ibm_is_vpc_routing_tables.go +++ b/ibm/service/vpc/data_source_ibm_is_vpc_routing_tables.go @@ -72,6 +72,14 @@ func DataSourceIBMISVPCRoutingTables() *schema.Resource { Computed: true, Description: "Routing Table ID", }, + "advertise_routes_to": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "The ingress sources to advertise routes to. Routes in the table with `advertise` enabled will be advertised to these sources.The enumerated values for this property are expected to expand in the future. When processing this property, check for and log unknown values. 
Optionally halt processing and surface the error, or bypass the resource on which the unexpected property value was encountered.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, isRoutingTableHref: { Type: schema.TypeString, Computed: true, @@ -241,6 +249,9 @@ func dataSourceIBMISVPCRoutingTablesList(d *schema.ResourceData, meta interface{ if routingTable.RouteVPCZoneIngress != nil { rtable[isRoutingTableVPCZoneIngress] = *routingTable.RouteVPCZoneIngress } + if routingTable.AdvertiseRoutesTo != nil { + rtable["advertise_routes_to"] = routingTable.AdvertiseRoutesTo + } if routingTable.IsDefault != nil { rtable[isRoutingTableDefault] = *routingTable.IsDefault } diff --git a/ibm/service/vpc/resource_ibm_is_bare_metal_server.go b/ibm/service/vpc/resource_ibm_is_bare_metal_server.go index 94c1ede67f..0d16c8493b 100644 --- a/ibm/service/vpc/resource_ibm_is_bare_metal_server.go +++ b/ibm/service/vpc/resource_ibm_is_bare_metal_server.go @@ -696,7 +696,8 @@ func resourceIBMISBareMetalServerCreate(context context.Context, d *schema.Resou if err != nil { return diag.FromErr(err) } - options := &vpcv1.CreateBareMetalServerOptions{} + createbmsoptions := &vpcv1.CreateBareMetalServerOptions{} + options := &vpcv1.BareMetalServerPrototype{} var imageStr string if image, ok := d.GetOk(isBareMetalServerImage); ok { imageStr = image.(string) @@ -705,7 +706,7 @@ func resourceIBMISBareMetalServerCreate(context context.Context, d *schema.Resou // enable secure boot if _, ok := d.GetOkExists(isBareMetalServerEnableSecureBoot); ok { - options.SetEnableSecureBoot(d.Get(isBareMetalServerEnableSecureBoot).(bool)) + options.EnableSecureBoot = core.BoolPtr(d.Get(isBareMetalServerEnableSecureBoot).(bool)) } // trusted_platform_module @@ -715,7 +716,7 @@ func resourceIBMISBareMetalServerCreate(context context.Context, d *schema.Resou if err != nil { return diag.FromErr(err) } - options.SetTrustedPlatformModule(trustedPlatformModuleModel) + options.TrustedPlatformModule = trustedPlatformModuleModel } keySet := d.Get(isBareMetalServerKeys).(*schema.Set) @@ -1300,8 +1301,8 @@ func resourceIBMISBareMetalServerCreate(context context.Context, d *schema.Resou ID: &vpc, } } - - bms, response, err := sess.CreateBareMetalServerWithContext(context, options) + createbmsoptions.BareMetalServerPrototype = options + bms, response, err := sess.CreateBareMetalServerWithContext(context, createbmsoptions) if err != nil { return diag.FromErr(fmt.Errorf("[DEBUG] Create bare metal server err %s\n%s", err, response)) } @@ -1360,9 +1361,11 @@ func bareMetalServerGet(context context.Context, d *schema.ResourceData, meta in } d.SetId(*bms.ID) d.Set(isBareMetalServerBandwidth, bms.Bandwidth) - bmsBootTargetIntf := bms.BootTarget.(*vpcv1.BareMetalServerBootTarget) - bmsBootTarget := bmsBootTargetIntf.ID - d.Set(isBareMetalServerBootTarget, bmsBootTarget) + if bms.BootTarget != nil { + bmsBootTargetIntf := bms.BootTarget.(*vpcv1.BareMetalServerBootTarget) + bmsBootTarget := bmsBootTargetIntf.ID + d.Set(isBareMetalServerBootTarget, bmsBootTarget) + } cpuList := make([]map[string]interface{}, 0) if bms.Cpu != nil { currentCPU := map[string]interface{}{} diff --git a/ibm/service/vpc/resource_ibm_is_bare_metal_server_network_interface_allow_float.go b/ibm/service/vpc/resource_ibm_is_bare_metal_server_network_interface_allow_float.go index bb620d77ed..5bc30cd5aa 100644 --- a/ibm/service/vpc/resource_ibm_is_bare_metal_server_network_interface_allow_float.go +++ 
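The bare metal server create path now builds a `vpcv1.BareMetalServerPrototype` and hands it to `CreateBareMetalServerOptions`, rather than setting fields on the options struct directly. Reduced to the wrapper pattern; the rest of the prototype (image, keys, profile, zone, primary network interface) is filled in unchanged from the previous implementation:

```go
package vpc

import (
	"context"

	"github.com/IBM/go-sdk-core/v5/core"
	"github.com/IBM/vpc-go-sdk/vpcv1"
)

// createServerSketch shows the new two-step construction: populate a
// BareMetalServerPrototype, then wrap it in CreateBareMetalServerOptions.
func createServerSketch(ctx context.Context, sess *vpcv1.VpcV1, enableSecureBoot bool) error {
	prototype := &vpcv1.BareMetalServerPrototype{}
	prototype.EnableSecureBoot = core.BoolPtr(enableSecureBoot)
	// ... image, keys, profile, zone and the primary network interface are set
	// on the prototype here, exactly as in the existing create logic.

	createOptions := &vpcv1.CreateBareMetalServerOptions{}
	createOptions.BareMetalServerPrototype = prototype

	_, _, err := sess.CreateBareMetalServerWithContext(ctx, createOptions)
	return err
}
```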
b/ibm/service/vpc/resource_ibm_is_bare_metal_server_network_interface_allow_float.go @@ -370,10 +370,12 @@ func resourceIBMISBareMetalServerNetworkInterfaceAllowFloatRead(context context. // if response returns an error if err != nil || nicIntf == nil { if response != nil { - return diag.FromErr(fmt.Errorf("[ERROR] Error getting Bare Metal Server (%s) network interface (%s): %s\n%s", bareMetalServerId, nicID, err, response)) + return diag.FromErr(fmt.Errorf("[ERROR] Error getting Bare Metal Server (%s) network interface during read (%s): %s\n%s", bareMetalServerId, nicID, err, response)) } else { - return diag.FromErr(fmt.Errorf("[ERROR] Error getting Bare Metal Server (%s) network interface (%s): %s", bareMetalServerId, nicID, err)) - } + d.SetId("") + return nil + // return diag.FromErr(fmt.Errorf("[ERROR] Error getting Bare Metal Server2 (%s) network interface (%s): %s", bareMetalServerId, nicID, err)) + } // else is returning that the nic is not found anywhere } } err = bareMetalServerNICAllowFloatGet(d, meta, sess, nicIntf, bareMetalServerId) @@ -717,7 +719,7 @@ func bareMetalServerNetworkInterfaceAllowFloatDelete(context context.Context, d if response != nil && response.StatusCode == 404 { return nil } - return fmt.Errorf("[ERROR] Error getting Bare Metal Server (%s) network interface(%s) : %s\n%s", bareMetalServerId, nicId, err, response) + return fmt.Errorf("[ERROR] Error getting Bare Metal Server (%s) network interface(%s) during delete : %s\n%s", bareMetalServerId, nicId, err, response) } nicType := "" switch reflect.TypeOf(nicIntf).String() { diff --git a/ibm/service/vpc/resource_ibm_is_instance.go b/ibm/service/vpc/resource_ibm_is_instance.go index 2debbf2734..da973a044a 100644 --- a/ibm/service/vpc/resource_ibm_is_instance.go +++ b/ibm/service/vpc/resource_ibm_is_instance.go @@ -3545,6 +3545,30 @@ func instanceUpdate(d *schema.ResourceData, meta interface{}) error { } } } + bootVolName := "boot_volume.0.name" + if d.HasChange(bootVolName) && !d.IsNewResource() { + volId := d.Get("boot_volume.0.volume_id").(string) + volName := d.Get(bootVolName).(string) + updateVolumeOptions := &vpcv1.UpdateVolumeOptions{ + ID: &volId, + } + volPatchModel := &vpcv1.VolumePatch{ + Name: &volName, + } + volPatchModelAsPatch, err := volPatchModel.AsPatch() + + if err != nil { + return (fmt.Errorf("[ERROR] Error encountered while apply as patch for boot volume name update of instance %s", err)) + } + + updateVolumeOptions.VolumePatch = volPatchModelAsPatch + + vol, res, err := instanceC.UpdateVolume(updateVolumeOptions) + + if vol == nil || err != nil { + return (fmt.Errorf("[ERROR] Error encountered while updating name of boot volume of instance %s/n%s", err, res)) + } + } bootVolAutoDel := "boot_volume.0.auto_delete_volume" if d.HasChange(bootVolAutoDel) && !d.IsNewResource() { listvolattoptions := &vpcv1.ListInstanceVolumeAttachmentsOptions{ diff --git a/ibm/service/vpc/resource_ibm_is_instance_test.go b/ibm/service/vpc/resource_ibm_is_instance_test.go index 6d0b0055b8..892958a5b4 100644 --- a/ibm/service/vpc/resource_ibm_is_instance_test.go +++ b/ibm/service/vpc/resource_ibm_is_instance_test.go @@ -252,6 +252,54 @@ ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCKVmnMOlHKcZK8tpt3MP1lqOLAcqcJzhsvJcjscgVE }, }) } +func TestAccIBMISInstance_RenameBoot(t *testing.T) { + var instance string + vpcname := fmt.Sprintf("tf-vpc-%d", acctest.RandIntRange(10, 100)) + name := fmt.Sprintf("tf-instnace-%d", acctest.RandIntRange(10, 100)) + subnetname := fmt.Sprintf("tf-subnet-%d", acctest.RandIntRange(10, 100)) + 
publicKey := strings.TrimSpace(` +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCKVmnMOlHKcZK8tpt3MP1lqOLAcqcJzhsvJcjscgVERRN7/9484SOBJ3HSKxxNG5JN8owAjy5f9yYwcUg+JaUVuytn5Pv3aeYROHGGg+5G346xaq3DAwX6Y5ykr2fvjObgncQBnuU5KHWCECO/4h8uWuwh/kfniXPVjFToc+gnkqA+3RKpAecZhFXwfalQ9mMuYGFxn+fwn8cYEApsJbsEmb0iJwPiZ5hjFC8wREuiTlhPHDgkBLOiycd20op2nXzDbHfCHInquEe/gYxEitALONxm0swBOwJZwlTDOB7C6y2dzlrtxr1L59m7pCkWI4EtTRLvleehBoj3u7jB4usR +`) + sshname := fmt.Sprintf("tf-ssh-%d", acctest.RandIntRange(10, 100)) + userData1 := "a" + rename1 := fmt.Sprintf("tf-bootvol-%d", acctest.RandIntRange(10, 100)) + rename2 := fmt.Sprintf("tf-bootvol-update-%d", acctest.RandIntRange(10, 100)) + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: testAccCheckIBMISInstanceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMISInstanceRenameConfig(vpcname, subnetname, sshname, publicKey, name, userData1, rename1), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMISInstanceExists("ibm_is_instance.testacc_instance", instance), + resource.TestCheckResourceAttr( + "ibm_is_instance.testacc_instance", "name", name), + resource.TestCheckResourceAttr( + "ibm_is_instance.testacc_instance", "user_data", userData1), + resource.TestCheckResourceAttr( + "ibm_is_instance.testacc_instance", "boot_volume.0.name", rename1), + resource.TestCheckResourceAttr( + "ibm_is_instance.testacc_instance", "zone", acc.ISZoneName), + ), + }, + { + Config: testAccCheckIBMISInstanceRenameConfig(vpcname, subnetname, sshname, publicKey, name, userData1, rename2), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMISInstanceExists("ibm_is_instance.testacc_instance", instance), + resource.TestCheckResourceAttr( + "ibm_is_instance.testacc_instance", "name", name), + resource.TestCheckResourceAttr( + "ibm_is_instance.testacc_instance", "user_data", userData1), + resource.TestCheckResourceAttr( + "ibm_is_instance.testacc_instance", "boot_volume.0.name", rename2), + resource.TestCheckResourceAttr( + "ibm_is_instance.testacc_instance", "zone", acc.ISZoneName), + ), + }, + }, + }) +} func TestAccIBMISInstance_bootVolumeUserTags(t *testing.T) { var instance string @@ -1219,6 +1267,41 @@ func testAccCheckIBMISInstanceResizeConfig(vpcname, subnetname, sshname, publicK keys = [ibm_is_ssh_key.testacc_sshkey.id] }`, vpcname, subnetname, acc.ISZoneName, acc.ISCIDR, sshname, publicKey, name, acc.IsImage, acc.InstanceProfileName, resize, userData, acc.ISZoneName) } +func testAccCheckIBMISInstanceRenameConfig(vpcname, subnetname, sshname, publicKey, name, userData, rename string) string { + return fmt.Sprintf(` + resource "ibm_is_vpc" "testacc_vpc" { + name = "%s" + } + + resource "ibm_is_subnet" "testacc_subnet" { + name = "%s" + vpc = ibm_is_vpc.testacc_vpc.id + zone = "%s" + ipv4_cidr_block = "%s" + } + + resource "ibm_is_ssh_key" "testacc_sshkey" { + name = "%s" + public_key = "%s" + } + + resource "ibm_is_instance" "testacc_instance" { + name = "%s" + image = "%s" + profile = "%s" + boot_volume { + name = "%s" + } + primary_network_interface { + subnet = ibm_is_subnet.testacc_subnet.id + } + user_data = "%s" + vpc = ibm_is_vpc.testacc_vpc.id + zone = "%s" + keys = [ibm_is_ssh_key.testacc_sshkey.id] + } + `, vpcname, subnetname, acc.ISZoneName, acc.ISCIDR, sshname, publicKey, name, acc.IsImage, acc.InstanceProfileName, rename, userData, acc.ISZoneName) +} func testAccCheckIBMISInstanceBandwidthConfig(vpcname, subnetname, sshname, publicKey, name string, bandwidth int) 
string { return fmt.Sprintf(` diff --git a/ibm/service/vpc/resource_ibm_is_share.go b/ibm/service/vpc/resource_ibm_is_share.go index 487479fd42..39fcaa0d72 100644 --- a/ibm/service/vpc/resource_ibm_is_share.go +++ b/ibm/service/vpc/resource_ibm_is_share.go @@ -54,12 +54,11 @@ func ResourceIbmIsShare() *schema.Resource { Schema: map[string]*schema.Schema{ "encryption_key": { - Type: schema.TypeString, - Optional: true, - RequiredWith: []string{"size"}, - ForceNew: true, - Computed: true, - Description: "The CRN of the key to use for encrypting this file share.If no encryption key is provided, the share will not be encrypted.", + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Description: "The CRN of the key to use for encrypting this file share.If no encryption key is provided, the share will not be encrypted.", }, "initial_owner": { Type: schema.TypeList, @@ -105,8 +104,8 @@ func ResourceIbmIsShare() *schema.Resource { Type: schema.TypeInt, Optional: true, Computed: true, - ExactlyOneOf: []string{"size", "source_share"}, - ConflictsWith: []string{"replication_cron_spec", "source_share"}, + ExactlyOneOf: []string{"size", "source_share", "source_share_crn"}, + ConflictsWith: []string{"replication_cron_spec", "source_share", "source_share_crn"}, ValidateFunc: validate.InvokeValidator("ibm_is_share", "size"), Description: "The size of the file share rounded up to the next gigabyte.", }, @@ -511,16 +510,25 @@ func ResourceIbmIsShare() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, - ConflictsWith: []string{"replica_share", "size"}, + Computed: true, + ConflictsWith: []string{"replica_share", "size", "source_share_crn"}, RequiredWith: []string{"replication_cron_spec"}, Description: "The ID of the source file share for this replica file share. The specified file share must not already have a replica, and must not be a replica.", }, + "source_share_crn": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + ConflictsWith: []string{"replica_share", "size", "source_share"}, + RequiredWith: []string{"replication_cron_spec"}, + Description: "The CRN of the source file share for this replica file share. 
The specified file share must not already have a replica, and must not be a replica.", + }, "replication_cron_spec": &schema.Schema{ Type: schema.TypeString, Optional: true, DiffSuppressFunc: suppressCronSpecDiff, Computed: true, - RequiredWith: []string{"source_share"}, ConflictsWith: []string{"replica_share", "size"}, Description: "The cron specification for the file share replication schedule.Replication of a share can be scheduled to occur at most once per hour.", }, @@ -563,6 +571,30 @@ func ResourceIbmIsShare() *schema.Resource { Computed: true, Description: "The date and time that the file share was last synchronized to its replica.This property will be present when the `replication_role` is `source`.", }, + "latest_sync": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Description: "Information about the latest synchronization for this file share.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "completed_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The completed date and time of last synchronization between the replica share and its source.", + }, + "data_transferred": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Description: "The data transferred (in bytes) in the last synchronization between the replica and its source.", + }, + "started_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The start date and time of last synchronization between the replica share and its source.", + }, + }, + }, + }, "latest_job": &schema.Schema{ Type: schema.TypeList, Computed: true, @@ -727,17 +759,18 @@ func resourceIbmIsShareCreate(context context.Context, d *schema.ResourceData, m accessControlMode := accessControlModeIntf.(string) sharePrototype.AccessControlMode = &accessControlMode } + if encryptionKeyIntf, ok := d.GetOk("encryption_key"); ok { + encryptionKey := encryptionKeyIntf.(string) + encryptionKeyIdentity := &vpcv1.EncryptionKeyIdentity{ + CRN: &encryptionKey, + } + sharePrototype.EncryptionKey = encryptionKeyIdentity + } if sizeIntf, ok := d.GetOk("size"); ok { size := int64(sizeIntf.(int)) sharePrototype.Size = &size - if encryptionKeyIntf, ok := d.GetOk("encryption_key"); ok { - encryptionKey := encryptionKeyIntf.(string) - encryptionKeyIdentity := &vpcv1.EncryptionKeyIdentity{ - CRN: &encryptionKey, - } - sharePrototype.EncryptionKey = encryptionKeyIdentity - } + initial_owner := &vpcv1.ShareInitialOwner{} if initialOwnerIntf, ok := d.GetOk("initial_owner"); ok { initialOwnerMap := initialOwnerIntf.([]interface{})[0].(map[string]interface{}) @@ -823,7 +856,15 @@ func resourceIbmIsShareCreate(context context.Context, d *schema.ResourceData, m sharePrototype.SourceShare = &vpcv1.ShareIdentity{ ID: &sourceShare, } + } else { + sourceShareCRN := d.Get("source_share_crn").(string) + if sourceShareCRN != "" { + sharePrototype.SourceShare = &vpcv1.ShareIdentity{ + CRN: &sourceShareCRN, + } + } } + replicationCronSpec := d.Get("replication_cron_spec").(string) sharePrototype.ReplicationCronSpec = &replicationCronSpec } @@ -1075,9 +1116,17 @@ func resourceIbmIsShareRead(context context.Context, d *schema.ResourceData, met return diag.FromErr(fmt.Errorf("Error setting resource_type: %s", err)) } - // if share.LastSyncAt != nil { - // d.Set("last_sync_at", share.LastSyncAt.String()) - // } + latest_syncs := []map[string]interface{}{} + if share.LatestSync != nil { + latest_sync := make(map[string]interface{}) + latest_sync["completed_at"] = 
flex.DateTimeToString(share.LatestSync.CompletedAt) + if share.LatestSync.DataTransferred != nil { + latest_sync["data_transferred"] = *share.LatestSync.DataTransferred + } + latest_sync["started_at"] = flex.DateTimeToString(share.LatestSync.StartedAt) + latest_syncs = append(latest_syncs, latest_sync) + } + d.Set("latest_sync", latest_syncs) latest_jobs := []map[string]interface{}{} if share.LatestJob != nil { latest_job := make(map[string]interface{}) diff --git a/ibm/service/vpc/resource_ibm_is_share_test.go b/ibm/service/vpc/resource_ibm_is_share_test.go index 353e1fe570..b5f4c0d6ae 100644 --- a/ibm/service/vpc/resource_ibm_is_share_test.go +++ b/ibm/service/vpc/resource_ibm_is_share_test.go @@ -38,6 +38,28 @@ func TestAccIbmIsShareBasic(t *testing.T) { }) } +func TestAccIbmIsShareCrossRegionReplication(t *testing.T) { + var conf vpcv1.Share + name := fmt.Sprintf("tf-fs-name-%d", acctest.RandIntRange(10, 100)) + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: testAccCheckIbmIsShareDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckIbmIsShareCrossRegionReplicaConfig(name), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIbmIsShareExists("ibm_is_share.is_share", conf), + resource.TestCheckResourceAttrSet("ibm_is_share.is_share", "source_share_crn"), + resource.TestCheckResourceAttrSet("ibm_is_share.is_share", "encryption_key"), + resource.TestCheckResourceAttr("ibm_is_share.is_share", "name", name), + resource.TestCheckResourceAttr("ibm_is_share.is_share", "encryption", "user_managed"), + ), + }, + }, + }) +} + func TestAccIbmIsShareAllArgs(t *testing.T) { var conf vpcv1.Share @@ -157,7 +179,18 @@ func testAccCheckIbmIsShareConfigBasic(name string) string { } `, name, acc.ShareProfileName) } - +func testAccCheckIbmIsShareCrossRegionReplicaConfig(name string) string { + return fmt.Sprintf(` + resource "ibm_is_share" "is_share" { + zone = "us-south-2" + encryption_key = "%s" + source_share_crn = "%s" + replication_cron_spec = "0 */5 * * *" + name = "%s" + profile = "%s" + } + `, acc.ShareEncryptionKey, acc.SourceShareCRN, name, acc.ShareProfileName) +} func testAccCheckIbmIsShareConfig(vpcName, name string, size int, shareTergetName string) string { return fmt.Sprintf(` diff --git a/ibm/service/vpc/resource_ibm_is_subnet_reserved_ip.go b/ibm/service/vpc/resource_ibm_is_subnet_reserved_ip.go index bfe20059a8..9c8523bc22 100644 --- a/ibm/service/vpc/resource_ibm_is_subnet_reserved_ip.go +++ b/ibm/service/vpc/resource_ibm_is_subnet_reserved_ip.go @@ -303,6 +303,11 @@ func resourceIBMISReservedIPUpdate(d *schema.ResourceData, meta interface{}) err if err != nil { return fmt.Errorf("[ERROR] Error updating the reserved IP %s\n%s", err, response) } + + _, err = isWaitForReservedIpAvailable(sess, subnetID, reservedIPID, d.Timeout(schema.TimeoutCreate), d) + if err != nil { + return fmt.Errorf("[ERROR] Error waiting for the reserved IP to be available: %s", err) + } } return resourceIBMISReservedIPRead(d, meta) } diff --git a/ibm/service/vpc/resource_ibm_is_vpc.go b/ibm/service/vpc/resource_ibm_is_vpc.go index 68e75e6fad..da9104f86c 100644 --- a/ibm/service/vpc/resource_ibm_is_vpc.go +++ b/ibm/service/vpc/resource_ibm_is_vpc.go @@ -190,6 +190,18 @@ func ResourceIBMISVPC() *schema.Resource { Computed: true, Description: "The type of the DNS resolver used for the VPC.- `delegated`: DNS server addresses are provided by the DNS resolver of the VPC specified in `dns.resolver.vpc`.-
`manual`: DNS server addresses are specified in `dns.resolver.manual_servers`.- `system`: DNS server addresses are provided by the system.", }, + + "dns_binding_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Description: "The VPC dns binding id whose DNS resolver provides the DNS server addresses for this VPC.", + }, + "dns_binding_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "The VPC dns binding name whose DNS resolver provides the DNS server addresses for this VPC.", + }, "vpc_id": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -714,6 +726,51 @@ func vpcCreate(d *schema.ResourceData, meta interface{}, name, apm, rg string, i return err } + if dnsresolvertpeOk, ok := d.GetOk("dns.0.resolver.0.type"); ok { + if dnsresolvertpeOk.(string) == "delegated" && d.Get("dns.0.resolver.0.vpc_id").(string) != "" { + vpcId := d.Get("dns.0.resolver.0.vpc_id").(string) + createDnsBindings := &vpcv1.CreateVPCDnsResolutionBindingOptions{ + VPCID: vpc.ID, + VPC: &vpcv1.VPCIdentity{ + ID: &vpcId, + }, + } + if bindingNameOk, ok := d.GetOk("dns.0.resolver.0.dns_binding_name"); ok { + bindingName := bindingNameOk.(string) + createDnsBindings.Name = &bindingName + } + _, response, err := sess.CreateVPCDnsResolutionBinding(createDnsBindings) + if err != nil { + log.Printf("[DEBUG] CreateVPCDnsResolutionBindingWithContext failed %s\n%s", err, response) + return fmt.Errorf("[ERROR] CreateVPCDnsResolutionBinding failed in vpc resource %s\n%s", err, response) + } + resolverType := "delegated" + dnsPatch := &vpcv1.VpcdnsPatch{ + Resolver: &vpcv1.VpcdnsResolverPatch{ + Type: &resolverType, + VPC: &vpcv1.VpcdnsResolverVPCPatch{ + ID: &vpcId, + }, + }, + } + vpcPatchModel := &vpcv1.VPCPatch{} + vpcPatchModel.Dns = dnsPatch + vpcPatchModelAsPatch, err := vpcPatchModel.AsPatch() + if err != nil { + return fmt.Errorf("[ERROR] CreateVPCDnsResolutionBinding failed in vpcpatch as patch %s", err) + } + updateVpcOptions := &vpcv1.UpdateVPCOptions{ + ID: vpc.ID, + } + updateVpcOptions.VPCPatch = vpcPatchModelAsPatch + _, response, err = sess.UpdateVPC(updateVpcOptions) + if err != nil { + log.Printf("[DEBUG] Update vpc with delegated failed %s\n%s", err, response) + return fmt.Errorf("[ERROR] Update vpc with delegated failed in vpc resource %s\n%s", err, response) + } + } + } + if sgAclRules, ok := d.GetOk(isVPCNoSgAclRules); ok { sgAclRules := sgAclRules.(bool) if sgAclRules { @@ -920,6 +977,38 @@ func vpcGet(d *schema.ResourceData, meta interface{}, id string) error { if err != nil { return err } + resolverMapArray := dnsMap["resolver"].([]map[string]interface{}) + resolverMap := resolverMapArray[0] + if resolverMap["type"] != nil && resolverMap["vpc_id"] != nil { + resType := resolverMap["type"].(*string) + resVpc := resolverMap["vpc_id"].(string) + if *resType == "delegated" { + listVPCDnsResolutionBindingOptions := &vpcv1.ListVPCDnsResolutionBindingsOptions{ + VPCID: vpc.ID, + } + + pager, err := sess.NewVPCDnsResolutionBindingsPager(listVPCDnsResolutionBindingOptions) + if err != nil { + return fmt.Errorf("[ERROR] Error getting VPC dns bindings: %s", err) + } + var allResults []vpcv1.VpcdnsResolutionBinding + for pager.HasNext() { + nextPage, err := pager.GetNext() + if err != nil { + return fmt.Errorf("[ERROR] Error getting VPC dns bindings pager next: %s", err) + } + allResults = append(allResults, nextPage...) 
+ } + for _, binding := range allResults { + if *binding.VPC.ID == resVpc { + resolverMap["dns_binding_id"] = binding.ID + resolverMap["dns_binding_name"] = binding.Name + resolverMapArray[0] = resolverMap + dnsMap["resolver"] = resolverMapArray + } + } + } + } if err = d.Set(isVPCDns, []map[string]interface{}{dnsMap}); err != nil { return fmt.Errorf("[ERROR] Error setting dns: %s", err) } @@ -1543,7 +1632,11 @@ func resourceIBMIsVPCMapToVpcdnsPrototype(modelMap map[string]interface{}) (*vpc func resourceIBMIsVPCMapToVpcdnsResolverPrototype(modelMap map[string]interface{}) (vpcv1.VpcdnsResolverPrototypeIntf, error) { model := &vpcv1.VpcdnsResolverPrototype{} if modelMap["type"] != nil && modelMap["type"].(string) != "" { - model.Type = core.StringPtr(modelMap["type"].(string)) + if modelMap["type"].(string) == "delegated" { + model.Type = core.StringPtr("system") + } else { + model.Type = core.StringPtr(modelMap["type"].(string)) + } } if modelMap["manual_servers"] != nil && modelMap["manual_servers"].(*schema.Set).Len() > 0 { model.Type = core.StringPtr("manual") @@ -1583,6 +1676,7 @@ func resourceIBMIsVPCVpcdnsToMap(model *vpcv1.Vpcdns, vpcId, vpcCrn string) (map return modelMap, err } modelMap["resolver"] = []map[string]interface{}{resolverMap} + return modelMap, nil } diff --git a/ibm/service/vpc/resource_ibm_is_vpc_routing_table.go b/ibm/service/vpc/resource_ibm_is_vpc_routing_table.go index 50c6a9c8ce..1908868ee2 100644 --- a/ibm/service/vpc/resource_ibm_is_vpc_routing_table.go +++ b/ibm/service/vpc/resource_ibm_is_vpc_routing_table.go @@ -65,6 +65,14 @@ func ResourceIBMISVPCRoutingTable() *schema.Resource { Set: schema.HashString, Description: "The filters specifying the resources that may create routes in this routing table, The resource type: vpn_gateway or vpn_server", }, + "advertise_routes_to": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Computed: true, + Set: schema.HashString, + Description: "The ingress sources to advertise routes to. 
Routes in the table with `advertise` enabled will be advertised to these sources.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, rtRouteDirectLinkIngress: { Type: schema.TypeBool, ForceNew: false, @@ -209,6 +217,15 @@ func resourceIBMISVPCRoutingTableCreate(d *schema.ResourceData, meta interface{} } createVpcRoutingTableOptions.AcceptRoutesFrom = aroutes } + if _, ok := d.GetOk("advertise_routes_to"); ok { + var advertiseRoutesToList []string + advertiseRoutesTo := d.Get("advertise_routes_to").(*schema.Set) + + for _, val := range advertiseRoutesTo.List() { + advertiseRoutesToList = append(advertiseRoutesToList, val.(string)) + } + createVpcRoutingTableOptions.AdvertiseRoutesTo = advertiseRoutesToList + } if _, ok := d.GetOk(rtRouteInternetIngress); ok { rtRouteInternetIngress := d.Get(rtRouteInternetIngress).(bool) @@ -263,12 +280,22 @@ func resourceIBMISVPCRoutingTableRead(d *schema.ResourceData, meta interface{}) d.Set(rtRouteVPCZoneIngress, routeTable.RouteVPCZoneIngress) d.Set(rtIsDefault, routeTable.IsDefault) acceptRoutesFromArray := make([]string, 0) + advertiseRoutesToArray := make([]string, 0) for i := 0; i < len(routeTable.AcceptRoutesFrom); i++ { acceptRoutesFromArray = append(acceptRoutesFromArray, string(*(routeTable.AcceptRoutesFrom[i].ResourceType))) } if err = d.Set("accept_routes_from_resource_type", acceptRoutesFromArray); err != nil { return fmt.Errorf("[ERROR] Error setting accept_routes_from_resource_type: %s", err) } + + for i := 0; i < len(routeTable.AdvertiseRoutesTo); i++ { + advertiseRoutesToArray = append(advertiseRoutesToArray, routeTable.AdvertiseRoutesTo[i]) + } + + if err = d.Set("advertise_routes_to", advertiseRoutesToArray); err != nil { + return fmt.Errorf("[ERROR] Error setting advertise_routes_to: %s", err) + } + subnets := make([]map[string]interface{}, 0) for _, s := range routeTable.Subnets { @@ -307,19 +334,40 @@ func resourceIBMISVPCRoutingTableUpdate(d *schema.ResourceData, meta interface{} routingTablePatchModel.Name = core.StringPtr(name) hasChange = true } + removeAcceptRoutesFromFilter := false if d.HasChange("accept_routes_from_resource_type") { var aroutes []vpcv1.ResourceFilter acptRoutes := d.Get("accept_routes_from_resource_type").(*schema.Set) - for _, val := range acptRoutes.List() { - value := val.(string) - resourceFilter := vpcv1.ResourceFilter{ - ResourceType: &value, + if len(acptRoutes.List()) == 0 { + removeAcceptRoutesFromFilter = true + } else { + for _, val := range acptRoutes.List() { + value := val.(string) + resourceFilter := vpcv1.ResourceFilter{ + ResourceType: &value, + } + aroutes = append(aroutes, resourceFilter) } - aroutes = append(aroutes, resourceFilter) } routingTablePatchModel.AcceptRoutesFrom = aroutes hasChange = true } + removeAdvertiseRoutesTo := false + if d.HasChange("advertise_routes_to") { + var advertiseRoutesToList []string + advertiseRoutesTo := d.Get("advertise_routes_to").(*schema.Set) + + if len(advertiseRoutesTo.List()) == 0 { + removeAdvertiseRoutesTo = true + } else { + for _, val := range advertiseRoutesTo.List() { + advertiseRoutesToList = append(advertiseRoutesToList, val.(string)) + } + } + + routingTablePatchModel.AdvertiseRoutesTo = advertiseRoutesToList + hasChange = true + } if d.HasChange(rtRouteDirectLinkIngress) { routeDirectLinkIngress := d.Get(rtRouteDirectLinkIngress).(bool) routingTablePatchModel.RouteDirectLinkIngress = core.BoolPtr(routeDirectLinkIngress) @@ -348,6 +396,12 @@ func resourceIBMISVPCRoutingTableUpdate(d *schema.ResourceData, meta interface{} return 
fmt.Errorf("[ERROR] Error calling asPatch for RoutingTablePatchModel: %s", asPatchErr) } + if removeAdvertiseRoutesTo { + routingTablePatchModelAsPatch["advertise_routes_to"] = []string{} + } + if removeAcceptRoutesFromFilter { + routingTablePatchModelAsPatch["accept_routes_from"] = []vpcv1.ResourceFilter{} + } updateVpcRoutingTableOptions.RoutingTablePatch = routingTablePatchModelAsPatch _, response, err := sess.UpdateVPCRoutingTable(updateVpcRoutingTableOptions) if err != nil { diff --git a/ibm/service/vpc/resource_ibm_is_vpc_routing_table_route.go b/ibm/service/vpc/resource_ibm_is_vpc_routing_table_route.go index 2a0495e69c..dd5c23d053 100644 --- a/ibm/service/vpc/resource_ibm_is_vpc_routing_table_route.go +++ b/ibm/service/vpc/resource_ibm_is_vpc_routing_table_route.go @@ -78,6 +78,12 @@ func ResourceIBMISVPCRoutingTableRoute() *schema.Resource { Description: "The action to perform with a packet matching the route.", ValidateFunc: validate.InvokeValidator("ibm_is_vpc_routing_table_route", rAction), }, + "advertise": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Indicates whether this route will be advertised to the ingress sources specified by the `advertise_routes_to` routing table property.", + }, rName: { Type: schema.TypeString, Optional: true, @@ -242,6 +248,11 @@ func resourceIBMISVPCRoutingTableRouteCreate(d *schema.ResourceData, meta interf createVpcRoutingTableRouteOptions.SetAction(routeAction) } + if advertiseVal, ok := d.GetOk("advertise"); ok { + advertise := advertiseVal.(bool) + createVpcRoutingTableRouteOptions.SetAdvertise(advertise) + } + if name, ok := d.GetOk(rName); ok { routeName := name.(string) createVpcRoutingTableRouteOptions.SetName(routeName) @@ -282,6 +293,9 @@ func resourceIBMISVPCRoutingTableRouteRead(d *schema.ResourceData, meta interfac } d.Set(rID, *route.ID) + if route.Advertise != nil { + d.Set("Advertise", route.Advertise) + } d.Set(rName, *route.Name) d.Set(rDestination, *route.Destination) if route.NextHop != nil { @@ -329,6 +343,12 @@ func resourceIBMISVPCRoutingTableRouteUpdate(d *schema.ResourceData, meta interf // Construct an instance of the RoutePatch model routePatchModel := new(vpcv1.RoutePatch) + if d.HasChange("advertise") { + advertiseVal := d.Get("advertise").(bool) + routePatchModel.Advertise = &advertiseVal + hasChange = true + + } if d.HasChange(rName) { name := d.Get(rName).(string) routePatchModel.Name = &name diff --git a/ibm/service/vpc/resource_ibm_is_vpc_routing_table_route_test.go b/ibm/service/vpc/resource_ibm_is_vpc_routing_table_route_test.go index 5157cbcf68..4ce678bd24 100644 --- a/ibm/service/vpc/resource_ibm_is_vpc_routing_table_route_test.go +++ b/ibm/service/vpc/resource_ibm_is_vpc_routing_table_route_test.go @@ -24,6 +24,8 @@ func TestAccIBMISVPCRoutingTableRoute_basic(t *testing.T) { routeName1 := fmt.Sprintf("tfvpcuat-create-%d", acctest.RandIntRange(10, 100)) routeTableName := fmt.Sprintf("tfvpcrt-create-%d", acctest.RandIntRange(10, 100)) routeTableName1 := fmt.Sprintf("tfvpcrt-up-create-%d", acctest.RandIntRange(10, 100)) + advertiseVal := fmt.Sprintf("tfpvpcuat-create-%d", acctest.RandIntRange(10, 50)) + advertiseValUpd := fmt.Sprintf("tfpvpcuat-update-%d", acctest.RandIntRange(60, 100)) resource.Test(t, resource.TestCase{ PreCheck: func() { acc.TestAccPreCheck(t) }, @@ -31,19 +33,23 @@ func TestAccIBMISVPCRoutingTableRoute_basic(t *testing.T) { CheckDestroy: testAccCheckIBMISVPCRouteTableRouteDestroy, Steps: []resource.TestStep{ { - Config: 
testAccCheckIBMISVPCRouteTableRouteConfig(routeTableName, name1, subnetName, routeName), + Config: testAccCheckIBMISVPCRouteTableRouteConfig(routeTableName, name1, subnetName, routeName, advertiseVal), Check: resource.ComposeTestCheckFunc( testAccCheckIBMISVPCRouteTableRouteExists("ibm_is_vpc_routing_table_route.test_custom_route1", vpcRouteTables), resource.TestCheckResourceAttr( "ibm_is_vpc_routing_table_route.test_custom_route1", "name", routeName), + resource.TestCheckResourceAttr( + "ibm_is_vpc_routing_table_route.test_custom_route1", "advertise", advertiseVal), ), }, { - Config: testAccCheckIBMISVPCRouteTableRouteConfig(routeTableName1, name1, subnetName, routeName1), + Config: testAccCheckIBMISVPCRouteTableRouteConfig(routeTableName1, name1, subnetName, routeName1, advertiseValUpd), Check: resource.ComposeTestCheckFunc( testAccCheckIBMISVPCRouteTableRouteExists("ibm_is_vpc_routing_table_route.test_custom_route1", vpcRouteTables), resource.TestCheckResourceAttr( "ibm_is_vpc_routing_table_route.test_custom_route1", "name", routeName1), + resource.TestCheckResourceAttr( + "ibm_is_vpc_routing_table_route.test_custom_route1", "advertise", advertiseValUpd), ), }, }, @@ -114,7 +120,7 @@ func testAccCheckIBMISVPCRouteTableRouteExists(n, vpcrouteTableID string) resour } } -func testAccCheckIBMISVPCRouteTableRouteConfig(rtName, name, subnetName, routeName string) string { +func testAccCheckIBMISVPCRouteTableRouteConfig(rtName, name, subnetName, routeName, advertise string) string { return fmt.Sprintf(` resource "ibm_is_vpc" "testacc_vpc" { name = "%s" @@ -137,10 +143,11 @@ resource "ibm_is_vpc_routing_table_route" "test_custom_route1" { depends_on = [ibm_is_vpc_routing_table.test_ibm_is_vpc_routing_table, ibm_is_subnet.test_cr_subnet1] vpc = ibm_is_vpc.testacc_vpc.id routing_table = ibm_is_vpc_routing_table.test_ibm_is_vpc_routing_table.routing_table + advertise = "%s" name = "%s" zone = "%s" next_hop = "%s" destination = ibm_is_subnet.test_cr_subnet1.ipv4_cidr_block } -`, name, rtName, subnetName, acc.ISZoneName, acc.ISCIDR, routeName, acc.ISZoneName, acc.ISRouteNextHop) +`, name, rtName, subnetName, acc.ISZoneName, acc.ISCIDR, advertise, routeName, acc.ISZoneName, acc.ISRouteNextHop) } diff --git a/ibm/service/vpc/resource_ibm_is_vpc_routing_table_test.go b/ibm/service/vpc/resource_ibm_is_vpc_routing_table_test.go index 3068c21d0a..91e876d6a9 100644 --- a/ibm/service/vpc/resource_ibm_is_vpc_routing_table_test.go +++ b/ibm/service/vpc/resource_ibm_is_vpc_routing_table_test.go @@ -89,6 +89,56 @@ func TestAccIBMISVPCRoutingTable_acceptRoutesFrom(t *testing.T) { }) } +// advertise_routes_to +func TestAccIBMISVPCRoutingTable_advertiseRoutesTO(t *testing.T) { + var vpcRouteTables string + name1 := fmt.Sprintf("tfvpc-create-%d", acctest.RandIntRange(10, 100)) + routeTableName := fmt.Sprintf("tfvpcrt-create-%d", acctest.RandIntRange(10, 100)) + + advertiseRoutesToDirectLink := "direct_link" + advertiseRoutesToTransit_gateway := "transit_gateway" + acceptRoutesFromVPNServer := "vpn_server" + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: testAccCheckIBMISVPCRouteTableDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMISVPCRouteTableAdvertiseRoutesToConfig(routeTableName, name1, acceptRoutesFromVPNServer, advertiseRoutesToDirectLink, advertiseRoutesToTransit_gateway), + Check: resource.ComposeTestCheckFunc( + 
testAccCheckIBMISVPCRouteTableExists("ibm_is_vpc_routing_table.test_ibm_is_vpc_routing_table", vpcRouteTables), + resource.TestCheckResourceAttr( + "ibm_is_vpc_routing_table.test_ibm_is_vpc_routing_table", "advertise_routes_to.0", advertiseRoutesToDirectLink), + resource.TestCheckResourceAttr( + "ibm_is_vpc_routing_table.test_ibm_is_vpc_routing_table", "advertise_routes_to.1", advertiseRoutesToTransit_gateway), + resource.TestCheckResourceAttr( + "ibm_is_vpc_routing_table.test_ibm_is_vpc_routing_table", "accept_routes_from_resource_type.0", acceptRoutesFromVPNServer), + ), + }, + { + Config: testAccCheckIBMISVPCRouteTableAdvertiseRoutesToDLConfig(routeTableName, name1, acceptRoutesFromVPNServer, advertiseRoutesToDirectLink), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMISVPCRouteTableExists("ibm_is_vpc_routing_table.test_ibm_is_vpc_routing_table", vpcRouteTables), + resource.TestCheckResourceAttr( + "ibm_is_vpc_routing_table.test_ibm_is_vpc_routing_table", "advertise_routes_to.0", advertiseRoutesToDirectLink), + resource.TestCheckResourceAttr( + "ibm_is_vpc_routing_table.test_ibm_is_vpc_routing_table", "accept_routes_from_resource_type.0", acceptRoutesFromVPNServer), + ), + }, + { + Config: testAccCheckIBMISVPCRouteTableAdvertiseRoutesToRemovalConfig(routeTableName, name1), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMISVPCRouteTableExists("ibm_is_vpc_routing_table.test_ibm_is_vpc_routing_table", vpcRouteTables), + resource.TestCheckResourceAttr( + "ibm_is_vpc_routing_table.test_ibm_is_vpc_routing_table", "advertise_routes_to.#", "0"), + resource.TestCheckResourceAttr( + "ibm_is_vpc_routing_table.test_ibm_is_vpc_routing_table", "accept_routes_from_resource_type.#", "0"), + ), + }, + }, + }) +} + func testAccCheckIBMISVPCRouteTableDestroy(s *terraform.State) error { //userDetails, _ := acc.TestAccProvider.Meta().(conns.ClientSession).BluemixUserDetails() @@ -174,3 +224,50 @@ resource "ibm_is_vpc_routing_table" "test_ibm_is_vpc_routing_table" { accept_routes_from_resource_type=["%s"] }`, name, rtName, acceptRoutesFromVPNServer) } + +func testAccCheckIBMISVPCRouteTableAdvertiseRoutesToConfig(rtName, name, acceptRoutesFromVPNServer, advertiseRoutesTo1, advertiseRoutesTo2 string) string { + return fmt.Sprintf(` +resource "ibm_is_vpc" "testacc_vpc" { + name = "%s" +} +resource "ibm_is_vpc_routing_table" "test_ibm_is_vpc_routing_table" { + depends_on = [ibm_is_vpc.testacc_vpc] + route_direct_link_ingress = true + route_transit_gateway_ingress = true + vpc = ibm_is_vpc.testacc_vpc.id + name = "%s" + accept_routes_from_resource_type=["%s"] + advertise_routes_to=["%s","%s"] +}`, name, rtName, acceptRoutesFromVPNServer, advertiseRoutesTo1, advertiseRoutesTo2) +} + +func testAccCheckIBMISVPCRouteTableAdvertiseRoutesToDLConfig(rtName, name, acceptRoutesFromVPNServer, advertiseRoutesTo1 string) string { + return fmt.Sprintf(` +resource "ibm_is_vpc" "testacc_vpc" { + name = "%s" +} +resource "ibm_is_vpc_routing_table" "test_ibm_is_vpc_routing_table" { + depends_on = [ibm_is_vpc.testacc_vpc] + route_direct_link_ingress = true + route_transit_gateway_ingress = true + vpc = ibm_is_vpc.testacc_vpc.id + name = "%s" + accept_routes_from_resource_type=["%s"] + advertise_routes_to=["%s"] +}`, name, rtName, acceptRoutesFromVPNServer, advertiseRoutesTo1) +} + +func testAccCheckIBMISVPCRouteTableAdvertiseRoutesToRemovalConfig(rtName, name string) string { + return fmt.Sprintf(` +resource "ibm_is_vpc" "testacc_vpc" { + name = "%s" +} +resource "ibm_is_vpc_routing_table" 
"test_ibm_is_vpc_routing_table" { + depends_on = [ibm_is_vpc.testacc_vpc] + route_direct_link_ingress = true + vpc = ibm_is_vpc.testacc_vpc.id + name = "%s" + accept_routes_from_resource_type=[] + advertise_routes_to=[] +}`, name, rtName) +} diff --git a/ibm/service/vpc/resource_ibm_is_vpc_test.go b/ibm/service/vpc/resource_ibm_is_vpc_test.go index 6f9cc88a66..5468af48d1 100644 --- a/ibm/service/vpc/resource_ibm_is_vpc_test.go +++ b/ibm/service/vpc/resource_ibm_is_vpc_test.go @@ -298,6 +298,53 @@ func TestAccIBMISVPC_dns_delegated(t *testing.T) { }, }) } +func TestAccIBMISVPC_dns_delegated_first(t *testing.T) { + var vpc string + name1 := fmt.Sprintf("terraformvpcuat-%d", acctest.RandIntRange(10, 100)) + name2 := fmt.Sprintf("terraformvpcuat-%d", acctest.RandIntRange(10, 100)) + subnet1 := fmt.Sprintf("terraformsubnet-%d", acctest.RandIntRange(10, 100)) + subnet2 := fmt.Sprintf("terraformsubnet-%d", acctest.RandIntRange(10, 100)) + resourecinstance := fmt.Sprintf("terraformresource-%d", acctest.RandIntRange(10, 100)) + resolver1 := fmt.Sprintf("terraformresolver-%d", acctest.RandIntRange(10, 100)) + binding := fmt.Sprintf("terraformbinding-%d", acctest.RandIntRange(10, 100)) + enableHubTrue := true + enableHubFalse := false + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + CheckDestroy: testAccCheckIBMISVPCDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMISVPCDnsDelegatedFirstConfig(name1, name2, subnet1, subnet2, resourecinstance, resolver1, binding, enableHubTrue, enableHubFalse), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMISVPCExists("ibm_is_vpc.hub_true", vpc), + resource.TestCheckResourceAttr( + "ibm_is_vpc.hub_true", "name", name1), + resource.TestCheckResourceAttr( + "ibm_is_vpc.hub_false_delegated", "name", name2), + resource.TestCheckResourceAttr( + "ibm_is_vpc.hub_true", "dns.0.enable_hub", fmt.Sprintf("%t", enableHubTrue)), + resource.TestCheckResourceAttr( + "ibm_is_vpc.hub_true", "dns.0.resolver.0.type", "system"), + resource.TestCheckResourceAttr( + "ibm_is_vpc.hub_false_delegated", "dns.0.enable_hub", fmt.Sprintf("%t", enableHubFalse)), + resource.TestCheckResourceAttr( + "ibm_is_vpc.hub_false_delegated", "dns.0.resolver.0.type", "delegated"), + resource.TestCheckResourceAttr( + "ibm_is_vpc.hub_false_delegated", "dns.0.resolution_binding_count", "1"), + resource.TestCheckResourceAttr( + "ibm_is_vpc.hub_false_delegated", "dns.0.resolver.0.dns_binding_name", binding), + resource.TestCheckResourceAttrSet( + "ibm_is_vpc.hub_false_delegated", "dns.0.resolver.0.dns_binding_id"), + resource.TestCheckResourceAttrSet( + "ibm_is_vpc.hub_false_delegated", "dns.0.resolver.0.vpc_id"), + resource.TestCheckResourceAttrSet( + "ibm_is_vpc.hub_false_delegated", "dns.0.resolver.0.vpc_name"), + ), + }, + }, + }) +} func TestAccIBMISVPC_basic_apm(t *testing.T) { var vpc string @@ -597,6 +644,69 @@ func testAccCheckIBMISVPCDnsDelegatedConfig(vpcname, vpcname2, subnetname1, subn `, vpcname, enableHub, vpcname2, enablehubfalse, subnetname1, acc.ISZoneName, subnetname2, acc.ISZoneName, subnetname3, acc.ISZoneName, subnetname4, acc.ISZoneName, resourceinstance, resolver1, resolver2, bindingname) +} +func testAccCheckIBMISVPCDnsDelegatedFirstConfig(vpcname, vpcname2, subnetname1, subnetname2, resourceinstance, resolver1, bindingname string, enableHub, enablehubfalse bool) string { + return fmt.Sprintf(` + data "ibm_resource_group" "rg" { + is_default = true + } + + resource ibm_is_vpc hub_true { + 
name = "%s" + dns { + enable_hub = %t + } + } + + resource ibm_is_vpc hub_false_delegated { + depends_on = [ ibm_dns_custom_resolver.test_hub_true ] + name = "%s" + dns { + enable_hub = %t + resolver { + type = "delegated" + vpc_id = ibm_is_vpc.hub_true.id + dns_binding_name = "%s" + } + } + } + + resource "ibm_is_subnet" "hub_true_sub1" { + name = "%s" + vpc = ibm_is_vpc.hub_true.id + zone = "%s" + total_ipv4_address_count = 16 + } + resource "ibm_is_subnet" "hub_true_sub2" { + name = "%s" + vpc = ibm_is_vpc.hub_true.id + zone = "%s" + total_ipv4_address_count = 16 + } + resource "ibm_resource_instance" "dns-cr-instance" { + name = "%s" + resource_group_id = data.ibm_resource_group.rg.id + location = "global" + service = "dns-svcs" + plan = "standard-dns" + } + resource "ibm_dns_custom_resolver" "test_hub_true" { + name = "%s" + instance_id = ibm_resource_instance.dns-cr-instance.guid + description = "new test CR - TF" + high_availability = true + enabled = true + locations { + subnet_crn = ibm_is_subnet.hub_true_sub1.crn + enabled = true + } + locations { + subnet_crn = ibm_is_subnet.hub_true_sub2.crn + enabled = true + } + } + `, vpcname, enableHub, vpcname2, enablehubfalse, bindingname, subnetname1, acc.ISZoneName, subnetname2, acc.ISZoneName, resourceinstance, resolver1) + } func testAccCheckIBMISVPCDnsDelegatedUpdate1Config(vpcname, vpcname2, subnetname1, subnetname2, subnetname3, subnetname4, resourceinstance, resolver1, resolver2, bindingname string, enableHub, enablehubfalse bool) string { return fmt.Sprintf(` diff --git a/ibm/service/vpc/resource_ibm_is_vpn_server.go b/ibm/service/vpc/resource_ibm_is_vpn_server.go index 23a6e5ecdb..fa0042b9af 100644 --- a/ibm/service/vpc/resource_ibm_is_vpn_server.go +++ b/ibm/service/vpc/resource_ibm_is_vpn_server.go @@ -1027,9 +1027,9 @@ func isWaitForVPNServerDeleted(context context.Context, sess *vpcv1.VpcV1, d *sc if response != nil && response.StatusCode == 404 { return vpnServer, isVPNServerStatusDeleted, nil } - return vpnServer, isVPNServerStatusDeleting, fmt.Errorf("The VPC route %s failed to delete: %s\n%s", d.Id(), err, response) + return vpnServer, *vpnServer.LifecycleState, fmt.Errorf("The VPC vpn server %s failed to delete: %s\n%s", d.Id(), err, response) } - return vpnServer, isVPNServerStatusDeleting, nil + return vpnServer, *vpnServer.LifecycleState, nil }, Timeout: d.Timeout(schema.TimeoutDelete), diff --git a/ibm/service/vpc/resource_ibm_is_vpn_server_route.go b/ibm/service/vpc/resource_ibm_is_vpn_server_route.go index ed38a19bd8..95ce9764cd 100644 --- a/ibm/service/vpc/resource_ibm_is_vpn_server_route.go +++ b/ibm/service/vpc/resource_ibm_is_vpn_server_route.go @@ -422,9 +422,9 @@ func isWaitForVPNServerRouteDeleted(context context.Context, sess *vpcv1.VpcV1, if response != nil && response.StatusCode == 404 { return vpnServerRoute, isVPNServerRouteStatusDeleted, nil } - return vpnServerRoute, isVPNServerRouteStatusDeleting, fmt.Errorf("The VPC route %s failed to delete: %s\n%s", d.Id(), err, response) + return vpnServerRoute, *vpnServerRoute.LifecycleState, fmt.Errorf("The VPC route %s failed to delete: %s\n%s", d.Id(), err, response) } - return vpnServerRoute, isVPNServerRouteStatusDeleting, nil + return vpnServerRoute, *vpnServerRoute.LifecycleState, nil }, Timeout: d.Timeout(schema.TimeoutDelete), diff --git a/version/version.go b/version/version.go index 19638a80ae..5ee2b1e015 100644 --- a/version/version.go +++ b/version/version.go @@ -5,7 +5,7 @@ import ( ) // Version is the current provider main version -const Version 
= "1.60.0" +const Version = "1.62.0" // GitCommit is the git commit that was compiled. This will be filled in by the compiler. var GitCommit string diff --git a/website/docs/d/cbr_rule.html.markdown b/website/docs/d/cbr_rule.html.markdown index 07925ccca9..cf89cc6106 100644 --- a/website/docs/d/cbr_rule.html.markdown +++ b/website/docs/d/cbr_rule.html.markdown @@ -31,7 +31,7 @@ In addition to all argument references listed, you can access the following attr * `id` - The unique identifier of the cbr_rule. * `contexts` - (List) The contexts this rule applies to. - * Constraints: The maximum length is `1000` items. The minimum length is `1` item. + * Constraints: The maximum length is `1000` items. The minimum length is `0` items. Nested scheme for **contexts**: * `attributes` - (List) The attributes. * Constraints: The minimum length is `1` item. diff --git a/website/docs/d/cbr_zone.html.markdown b/website/docs/d/cbr_zone.html.markdown index e91a07c493..a0f495a59a 100644 --- a/website/docs/d/cbr_zone.html.markdown +++ b/website/docs/d/cbr_zone.html.markdown @@ -36,7 +36,7 @@ In addition to all argument references listed, you can access the following attr * `address_count` - (Integer) The number of addresses in the zone. * `addresses` - (List) The list of addresses in the zone. - * Constraints: The maximum length is `1000` items. The minimum length is `1` item. + * Constraints: The maximum length is `1000` items. The minimum length is `0` items. Nested scheme for **addresses**: * `ref` - (List) A service reference value. Nested scheme for **ref**: diff --git a/website/docs/d/cd_toolchain_tool_securitycompliance.html.markdown b/website/docs/d/cd_toolchain_tool_securitycompliance.html.markdown index bf3493ad3e..bff8d39dca 100644 --- a/website/docs/d/cd_toolchain_tool_securitycompliance.html.markdown +++ b/website/docs/d/cd_toolchain_tool_securitycompliance.html.markdown @@ -46,7 +46,7 @@ After your data source is created, you can read values from the following attrib Nested schema for **parameters**: * `attachment_id` - (String) An attachment ID. An attachment is configured under a profile to define how a scan will be run. To find the attachment ID, in the browser, in the attachments list, click on the attachment link, and a panel appears with a button to copy the attachment ID. This parameter is only relevant when the `use_profile_attachment` parameter is `enabled`. * Constraints: The value must match regular expression `/^[-0-9a-f]{32,36}$/`. - * `evidence_namespace` - (String) The kind of pipeline evidence to be displayed in Security and Compliance Center for this toolchain. The values are; `cd` which will use evidence generated by a Continuous Deployment pipeline, or `cc` which will use evidence generated by a Continuous Compliance pipeline. + * `evidence_namespace` - (String) The kind of pipeline evidence to be displayed in Security and Compliance Center for this toolchain. The values are; `cd` which will use evidence generated by a Continuous Deployment (CD) pipeline, or `cc` which will use evidence generated by a Continuous Compliance (CC) pipeline. The default behavior is to use the CD evidence. * Constraints: Allowable values are: `cd`, `cc`. * `evidence_repo_url` - (String) The URL to a Git repository evidence locker. The DevSecOps toolchain templates will collect and store evidence for scans and tasks in an evidence repository. This evidence URL should match the `repo_url` for a Git tool integration in this toolchain. 
The DevSecOps toolchain goals in the Security and Compliance Center will check the evidence repository for the pass or fail results for those goals. * `instance_crn` - (String) The Security and Compliance Center service instance CRN (Cloud Resource Name). It is recommended to provide an instance CRN, but when absent, the oldest service instance will be used. This parameter is only relevant when the `use_profile_attachment` parameter is `enabled`. @@ -55,7 +55,7 @@ Nested schema for **parameters**: * `profile_name` - (String) The name of a Security and Compliance Center profile. Usually, use the "IBM Cloud Framework for Financial Services" predefined profile, which contains the DevSecOps Toolchain rules. Or use a user-authored customized profile that has been configured to contain those rules. This parameter is only relevant when the `use_profile_attachment` parameter is `enabled`. * `profile_version` - (String) The version of a Security and Compliance Center profile, in SemVer format, like '0.0.0'. This parameter is only relevant when the `use_profile_attachment` parameter is `enabled`. * `scc_api_key` - (String) The IBM Cloud API key used to access the Security and Compliance Center service, for the use profile with attachment setting. This parameter is only relevant when the `use_profile_attachment` parameter is `enabled`. You can use a toolchain secret reference for this parameter. For more information, see [Protecting your sensitive data in Continuous Delivery](https://cloud.ibm.com/docs/ContinuousDelivery?topic=ContinuousDelivery-cd_data_security#cd_secure_credentials). - * `use_profile_attachment` - (String) Set to `enabled` to enable use profile with attachment, so that the scripts in the pipeline can interact with the Security and Compliance Center service. When enabled, other parameters become relevant; `scc_api_key`, `instance_crn`, `profile_name`, `profile_version`, `attachment_id`. + * `use_profile_attachment` - (String) Set to `enabled` to enable use profile with attachment, so that the scripts in the pipeline can interact with the Security and Compliance Center service to perform pre-deploy validation against compliance rules for Continuous Deployment (CD) and compliance monitoring for Continuous Compliance (CC). When enabled, other parameters become relevant; `scc_api_key`, `instance_crn`, `profile_name`, `profile_version`, `attachment_id`. * Constraints: Allowable values are: `disabled`, `enabled`. * `referent` - (List) Information on URIs to access this resource through the UI or API. diff --git a/website/docs/d/is_image.html.markdown b/website/docs/d/is_image.html.markdown index 4b7f19ebd2..e5a27a0ada 100644 --- a/website/docs/d/is_image.html.markdown +++ b/website/docs/d/is_image.html.markdown @@ -25,7 +25,7 @@ provider "ibm" { ```terraform data "ibm_is_image" "example" { - name = "centos-7.x-amd64" + name = "ibm-centos-7-9-minimal-amd64-12" } ``` ```terraform @@ -69,5 +69,29 @@ In addition to all argument reference list, you can access the following attribu - `id` - (String) The unique identifier of the image. - `obsolescence_at` - (String) The obsolescence date and time (UTC) for this image. If absent, no obsolescence date and time has been set. - `os` - (String) The name of the operating system. +- `operating_system` - (List) The operating system details. + + Nested scheme for `operating_system`: + - `architecture` - (String) The operating system architecture. 
+ - `dedicated_host_only` - (Bool) Images with this operating system can only be used on dedicated hosts or dedicated host groups. + - `display_name` - (String) A unique, display-friendly name for the operating system. + - `family` - (String) The software family for this operating system. + - `href` - (String) The URL for this operating system. + - `name` - (String) The globally unique name for this operating system. + - `vendor` - (String) The vendor of the operating system. + - `version` - (String) The major release version of this operating system. +- `resource_group` - (List) The resource group object, for this image. + + Nested scheme for `resource_group`: + - `href` - (String) The URL for this resource group. + - `id` - (String) The unique identifier for this resource group. + - `name` - (String) The user-defined name for this resource group. - `status` - (String) The status of this image. +- `status_reasons` - (List) The reasons for the current status (if any). + + Nested scheme for `status_reasons`: + - `code` - (String) The status reason code + - `message` - (String) An explanation of the status reason + - `more_info` - (String) Link to documentation about this status reason + - `source_volume` - The source volume id of the image. diff --git a/website/docs/d/is_images.html.markdown b/website/docs/d/is_images.html.markdown index d5a63840af..a8dc652a19 100644 --- a/website/docs/d/is_images.html.markdown +++ b/website/docs/d/is_images.html.markdown @@ -36,11 +36,11 @@ data "ibm_is_images" "ds_images" { Review the argument references that you can specify for your data source. -* `catalog_managed` - (Optional, bool) Lists only those images which are managed as part of a catalog offering. -* `resource_group` - (Optional, string) The id of the resource group. -* `name` - (Optional, string) The name of the image. -* `visibility` - (Optional, string) Visibility of the image. -* `status` - (Optional, string) Status of the image. +- `catalog_managed` - (Optional, bool) Lists only those images which are managed as part of a catalog offering. +- `resource_group` - (Optional, string) The id of the resource group. +- `name` - (Optional, string) The name of the image. +- `visibility` - (Optional, string) Visibility of the image. Accepted values : **private**, **public** +- `status` - (Optional, string) Status of the image. Accepted value : **available**, **deleting**, **deprecated**, **failed**, **obsolete**, **pending**, **unusable** ## Attribute reference You can access the following attribute references after your data source is created. @@ -65,7 +65,29 @@ You can access the following attribute references after your data source is crea - `id` - (String) The unique identifier for this image. - `name` - (String) The name for this image. - `os` - (String) The name of the Operating System. + - `operating_system` - (List) The operating system details. + + Nested scheme for `operating_system`: + - `architecture` - (String) The operating system architecture. + - `dedicated_host_only` - (Bool) Images with this operating system can only be used on dedicated hosts or dedicated host groups. + - `display_name` - (String) A unique, display-friendly name for the operating system. + - `family` - (String) The software family for this operating system. + - `href` - (String) The URL for this operating system. + - `name` - (String) The globally unique name for this operating system. + - `vendor` - (String) The vendor of the operating system. 
+ - `version` - (String) The major release version of this operating system. + - `resource_group` - (List) The resource group object, for this image. + Nested scheme for `resource_group`: + - `href` - (String) The URL for this resource group. + - `id` - (String) The unique identifier for this resource group. + - `name` - (String) The user-defined name for this resource group. - `status` - (String) The status of this image. + - `status_reasons` - (List) The reasons for the current status (if any). + + Nested scheme for `status_reasons`: + - `code` - (String) The status reason code + - `message` - (String) An explanation of the status reason + - `more_info` - (String) Link to documentation about this status reason - `visibility` - (String) The visibility of the image public or private. - `source_volume` - The source volume id of the image. diff --git a/website/docs/d/is_share.html.markdown b/website/docs/d/is_share.html.markdown index c338555af8..45a1875c8b 100644 --- a/website/docs/d/is_share.html.markdown +++ b/website/docs/d/is_share.html.markdown @@ -48,9 +48,14 @@ The following attributes are exported: - `created_at` - The date and time that the file share is created. - `crn` - The CRN for this share. - `encryption` - The type of encryption used for this file share. -- `encryption_key` - The CRN of the key used to encrypt this file share. Nested `encryption_key` blocks have the following structure: +- `encryption_key` - The CRN of the key used to encrypt this file share. - `href` - The URL for this share. - `iops` - The maximum input/output operation performance bandwidth per second for the file share. +- `latest_sync` - (List) Information about the latest synchronization for this file share. +Nested `latest_sync` blocks have the following structure: + - `completed_at` - (String) The completed date and time of last synchronization between the replica share and its source. + - `data_transferred` - (Integer) The data transferred (in bytes) in the last synchronization between the replica and its source. + - `started_at` - (String) The start date and time of last synchronization between the replica share and its source. - `latest_job` - The latest job associated with this file share.This property will be absent if no jobs have been created for this file share. Nested `latest_job` blocks have the following structure: - `status` - The status of the file share job - `status_reasons` - The reasons for the file share job status (if any). Nested `status_reasons` blocks have the following structure: diff --git a/website/docs/d/is_shares.html.markdown b/website/docs/d/is_shares.html.markdown index 122b519f9e..88a6099736 100644 --- a/website/docs/d/is_shares.html.markdown +++ b/website/docs/d/is_shares.html.markdown @@ -32,10 +32,15 @@ The following attributes are exported: - `created_at` - The date and time that the file share is created. - `crn` - The CRN for this share. - `encryption` - The type of encryption used for this file share. - - `encryption_key` - The CRN of the key used to encrypt this file share. Nested `encryption_key` blocks have the following structure: + - `encryption_key` - The CRN of the key used to encrypt this file share. - `href` - The URL for this share. - `id` - The unique identifier for this file share. - `iops` - The maximum input/output operation performance bandwidth per second for the file share. + - `latest_sync` - (List) Information about the latest synchronization for this file share. 
+ Nested `latest_sync` blocks have the following structure: + - `completed_at` - (String) The completed date and time of last synchronization between the replica share and its source. + - `data_transferred` - (Integer) The data transferred (in bytes) in the last synchronization between the replica and its source. + - `started_at` - (String) The start date and time of last synchronization between the replica share and its source. - `latest_job` - The latest job associated with this file share.This property will be absent if no jobs have been created for this file share. Nested `latest_job` blocks have the following structure: - `status` - The status of the file share job - `status_reasons` - The reasons for the file share job status (if any). Nested `status_reasons` blocks have the following structure: diff --git a/website/docs/d/is_vpc_routing_table.html.markdown b/website/docs/d/is_vpc_routing_table.html.markdown index 752281c49c..04f6388bcd 100644 --- a/website/docs/d/is_vpc_routing_table.html.markdown +++ b/website/docs/d/is_vpc_routing_table.html.markdown @@ -52,6 +52,11 @@ In addition to all argument references listed, you can access the following attr - `accept_routes_from` - (List) The filters specifying the resources that may create routes in this routing table.At present, only the `resource_type` filter is permitted, and only the `vpn_gateway` value is supported, but filter support is expected to expand in the future. Nested scheme for **accept_routes_from**: - `resource_type` - (String) The resource type. +- `advertise_routes_to` - (Optional, List) The ingress sources to advertise routes to. Routes in the table with `advertise` enabled will be advertised to these sources. + + ->**Options** An ingress source that routes can be advertised to:
+ **•** `direct_link` (requires `route_direct_link_ingress` be set to `true`)
+ **•** `transit_gateway` (requires `route_transit_gateway_ingress` be set to `true`) - `created_at` - (String) The date and time that this routing table was created. - `href` - (String) The URL for this routing table. - `id` - (String) The unique identifier of the RoutingTable. diff --git a/website/docs/d/is_vpc_routing_table_route.html.markdown b/website/docs/d/is_vpc_routing_table_route.html.markdown index 858af62286..ace775cc16 100644 --- a/website/docs/d/is_vpc_routing_table_route.html.markdown +++ b/website/docs/d/is_vpc_routing_table_route.html.markdown @@ -54,6 +54,7 @@ Review the argument reference that you can specify for your data source. In addition to all argument references listed, you can access the following attribute references after your data source is created. - `action` - (String) The action to perform with a packet matching the route, allowable values are: `delegate`, `delegate_vpc`, `deliver`, `drop`. +- `advertise` - (Boolean) Indicates whether this route will be advertised to the ingress sources specified by the `advertise_routes_to` routing table property. - `delegate`: delegate to the system's built-in routes - `delegate_vpc`: delegate to the system's built-in routes, ignoring Internet-bound routes - `deliver`: deliver the packet to the specified `next_hop` diff --git a/website/docs/d/is_vpc_routing_table_routes.html.markdown b/website/docs/d/is_vpc_routing_table_routes.html.markdown index 2d906396de..7ada595c45 100644 --- a/website/docs/d/is_vpc_routing_table_routes.html.markdown +++ b/website/docs/d/is_vpc_routing_table_routes.html.markdown @@ -72,6 +72,7 @@ In addition to all argument reference list, you can access the following attribu - `resource_type` - (String) The resource type. - Constraints: Allowable values are: `vpn_gateway`. The maximum length is `128` characters. The minimum length is `1` character. The value must match regular expression `/^[a-z][a-z0-9]*(_[a-z0-9]+)*$/`. - `action` - (String) The action to perform with a packet matching the route. + - `advertise` - (Boolean) Indicates whether this route will be advertised to the ingress sources specified by the `advertise_routes_to` routing table property. - `destination` - (String) The destination of the route. - `next_hop` - (String) The next hop address of the route. - `origin` - (String) The origin of this route:- `service`: route was directly created by a service - `user`: route was directly created by a userThe enumerated values for this property are expected to expand in the future. When processing this property, check for and log unknown values. Optionally halt processing and surface the error, or bypass the route on which the unexpected property value was encountered. diff --git a/website/docs/d/is_vpc_routing_tables.html.markdown b/website/docs/d/is_vpc_routing_tables.html.markdown index b38983b1be..d7b750a62d 100644 --- a/website/docs/d/is_vpc_routing_tables.html.markdown +++ b/website/docs/d/is_vpc_routing_tables.html.markdown @@ -48,6 +48,11 @@ In addition to the argument reference list, you can access the following attribu - `accept_routes_from` - (List) The filters specifying the resources that may create routes in this routing table.At present, only the `resource_type` filter is permitted, and only the `vpn_gateway` value is supported, but filter support is expected to expand in the future. Nested scheme for **accept_routes_from**: - `resource_type` - (String) The resource type. + - `advertise_routes_to` - (Optional, List) The ingress sources to advertise routes to. 
Routes in the table with `advertise` enabled will be advertised to these sources. + + ->**Options** Ingress sources that routes can be advertised to:
+ **•** `direct_link` (requires `route_direct_link_ingress` be set to `true`)
+ **•** `transit_gateway` (requires `route_transit_gateway_ingress` be set to `true`) - `created_at` - (Timestamp) The date and time the routing table was created. - `href` - (String) The routing table URL. - `is_default` - (String) Indicates whether the default routing table. diff --git a/website/docs/d/is_vpn_gateway.html.markdown b/website/docs/d/is_vpn_gateway.html.markdown index da67f40b90..12ee7f6fcf 100644 --- a/website/docs/d/is_vpn_gateway.html.markdown +++ b/website/docs/d/is_vpn_gateway.html.markdown @@ -65,7 +65,6 @@ In addition to all argument references listed, you can access the following attr - `private_ip_address` - (String) The private IP address assigned to the VPN gateway member. This property will be present only when the VPN gateway status is `available`. This property may add support for IPv6 addresses in the future. When processing a value in this property, verify that the address is in an expected format. If it is not, log an error. Optionally halt processing and surface the error, or bypass the resource on which the unexpected IP address format was encountered. Same as `primary_ip.0.address` - `public_ip_address` - (String) The public IP address assigned to the VPN gateway member. This property may add support for IPv6 addresses in the future. When processing a value in this property, verify that the address is in an expected format. If it is not, log an error. Optionally halt processing and surface the error, or bypass the resource on which the unexpected IP address format was encountered. - `role` - (String) The high availability role assigned to the VPN gateway member. - - `status` - (String) The status of the VPN gateway member. - `mode` - (String) Route mode VPN gateway. @@ -79,7 +78,6 @@ In addition to all argument references listed, you can access the following attr - `resource_type` - (String) The resource type. -- `status` - (String) The status of the VPN gateway. - `health_reasons` - (List) The reasons for the current health_state (if any). Nested scheme for `health_reasons`: diff --git a/website/docs/d/is_vpn_gateways.html.markdown b/website/docs/d/is_vpn_gateways.html.markdown index 8c7a66df30..457edbcfc7 100644 --- a/website/docs/d/is_vpn_gateways.html.markdown +++ b/website/docs/d/is_vpn_gateways.html.markdown @@ -56,11 +56,9 @@ In addition to all argument reference list, you can access the following attribu Nested scheme for `private_ip`: - `address` - (String) The IP address. If the address has not yet been selected, the value will be 0.0.0.0. This property may add support for IPv6 addresses in the future. When processing a value in this property, verify that the address is in an expected format. If it is not, log an error. Optionally halt processing and surface the error, or bypass the resource on which the unexpected IP address format was encountered. - `private_address` - (String) The private IP address assigned to the VPN gateway member. Same as `private_ip.0.address`.
- - `status` - (String) The status of the VPN gateway member. - `resource_type` - (String) The resource type, supported value is `vpn_gateway`. - - `status` - (String) The status of the VPN gateway, supported values are **available**, **deleting**, **failed**, **pending**. - `health_reasons` - (List) The reasons for the current health_state (if any). Nested scheme for `health_reasons`: diff --git a/website/docs/d/pi_cloud_connection.html.markdown b/website/docs/d/pi_cloud_connection.html.markdown index ca42eb4117..2e6de4d4bd 100644 --- a/website/docs/d/pi_cloud_connection.html.markdown +++ b/website/docs/d/pi_cloud_connection.html.markdown @@ -7,11 +7,9 @@ description: |- --- # ibm_pi_cloud_connection - Retrieve information about an existing IBM Cloud Power Virtual Server Cloud cloud connection. For more information, about IBM power virtual server cloud, see [getting started with IBM Power Systems Virtual Servers](https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-getting-started). ## Example usage - ```terraform data "ibm_pi_cloud_connection" "example" { pi_cloud_connection_name = "test_cloud_connection" @@ -20,15 +18,12 @@ data "ibm_pi_cloud_connection" "example" { ``` **Notes** - - Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. - If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: - - `region` - `lon` - `zone` - `lon04` - Example usage: - +Example usage: ```terraform provider "ibm" { region = "lon" @@ -37,28 +32,26 @@ data "ibm_pi_cloud_connection" "example" { ``` ## Argument reference - Review the argument references that you can specify for your data source. - `pi_cloud_instance_id` - (Required, String) The GUID of the service instance associated with an account. - `pi_cloud_connection_name` - (Required, String) The cloud connection name to be used. ## Attribute reference - In addition to all argument reference list, you can access the following attribute references after your data source is created. -- `id` - (String) The unique identifier of the cloud connection. -- `classic_enabled` - (Bool) Is classic endpoint destination enabled? +- `classic_enabled` - (Boolean) Enable classic endpoint destination. - `connection_mode` - (String) Type of service the gateway is attached to. -- `global_routing` - (String) Is global routing enabled for this cloud connection. -- `gre_destination_address` - (String) The GRE destination IP address. -- `gre_source_address` - (String) The GRE auto-assigned source IP address. +- `global_routing` - (String) Enable global routing for this cloud connection. +- `gre_destination_address` - (String) GRE destination IP address. +- `gre_source_address` - (String) GRE auto-assigned source IP address. +- `id` - (String) The unique identifier of the cloud connection. - `ibm_ip_address` - (String) The IBM IP address. -- `metered` - (String) Is metered enabled for this cloud connection. -- `networks` - (Set of String) Set of Networks attached to this cloud connection. +- `metered` - (String) Enable metering for this cloud connection. +- `networks` - (Set) Set of Networks attached to this cloud connection. - `port` - (String) Port. - `speed` - (Integer) Speed of the cloud connection (speed in megabits per second). - `status` - (String) Link status. - `user_ip_address` - (String) User IP address. -- `vpc_crns` - (Set of String) Set of VPCs attached to this cloud connection. -- `vpc_enabled` - (Bool) Is VPC enabled for this cloud connection? 
+- `vpc_crns` - (Set) Set of VPCs attached to this cloud connection. +- `vpc_enabled` - (Boolean) Enable VPC for this cloud connection. diff --git a/website/docs/d/pi_cloud_connections.html.markdown b/website/docs/d/pi_cloud_connections.html.markdown index 8925fd81e6..0ab04e36da 100644 --- a/website/docs/d/pi_cloud_connections.html.markdown +++ b/website/docs/d/pi_cloud_connections.html.markdown @@ -7,11 +7,9 @@ description: |- --- # ibm_pi_cloud_connections - Retrieve information about all cloud connections as a read-only data source. For more information, about IBM power virtual server cloud, see [getting started with IBM Power Systems Virtual Servers](https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-getting-started). ## Example usage - ```terraform data "ibm_pi_cloud_connections" "example" { pi_cloud_instance_id = "" @@ -19,15 +17,13 @@ data "ibm_pi_cloud_connections" "example" { ``` **Notes** - - Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. - If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: - `region` - `lon` - `zone` - `lon04` - Example usage: - +Example usage: ```terraform provider "ibm" { region = "lon" @@ -36,32 +32,29 @@ data "ibm_pi_cloud_connections" "example" { ``` ## Argument reference - Review the argument references that you can specify for your data source. - `pi_cloud_instance_id` - (Required, String) The GUID of the service instance associated with an account. ## Attribute reference - In addition to all argument reference list, you can access the following attribute references after your data source is created. - `connections` - (List) List of all the Cloud Connections. Nested scheme for `connections`: - - - `classic_enabled` - (Bool) Is Classic endpoint destination enabled. + - `classic_enabled` - (Boolean) Enable classic endpoint destination. - `cloud_connection_id` - (String) The unique identifier of the cloud connection. - - `global_routing` - (String) Is global routing enabled for this cloud connection. + - `connection_mode` - (String) Type of service the gateway is attached to. + - `global_routing` - (String) Enable global routing for this cloud connection. - `gre_destination_address` - (String) GRE destination IP address. - `gre_source_address` - (String) GRE auto-assigned source IP address. - `ibm_ip_address` - (String) IBM IP address. - - `metered` - (String) Is metered enabled for this cloud connection. + - `metered` - (String) Enable metering for this cloud connection. - `name` - (String) Name of the cloud connection. - - `networks` - (Set of String) Set of Networks attached to this cloud connection. + - `networks` - (Set) Set of Networks attached to this cloud connection. - `port` - (String) Port. - `speed` - (Integer) Speed of the cloud connection (speed in megabits per second). - `status` - (String) Link status. - `user_ip_address` - (String) User IP address. - - `vpc_crns` - (Set of String) Set of VPCs attached to this cloud connection. - - `vpc_enabled` - (Bool) Is VPC enabled for this cloud connection. - - `connection_mode` - (String) Type of service the gateway is attached to. + - `vpc_crns` - (Set) Set of VPCs attached to this cloud connection. + - `vpc_enabled` - (Boolean) Enable VPC for this cloud connection. 
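As an illustrative sketch (not part of the patch above), the `connections` list documented for `ibm_pi_cloud_connections` can be consumed with a `for` expression. The cloud-instance GUID below is a placeholder, and the element fields `name` and `speed` are assumed to be exposed exactly as listed in the nested scheme above.

```terraform
data "ibm_pi_cloud_connections" "example" {
  pi_cloud_instance_id = "49fba6c9-23f8-40bc-9899-aca322ee7d5b"
}

# Map each cloud connection name to its speed in megabits per second.
output "cloud_connection_speeds" {
  value = {
    for conn in data.ibm_pi_cloud_connections.example.connections :
    conn.name => conn.speed
  }
}
```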
diff --git a/website/docs/d/pi_cloud_instance.html.markdown b/website/docs/d/pi_cloud_instance.html.markdown index 5e65926b84..32dcbb43ba 100644 --- a/website/docs/d/pi_cloud_instance.html.markdown +++ b/website/docs/d/pi_cloud_instance.html.markdown @@ -10,21 +10,19 @@ description: |- Retrieve information about an existing IBM Power Virtual Server Cloud Instance as a read-only data source. For more information, about IBM power virtual server cloud, see [getting started with IBM Power Systems Virtual Servers](https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-getting-started). ## Example usage - ```terraform data "ibm_pi_cloud_instance" "ds_cloud_instance" { pi_cloud_instance_id = "49fba6c9-23f8-40bc-9899-aca322ee7d5b" } ``` -## Notes: -* Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. -* If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: - * `region` - `lon` - * `zone` - `lon04` +**Notes** +- Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. +- If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: + - `region` - `lon` + - `zone` - `lon04` Example usage: - ```terraform provider "ibm" { region = "lon" @@ -46,7 +44,7 @@ In addition to the argument reference list, you can access the following attribu - `pvm_instances` - (List) PVM instances owned by the Cloud Instance. Nested scheme for `pvm_instances`: - - `creation_date` - (String) Date/Time of PVM creation. + - `creation_date` - (String) Date of PVM instance creation. - `href` - (String) Link to Cloud Instance resource. - `id` - (String) PVM Instance ID. - `name` - (String) Name of the server. @@ -59,4 +57,3 @@ In addition to the argument reference list, you can access the following attribu - `total_processors_consumed` - (String) The total processors consumed by this service instance. - `total_ssd_storage_consumed` - (String) The total SSD Storage consumed by this service instance. - `total_standard_storage_consumed` - (String) The total Standard Storage consumed by this service instance. - diff --git a/website/docs/d/pi_console_languages.html.markdown b/website/docs/d/pi_console_languages.html.markdown index d5f7f36afd..e679fedd62 100644 --- a/website/docs/d/pi_console_languages.html.markdown +++ b/website/docs/d/pi_console_languages.html.markdown @@ -1,5 +1,4 @@ --- - subcategory: "Power Systems" layout: "ibm" page_title: "IBM: pi_console_languages" @@ -8,11 +7,9 @@ description: |- --- # ibm_pi_console_languages - Retrieve information about all the available Console Languages for an Instance. For more information, see [getting started with IBM Power Systems Virtual Servers](https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-getting-started). ## Example usage - ```terraform data "ibm_pi_console_languages" "example" { pi_cloud_instance_id = "" @@ -21,14 +18,12 @@ data "ibm_pi_console_languages" "example" { ``` **Notes** - -* Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. -* If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: - * `region` - `lon` - * `zone` - `lon04` +- Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. 
+- If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: + - `region` - `lon` + - `zone` - `lon04` Example usage: - ```terraform provider "ibm" { region = "lon" @@ -37,14 +32,12 @@ Example usage: ``` ## Argument reference - Review the argument references that you can specify for your data source. - `pi_cloud_instance_id` - (Required, String) The GUID of the service instance associated with an account. - `pi_instance_name` - (Required, String) The unique identifier or name of the instance. ## Attribute reference - In addition to all argument reference list, you can access the following attribute references after your data source is created. - `console_languages` - (List) List of all the Console Languages. diff --git a/website/docs/d/pi_instance_ip.html.markdown b/website/docs/d/pi_instance_ip.html.markdown index 541891d09c..8f1942cc83 100644 --- a/website/docs/d/pi_instance_ip.html.markdown +++ b/website/docs/d/pi_instance_ip.html.markdown @@ -1,5 +1,4 @@ --- - subcategory: "Power Systems" layout: "ibm" page_title: "IBM: pi_instance_ip" @@ -11,7 +10,6 @@ description: |- Retrieve information about a Power Systems Virtual Server instance IP address. For more information, about Power Systems Virtual Server instance IP address, see [configuring and adding a private network subnet](https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-configuring-subnet). ## Example usage - ```terraform data "ibm_pi_instance_ip" "ds_instance_ip" { pi_instance_name = "terraform-test-instance" @@ -19,12 +17,14 @@ data "ibm_pi_instance_ip" "ds_instance_ip" { pi_cloud_instance_id = "49fba6c9-23f8-40bc-9899-aca322ee7d5b" } ``` + **Notes** -* Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. -* If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: - * `region` - `lon` - * `zone` - `lon04` - Example usage: +- Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. +- If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: + - `region` - `lon` + - `zone` - `lon04` + +Example usage: ```terraform provider "ibm" { region = "lon" @@ -36,7 +36,7 @@ data "ibm_pi_instance_ip" "ds_instance_ip" { Review the argument references that you can specify for your data source. - `pi_cloud_instance_id` - (Required, String) The GUID of the service instance associated with an account. -- `pi_instance_name` - (Required, String) The name of the instance. +- `pi_instance_name` - (Required, String) The unique identifier or name of the instance. - `pi_network_name` - (Required, String) The subnet that the instance belongs to. diff --git a/website/docs/d/pi_instance_snapshots.html.markdown b/website/docs/d/pi_instance_snapshots.html.markdown index cbe97e86f3..7d39c0d0d5 100644 --- a/website/docs/d/pi_instance_snapshots.html.markdown +++ b/website/docs/d/pi_instance_snapshots.html.markdown @@ -1,5 +1,4 @@ --- - subcategory: "Power Systems" layout: "ibm" page_title: "IBM: pi_instance_snapshots" @@ -11,21 +10,19 @@ description: |- Retrieve information about a Power Systems Virtual Server instance snapshots. For more information, about Power Virtual Server instance snapshots, see [getting started with IBM Power Systems Virtual Servers](https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-getting-started). 
## Example usage - ```terraform data "ibm_pi_instance_snapshots" "ds_instance_snapshots" { pi_cloud_instance_id = "49fba6c9-23f8-40bc-9899-aca322ee7d5b" } ``` -**Note** -* Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. -* If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: - * `region` - `lon` - * `zone` - `lon04` +**Notes** +- Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. +- If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: + - `region` - `lon` + - `zone` - `lon04` - Example usage: - +Example usage: ```terraform provider "ibm" { region = "lon" @@ -41,15 +38,15 @@ Review the argument references that you can specify for your data source. ## Attribute reference In addition to all argument reference list, you can access the following attribute references after your data source is created. -- `instance_snapshots` - The list of Power Virtual Machine instance snapshots within the given cloud instance. +- `instance_snapshots` - (List) List of Power Virtual Machine instance snapshots within the given cloud instance. Nested scheme for `instance_snapshots`: - `action` - (String) Action performed on the instance snapshot. - - `creation_date` - (String) The creation date. + - `creation_date` - (String) Date of snapshot creation. - `description` - (String) The description of the snapshot. - `id` - (String) The unique identifier of the Power Systems Virtual Machine instance snapshot. - - `last_updated_date` - (String) The last Update Date. + - `last_updated_date` - (String) Date of last update. - `name` - (String) The name of the Power Systems Virtual Machine instance snapshot. - `percent_complete` - (Integer) The snapshot completion percentage. - `status` - (String) The status of the Power Virtual Machine instance snapshot. - - `volume_snapshots` - (Map) A map of volume snapshots included in the Power Virtual Machine instance snapshot. \ No newline at end of file + - `volume_snapshots` - (Map) A map of volume snapshots included in the Power Virtual Machine instance snapshot. diff --git a/website/docs/d/pi_instance_volumes.html.markdown b/website/docs/d/pi_instance_volumes.html.markdown index 1fc2583b2b..6c17530538 100644 --- a/website/docs/d/pi_instance_volumes.html.markdown +++ b/website/docs/d/pi_instance_volumes.html.markdown @@ -1,5 +1,4 @@ --- - subcategory: "Power Systems" layout: "ibm" page_title: "IBM: pi_instance_volumes" @@ -8,26 +7,25 @@ description: |- --- # ibm_pi_instance_volumes -Retrieves information about a persistent storage volume that is mounted to a Power Systems Virtual Server instance. For more information, about power instance volume, see [snapshotting, cloning, and restoring](https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-volume-snapshot-clone). +Retrieves information about the persistent storage volumes that are mounted to a Power Systems Virtual Server instance. For more information, about power instance volume, see [snapshotting, cloning, and restoring](https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-volume-snapshot-clone). ## Example usage -The following example retrieves information about the `volume_1` volume that is mounted to the Power Systems Virtual Server instance with the ID. +The following example retrieves information about the volumes attached to the `terraform-test-instance` instance. 
```terraform data "ibm_pi_instance_volumes" "ds_volumes" { - pi_instance_name = "volume_1" + pi_instance_name = "terraform-test-instance" pi_cloud_instance_id = "49fba6c9-23f8-40bc-9899-aca322ee7d5b" } ``` **Notes** -* Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. -* If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: - * `region` - `lon` - * `zone` - `lon04` - - Example usage: +- Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. +- If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: + - `region` - `lon` + - `zone` - `lon04` +Example usage: ```terraform provider "ibm" { region = "lon" @@ -39,21 +37,21 @@ data "ibm_pi_instance_volumes" "ds_volumes" { Review the argument references that you can specify for your data source. - `pi_cloud_instance_id` - (Required, String) The GUID of the service instance associated with an account. -- `pi_volume_name` - (Required, String) The name of the volume for which you want to retrieve detailed information. +- `pi_instance_name` - (Required, String) The unique identifier or name of the instance. ## Attribute reference In addition to all argument reference list, you can access the following attribute references after your data source is created. - `boot_volume_id` - (String) The unique identifier of the boot volume. -- `instance_volumes` - List of volumes - List of volumes attached to instance. +- `instance_volumes` - (List) List of volumes attached to instance. Nested scheme for `instance_volumes`: - - `bootable`- (Bool) Indicates if the volume is boot capable. + - `bootable`- (Boolean) Indicates if the volume is boot capable. - `href` - (String) The hyper link of the volume. - `id` - (String) The unique identifier of the volume. - `name` - (String) The name of the volume. - `pool` - (String) Volume pool, name of storage pool where the volume is located. - - `shareable` - (Bool) Indicates if the volume is shareable between VMs. + - `shareable` - (Boolean) Indicates if the volume is shareable between VMs. - `size` - (Integer) The size of this volume in gigabytes. - `state` - (String) The state of the volume. - `type` - (String) The disk type that is used for this volume. diff --git a/website/docs/d/pi_key.html.markdown b/website/docs/d/pi_key.html.markdown index 19b824e44c..69eda7ca5b 100644 --- a/website/docs/d/pi_key.html.markdown +++ b/website/docs/d/pi_key.html.markdown @@ -1,28 +1,40 @@ --- - subcategory: "Power Systems" layout: "ibm" page_title: "IBM: pi_key" description: |- - Manages an key in the Power Virtual Server cloud. + Manages an SSH key in the Power Virtual Server cloud. --- # ibm_pi_key -Retrieve information about the SSH key that is used for your Power Systems Virtual Server instance. The SSH key is used to access the instance after it is created. For more information, about [configuring your IBM virtual machine (VM)](https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-creating-ssh-key). +Retrieve information about the SSH key that is used for your Power Systems Virtual Server instance. The SSH key is used to access the instance after it is created. For more information, about [generating and using SSH Keys](https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-creating-ssh-key). 
## Example usage - ```terraform data "ibm_pi_key" "ds_instance" { pi_key_name = "terraform-test-key" pi_cloud_instance_id = "49fba6c9-23f8-40bc-9899-aca322ee7d5b" } ``` - + +**Notes** +- Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. +- If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: + - `region` - `lon` + - `zone` - `lon04` + +Example usage: + ```terraform + provider "ibm" { + region = "lon" + zone = "lon04" + } + ``` + ## Argument reference Review the argument references that you can specify for your data source. -- `pi_cloud_instance_id` - (Required, String) Cloud Instance ID of a PCloud Instance. +- `pi_cloud_instance_id` - (Required, String) The GUID of the service instance associated with an account. - `pi_key_name` - (Required, String) User defined name for the SSH key. ## Attribute reference @@ -31,19 +43,3 @@ In addition to all argument reference list, you can access the following attribu - `id` - (String) User defined name for the SSH key - `creation_date` - (String) Date of SSH Key creation. - `ssh_key` - (String) SSH RSA key. - -**Notes** - -* Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. -* If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: - * `region` - `lon` - * `zone` - `lon04` - -Example usage: - - ```terraform - provider "ibm" { - region = "lon" - zone = "lon04" - } - ``` \ No newline at end of file diff --git a/website/docs/d/pi_keys.html.markdown b/website/docs/d/pi_keys.html.markdown index 305c0f4a9d..dd5dac4d13 100644 --- a/website/docs/d/pi_keys.html.markdown +++ b/website/docs/d/pi_keys.html.markdown @@ -1,5 +1,4 @@ --- - subcategory: "Power Systems" layout: "ibm" page_title: "IBM: pi_keys" @@ -8,20 +7,33 @@ description: |- --- # ibm_pi_keys -Retrieve information about all SSH keys. For more information, see [getting started with IBM Power Systems Virtual Servers](https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-getting-started). +Retrieve information about all SSH keys. For more information, about [generating and using SSH Keys](https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-creating-ssh-key). ## Example usage - ```terraform data "ibm_pi_keys" "example" { pi_cloud_instance_id = "" } ``` - + +**Notes** +- Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. +- If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: + - `region` - `lon` + - `zone` - `lon04` + +Example usage: + ```terraform + provider "ibm" { + region = "lon" + zone = "lon04" + } + ``` + ## Argument reference Review the argument references that you can specify for your data source. -- `pi_cloud_instance_id` - (Required, String) Cloud Instance ID of a PCloud Instance. +- `pi_cloud_instance_id` - (Required, String) The GUID of the service instance associated with an account. ## Attribute reference In addition to all argument reference list, you can access the following attribute references after your data source is created. @@ -32,19 +44,3 @@ In addition to all argument reference list, you can access the following attribu - `name` - (String) User defined name for the SSH key - `creation_date` - (String) Date of SSH Key creation. - `ssh_key` - (String) SSH RSA key. - -**Notes** - -* Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. 
-* If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: - * `region` - `lon` - * `zone` - `lon04` - -Example usage: - - ```terraform - provider "ibm" { - region = "lon" - zone = "lon04" - } - ``` \ No newline at end of file diff --git a/website/docs/d/pi_network.html.markdown b/website/docs/d/pi_network.html.markdown index c4f03cb824..30635fd315 100644 --- a/website/docs/d/pi_network.html.markdown +++ b/website/docs/d/pi_network.html.markdown @@ -1,5 +1,4 @@ --- - subcategory: "Power Systems" layout: "ibm" page_title: "IBM: pi_network" @@ -11,7 +10,6 @@ description: |- Retrieve information about the network that your Power Systems Virtual Server instance is connected to. For more information, about power virtual server instance network, see [setting up an IBM network install server](https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-configuring-subnet). ## Example usage - ```terraform data "ibm_pi_network" "ds_network" { pi_network_name = "APP" @@ -19,15 +17,13 @@ data "ibm_pi_network" "ds_network" { } ``` -**Note** - -* Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. -* If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: - * `region` - `lon` - * `zone` - `lon04` +**Notes** +- Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. +- If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: + - `region` - `lon` + - `zone` - `lon04` - Example usage: - +Example usage: ```terraform provider "ibm" { region = "lon" @@ -44,15 +40,15 @@ Review the argument references that you can specify for your data source. ## Attribute reference In addition to all argument reference list, you can access the following attribute references after your data source is created. +- `access_config` - (String) The network communication configuration option of the network (for satellite locations only). - `available_ip_count` - (Float) The total number of IP addresses that you have in your network. - `cidr` - (String) The CIDR of the network. -- `dns`- (Set of String) The DNS Servers for the network. +- `dns`- (Set) The DNS Servers for the network. - `gateway` - (String) The network gateway that is attached to your network. - `id` - (String) The ID of the network. +- `jumbo` - (Deprecated, Boolean) MTU Jumbo option of the network (for multi-zone locations only). +- `mtu` - (Boolean) Maximum Transmission Unit option of the network. - `type` - (String) The type of network. - `used_ip_count` - (Float) The number of used IP addresses. - `used_ip_percent` - (Float) The percentage of IP addresses used. - `vlan_id` - (String) The VLAN ID that the network is connected to. -- `jumbo` - (Bool) MTU Jumbo option of the network (for multi-zone locations only). `deprecated` -- `mtu` - (Bool) Maximum Transmission Unit option of the network. -- `access_config` - (String) The network communication configuration option of the network (for satellite locations only). \ No newline at end of file diff --git a/website/docs/d/pi_network_port.html.markdown b/website/docs/d/pi_network_port.html.markdown new file mode 100644 index 0000000000..9d46bee2d0 --- /dev/null +++ b/website/docs/d/pi_network_port.html.markdown @@ -0,0 +1,52 @@ +--- +subcategory: "Power Systems" +layout: "ibm" +page_title: "IBM: pi_network_port" +description: |- + Manages an Network Port in the Power Virtual Server Cloud. 
+--- + +# ibm_pi_network_port +Retrieve information about a network port in the Power Virtual Server Cloud. For more information, about networks in IBM power virtual server, see [adding or removing a public network](https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-modifying-server#adding-removing-network). + +## Example usage +```terraform +data "ibm_pi_network_port" "test-network-port" { + pi_network_name = "Zone1-CFN" + pi_cloud_instance_id = "51e1879c-bcbe-4ee1-a008-49cdba0eaf60" +} +``` + +**Notes** +- Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. +- If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: + - `region` - `lon` + - `zone` - `lon04` + +Example usage: + ```terraform + provider "ibm" { + region = "lon" + zone = "lon04" + } + ``` + +## Argument reference +Review the argument references that you can specify for your data source. + +- `pi_cloud_instance_id` - (Required, String) The GUID of the service instance associated with an account. +- `pi_network_name` - (Required, String) The unique identifier or name of a network. + +## Attribute reference +In addition to all argument reference list, you can access the following attribute reference after your data source is created. + +- `network_ports` - (List) List of all in use network ports for a network. + + Nested scheme for `network_ports`: + - `description` - (String) The description for the network port. + - `href` - (String) Network port href. + - `ipaddress` - (String) The IP address of the port. + - `macaddress` - (String) The MAC address of the port. + - `portid` - (String) The ID of the port. + - `public_ip`- (String) The public IP associated with the port. + - `status` - (String) The status of the port. diff --git a/website/docs/d/pi_placement_group.html.markdown b/website/docs/d/pi_placement_group.html.markdown index d90d4b83f9..0c6aebd4d4 100644 --- a/website/docs/d/pi_placement_group.html.markdown +++ b/website/docs/d/pi_placement_group.html.markdown @@ -1,5 +1,4 @@ --- - subcategory: "Power Systems" layout: "ibm" page_title: "IBM: pi_placement_group" @@ -11,7 +10,6 @@ description: |- Retrieve information about a placement group. For more information, about placement groups, see [Managing server placement groups](https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-placement-groups). ## Example Usage - ```terraform data "ibm_pi_placement_group" "ds_placement_group" { pi_placement_group_name = "my-pg" @@ -20,20 +18,18 @@ data "ibm_pi_placement_group" "ds_placement_group" { ``` **Notes** -* Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. -* If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: - * `region` - `lon` - * `zone` - `lon04` - - Example usage: +- Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. +- If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: + - `region` - `lon` + - `zone` - `lon04` +Example usage: ```terraform provider "ibm" { region = "lon" zone = "lon04" } ``` - ## Argument reference Review the argument references that you can specify for your data source. @@ -45,5 +41,5 @@ Review the argument references that you can specify for your data source. In addition to all argument reference list, you can access the following attribute references after your data source is created. 
- `id` - (String) The ID of the placement group. -- `members` - (List of strings) The list of server instances IDs that are members of the placement group. +- `members` - (List) List of server instances IDs that are members of the placement group. - `policy` - (String) The value of the group's affinity policy. Valid values are affinity and anti-affinity. diff --git a/website/docs/d/pi_placement_groups.html.markdown b/website/docs/d/pi_placement_groups.html.markdown index c4ef5d7946..a1e11659bf 100644 --- a/website/docs/d/pi_placement_groups.html.markdown +++ b/website/docs/d/pi_placement_groups.html.markdown @@ -1,5 +1,4 @@ --- - subcategory: "Power Systems" layout: "ibm" page_title: "IBM: pi_placement_groups" @@ -11,7 +10,6 @@ description: |- Retrieve information about all placement groups. For more information, see [getting started with IBM Power Systems Virtual Servers](https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-getting-started). ## Example usage - ```terraform data "ibm_pi_placement_groups" "example" { pi_cloud_instance_id = "" @@ -19,14 +17,12 @@ data "ibm_pi_placement_groups" "example" { ``` **Notes** - -* Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. -* If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: - * `region` - `lon` - * `zone` - `lon04` +- Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. +- If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: + - `region` - `lon` + - `zone` - `lon04` Example usage: - ```terraform provider "ibm" { region = "lon" @@ -46,6 +42,6 @@ In addition to all argument reference list, you can access the following attribu Nested scheme for `placement_groups`: - `id` - (String) The ID of the placement group. + - `members` - (List) List of server instances IDs that are members of the placement group. - `name` - (String) User defined name for the placement group. - - `members` - (List of strings) The list of server instances IDs that are members of the placement group. - `policy` - (String) The value of the group's affinity policy. Valid values are affinity and anti-affinity. diff --git a/website/docs/d/pi_public_network.html.markdown b/website/docs/d/pi_public_network.html.markdown index 9c00c9d0a3..097d94ce6a 100644 --- a/website/docs/d/pi_public_network.html.markdown +++ b/website/docs/d/pi_public_network.html.markdown @@ -1,5 +1,4 @@ --- - subcategory: "Power Systems" layout: "ibm" page_title: "IBM: pi_public_network" @@ -8,32 +7,29 @@ description: |- --- # ibm_pi_public_network -Retrieve the details about a public network that is used for your Power Systems Virtual Server instance. For more information, about public network in IBM power virutal server, see [adding or removing a public network +Retrieve the details about a public network that is used for your Power Systems Virtual Server instance. For more information, about public network in IBM Power Systems Virtual Server, see [adding or removing a public network ](https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-modifying-server#adding-removing-network). ## Example usage - ```terraform data "ibm_pi_public_network" "ds_public_network" { pi_cloud_instance_id = "49fba6c9-23f8-40bc-9899-aca322ee7d5b" } ``` -**Note** -* Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. 
-* If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: - * `region` - `lon` - * `zone` - `lon04` - - Example usage: +**Notes** +- Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. +- If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: + - `region` - `lon` + - `zone` - `lon04` +Example usage: ```terraform provider "ibm" { region = "lon" zone = "lon04" } ``` - ## Argument reference Review the argument references that you can specify for your data source. diff --git a/website/docs/d/pi_pvm_snapshots.html.markdown b/website/docs/d/pi_pvm_snapshots.html.markdown index 7a2eeaf5e5..3a5fa6571a 100644 --- a/website/docs/d/pi_pvm_snapshots.html.markdown +++ b/website/docs/d/pi_pvm_snapshots.html.markdown @@ -1,5 +1,4 @@ --- - subcategory: "Power Systems" layout: "ibm" page_title: "IBM: pi_pvm_snapshots" @@ -11,7 +10,6 @@ description: |- Retrieve information about a Power Systems Virtual Server instance snapshots. For more information, about Power Virtual Server PVM instance snapshots, see [getting started with IBM Power Systems Virtual Servers](https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-getting-started). ## Example usage - ```terraform data "ibm_pi_pvm_snapshots" "ds_pvm_snapshots" { pi_instance_name = "terraform-test-instance" @@ -19,14 +17,13 @@ data "ibm_pi_pvm_snapshots" "ds_pvm_snapshots" { } ``` -**Note** -* Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. -* If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: - * `region` - `lon` - * `zone` - `lon04` +**Notes** +- Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. +- If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: + - `region` - `lon` + - `zone` - `lon04` - Example usage: - +Example usage: ```terraform provider "ibm" { region = "lon" @@ -37,8 +34,8 @@ data "ibm_pi_pvm_snapshots" "ds_pvm_snapshots" { ## Argument reference Review the argument references that you can specify for your data source. -- `pi_instance_name` - (Required, String) The name of the instance. - `pi_cloud_instance_id` - (Required, String) The GUID of the service instance associated with an account. +- `pi_instance_name` - (Required, String) The unique identifier or name of the instance. ## Attribute reference In addition to all argument reference list, you can access the following attribute references after your data source is created. @@ -47,11 +44,11 @@ In addition to all argument reference list, you can access the following attribu Nested scheme for `pvm_snapshots`: - `action` - (String) Action performed on the instance snapshot. - - `creation_date` - (String) The creation date. + - `creation_date` - (String) Date of snapshot creation. - `description` - (String) The description of the snapshot. - `id` - (String) The unique identifier of the Power Virtual Machine instance snapshot. - - `last_updated_date` - (String) The last update date. + - `last_updated_date` - (String) Date of last update. - `name` - (String) The name of the Power Virtual Machine instance snapshot. - `percent_complete` - (Integer) The snapshot completion percentage. - `status` - (String) The status of the Power Virtual Machine instance snapshot. 
- - `volume_snapshots` - (Map) A map of volume snapshots included in the Power Virtual Machine instance snapshot. \ No newline at end of file + - `volume_snapshots` - (Map) A map of volume snapshots included in the Power Virtual Machine instance snapshot. diff --git a/website/docs/d/pi_sap_profile.html.markdown b/website/docs/d/pi_sap_profile.html.markdown index 2be43b8874..32717c17f7 100644 --- a/website/docs/d/pi_sap_profile.html.markdown +++ b/website/docs/d/pi_sap_profile.html.markdown @@ -1,5 +1,4 @@ --- - subcategory: "Power Systems" layout: "ibm" page_title: "IBM: pi_sap_profile" @@ -11,7 +10,6 @@ description: |- Retrieve information about a SAP profile. For more information, see [getting started with IBM Power Systems Virtual Servers](https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-getting-started). ## Example usage - ```terraform data "ibm_pi_sap_profile" "example" { pi_cloud_instance_id = "" @@ -20,14 +18,12 @@ data "ibm_pi_sap_profile" "example" { ``` **Notes** - -* Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. -* If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: - * `region` - `lon` - * `zone` - `lon04` +- Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. +- If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: + - `region` - `lon` + - `zone` - `lon04` Example usage: - ```terraform provider "ibm" { region = "lon" diff --git a/website/docs/d/pi_sap_profiles.html.markdown b/website/docs/d/pi_sap_profiles.html.markdown index 4106f1fa84..630242ee68 100644 --- a/website/docs/d/pi_sap_profiles.html.markdown +++ b/website/docs/d/pi_sap_profiles.html.markdown @@ -1,5 +1,4 @@ --- - subcategory: "Power Systems" layout: "ibm" page_title: "IBM: pi_sap_profiles" @@ -11,7 +10,6 @@ description: |- Retrieve information about all SAP profiles. For more information, see [getting started with IBM Power Systems Virtual Servers](https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-getting-started). ## Example usage - ```terraform data "ibm_pi_sap_profiles" "example" { pi_cloud_instance_id = "" @@ -19,14 +17,12 @@ data "ibm_pi_sap_profiles" "example" { ``` **Notes** - -* Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. -* If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: - * `region` - `lon` - * `zone` - `lon04` +- Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. +- If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: + - `region` - `lon` + - `zone` - `lon04` Example usage: - ```terraform provider "ibm" { region = "lon" diff --git a/website/docs/d/pi_shared_processor_pool.html.markdown b/website/docs/d/pi_shared_processor_pool.html.markdown index b47e607be6..7918b46199 100644 --- a/website/docs/d/pi_shared_processor_pool.html.markdown +++ b/website/docs/d/pi_shared_processor_pool.html.markdown @@ -1,5 +1,4 @@ --- - subcategory: "Power Systems" layout: "ibm" page_title: "IBM: pi_shared_processor_pool" @@ -11,7 +10,6 @@ description: |- Retrieve information about a shared processor pool. For more information, see [getting started with IBM Power Systems Virtual Servers](https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-getting-started). 
## Example Usage - ```terraform data "ibm_pi_shared_processor_pool" "ds_pool" { pi_shared_processor_pool_id = "my-spp" @@ -20,20 +18,18 @@ data "ibm_pi_shared_processor_pool" "ds_pool" { ``` **Notes** -* Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. -* If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: - * `region` - `lon` - * `zone` - `lon04` - - Example usage: +- Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. +- If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: + - `region` - `lon` + - `zone` - `lon04` +Example usage: ```terraform provider "ibm" { region = "lon" zone = "lon04" } - ``` - + ``` ## Argument reference Review the argument references that you can specify for your data source. @@ -48,7 +44,8 @@ In addition to all argument reference list, you can access the following attribu - `available_cores` - (Integer) The available cores in the shared processor pool. - `host_id` - (Integer) The host ID where the shared processor pool resides. - `id` - (String) The shared processor pool's unique ID. -- `instances` - (List of Map) The list of server instances that are deployed in the shared processor pool. +- `instances` - (List) List of server instances deployed in the shared processor pool. + Nested scheme for `instances`: - `availability_zone` - (String) Availability zone for the server instances. - `cpus` - (Integer) The amount of cpus for the server instance. diff --git a/website/docs/d/pi_shared_processor_pools.html.markdown b/website/docs/d/pi_shared_processor_pools.html.markdown index dd9ad6d97e..3586e0437c 100644 --- a/website/docs/d/pi_shared_processor_pools.html.markdown +++ b/website/docs/d/pi_shared_processor_pools.html.markdown @@ -1,5 +1,4 @@ --- - subcategory: "Power Systems" layout: "ibm" page_title: "IBM: pi_shared_processor_pools" @@ -11,7 +10,6 @@ description: |- Retrieve information about all shared processor pools. For more information, see [getting started with IBM Power Systems Virtual Servers](https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-getting-started). ## Example usage - ```terraform data "ibm_pi_shared_processor_pools" "example" { pi_cloud_instance_id = "" @@ -19,14 +17,12 @@ data "ibm_pi_shared_processor_pools" "example" { ``` **Notes** - -* Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. -* If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: - * `region` - `lon` - * `zone` - `lon04` +- Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. +- If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: + - `region` - `lon` + - `zone` - `lon04` Example usage: - ```terraform provider "ibm" { region = "lon" @@ -52,4 +48,4 @@ In addition to all argument reference list, you can access the following attribu - `reserved_cores` - (Integer) The amount of reserved cores for the shared processor pool. - `shared_processor_pool_id` - (String) The shared processor pool's unique ID. - `status` - (String) The status of the shared processor pool. - - `status_detail` - (String) The status details of the shared processor pool. \ No newline at end of file + - `status_detail` - (String) The status details of the shared processor pool. 
diff --git a/website/docs/d/pi_spp_placement_group.html.markdown b/website/docs/d/pi_spp_placement_group.html.markdown index 79af4c068e..b725105481 100644 --- a/website/docs/d/pi_spp_placement_group.html.markdown +++ b/website/docs/d/pi_spp_placement_group.html.markdown @@ -1,5 +1,4 @@ --- - subcategory: "Power Systems" layout: "ibm" page_title: "IBM: pi_spp_placement_group" @@ -11,7 +10,6 @@ description: |- Retrieve information about a shared processor pool placement group. For more information, see [getting started with IBM Power Systems Virtual Servers](https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-getting-started). ## Example Usage - ```terraform data "ibm_pi_spp_placement_group" "ds_placement_group" { pi_spp_placement_group_id = "my-spppg" @@ -20,20 +18,18 @@ data "ibm_pi_spp_placement_group" "ds_placement_group" { ``` **Notes** -* Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. -* If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: - * `region` - `lon` - * `zone` - `lon04` - - Example usage: +- Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. +- If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: + - `region` - `lon` + - `zone` - `lon04` +Example usage: ```terraform provider "ibm" { region = "lon" zone = "lon04" } ``` - ## Argument reference Review the argument references that you can specify for your data source. @@ -44,6 +40,6 @@ Review the argument references that you can specify for your data source. ## Attribute reference In addition to all argument reference list, you can access the following attribute references after your data source is created. -- `members` - (List of strings) The list of shared processor pool IDs that are members of the placement group. +- `members` - (List) List of shared processor pool IDs that are members of the placement group. - `name` - (String) The name of the shared processor pool placement group. - `policy` - (String) The value of the group's affinity policy. Valid values are affinity and anti-affinity. diff --git a/website/docs/d/pi_spp_placement_groups.html.markdown b/website/docs/d/pi_spp_placement_groups.html.markdown index 61267fefb5..cc1503dd9f 100644 --- a/website/docs/d/pi_spp_placement_groups.html.markdown +++ b/website/docs/d/pi_spp_placement_groups.html.markdown @@ -1,5 +1,4 @@ --- - subcategory: "Power Systems" layout: "ibm" page_title: "IBM: pi_spp_placement_groups" @@ -11,7 +10,6 @@ description: |- Retrieve information about all shared processor pool placement groups. For more information, see [getting started with IBM Power Systems Virtual Servers](https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-getting-started). ## Example usage - ```terraform data "ibm_pi_spp_placement_groups" "example" { pi_cloud_instance_id = "" @@ -19,14 +17,12 @@ data "ibm_pi_spp_placement_groups" "example" { ``` **Notes** - -* Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. -* If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: - * `region` - `lon` - * `zone` - `lon04` +- Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. 
+- If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: + - `region` - `lon` + - `zone` - `lon04` Example usage: - ```terraform provider "ibm" { region = "lon" @@ -45,7 +41,7 @@ In addition to all argument reference list, you can access the following attribu - `spp_placement_groups` - (List) List of all the shared processor pool placement groups. Nested scheme for `spp_placement_groups`: + - `members` - (List) The list of shared processor pool IDs that are members of the shared processor pool placement group. - `name` - (String) User defined name for the shared processor pool placement group. - - `members` - (List of strings) The list of shared processor pool IDs that are members of the shared processor pool placement group. - `policy` - (String) The value of the group's affinity policy. Valid values are affinity and anti-affinity. - `spp_placement_group_id` - (String) The ID of the shared processor pool placement group. diff --git a/website/docs/d/pi_storage_pool_capacity.markdown b/website/docs/d/pi_storage_pool_capacity.markdown index a38e01eb79..caa5550c16 100644 --- a/website/docs/d/pi_storage_pool_capacity.markdown +++ b/website/docs/d/pi_storage_pool_capacity.markdown @@ -1,5 +1,4 @@ --- - subcategory: "Power Systems" layout: "ibm" page_title: "IBM: pi_storage_pool_capacity" @@ -11,7 +10,6 @@ description: |- Retrieve information about storages capacity for a storage pool in a region. For more information, see [getting started with IBM Power Systems Virtual Servers](https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-getting-started). ## Example usage - ```terraform data "ibm_pi_storage_pool_capacity" "pool" { pi_cloud_instance_id = "" @@ -20,14 +18,12 @@ data "ibm_pi_storage_pool_capacity" "pool" { ``` **Notes** - -* Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. -* If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: - * `region` - `lon` - * `zone` - `lon04` +- Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. +- If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: + - `region` - `lon` + - `zone` - `lon04` Example usage: - ```terraform provider "ibm" { region = "lon" @@ -45,6 +41,7 @@ Review the argument references that you can specify for your data source. In addition to all argument reference list, you can access the following attribute references after your data source is created. - `max_allocation_size` - (Integer) Maximum allocation storage size (GB). +- `replication_enabled` - (Boolean) Replication status of the storage pool. - `storage_type` - (String) Storage type of the storage pool. - `total_capacity` - (Integer) Total pool capacity (GB). -- `replication_enabled` - (Boolean) Replication status of the storage pool. + diff --git a/website/docs/d/pi_storage_pools_capacity.markdown b/website/docs/d/pi_storage_pools_capacity.markdown index f85cecb0d3..0a20832207 100644 --- a/website/docs/d/pi_storage_pools_capacity.markdown +++ b/website/docs/d/pi_storage_pools_capacity.markdown @@ -1,5 +1,4 @@ --- - subcategory: "Power Systems" layout: "ibm" page_title: "IBM: pi_storage_pools_capacity" @@ -11,7 +10,6 @@ description: |- Retrieve information about storages capacity for all available storage pools in a region. 
For more information, see [getting started with IBM Power Systems Virtual Servers](https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-getting-started). ## Example usage - ```terraform data "ibm_pi_storage_pools_capacity" "pools" { pi_cloud_instance_id = "" @@ -19,14 +17,12 @@ data "ibm_pi_storage_pools_capacity" "pools" { ``` **Notes** - -* Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. -* If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: - * `region` - `lon` - * `zone` - `lon04` +- Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. +- If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: + - `region` - `lon` + - `zone` - `lon04` Example usage: - ```terraform provider "ibm" { region = "lon" diff --git a/website/docs/d/pi_system_pools.markdown b/website/docs/d/pi_system_pools.markdown index 28ce84b76d..09cb154c97 100644 --- a/website/docs/d/pi_system_pools.markdown +++ b/website/docs/d/pi_system_pools.markdown @@ -1,5 +1,4 @@ --- - subcategory: "Power Systems" layout: "ibm" page_title: "IBM: pi_system_pools" @@ -11,7 +10,6 @@ description: |- Retrieve information about list of system pools within a particular data center. For more information, see [getting started with IBM Power Systems Virtual Servers](https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-getting-started). ## Example usage - ```terraform data "ibm_pi_system_pools" "pools" { pi_cloud_instance_id = "" @@ -19,14 +17,12 @@ data "ibm_pi_system_pools" "pools" { ``` **Notes** - -* Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. -* If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: - * `region` - `lon` - * `zone` - `lon04` +- Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. +- If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: + - `region` - `lon` + - `zone` - `lon04` Example usage: - ```terraform provider "ibm" { region = "lon" @@ -42,47 +38,46 @@ Review the argument references that you can specify for your data source. ## Attribute reference In addition to all argument reference list, you can access the following attribute references after your data source is created. -- `system_pools` - (List) The available system pools within a particular DataCenter. +- `system_pools` - (List) List of available system pools within a particular Datacenter. Nested scheme for `system_pools`: - - `system_pool_name` - (String) The system pool name. - `capacity` - (Map) Advertised capacity cores and memory (GB). - Nested scheme for `capacity`: - - `cores` - (String) The host available Processor units. - - `memory`- (String) The host available RAM memory in GiB. + Nested scheme for `capacity`: + - `cores` - (String) The host available Processor units. + - `memory`- (String) The host available RAM memory in GiB. - `core_memory_ratio` - (Float) Processor to Memory (GB) Ratio. - `max_available` - (Map) Maximum configurable cores and memory (GB) (aggregated from all hosts). - Nested scheme for `max_available`: - - `cores` - (String) The host available Processor units. - - `memory`- (String) The host available RAM memory in GiB. + Nested scheme for `max_available`: + - `cores` - (String) The host available Processor units. 
+ - `memory`- (String) The host available RAM memory in GiB. - `max_cores_available` - (Map) Maximum configurable cores available combined with available memory of that host. - Nested scheme for `max_cores_available`: - - `cores` - (String) The host available Processor units. - - `memory`- (String) The host available RAM memory in GiB. + Nested scheme for `max_cores_available`: + - `cores` - (String) The host available Processor units. + - `memory`- (String) The host available RAM memory in GiB. - `max_memory_available` - (Map) Maximum configurable memory available combined with available cores of that host. - Nested scheme for `max_memory_available`: - - `cores` - (String) The host available Processor units. - - `memory`- (String) The host available RAM memory in GiB. + Nested scheme for `max_memory_available`: + - `cores` - (String) The host available Processor units. + - `memory`- (String) The host available RAM memory in GiB. - `shared_core_ratio` - (Map) The min-max-default allocation percentage of shared core per vCPU. - Nested scheme for `shared_core_ratio`: - - `default` - (String) The default value. - - `max` - (String) The max value. - - `min`- (String) The min value. - - - `systems` - (List) The DataCenter list of servers and their available resources. + Nested scheme for `shared_core_ratio`: + - `default` - (String) The default value. + - `max` - (String) The max value. + - `min`- (String) The min value. + - `system_pool_name` - (String) The system pool name. + - `systems` - (List) The Datacenter list of servers and their available resources. - Nested scheme for `systems`: - - `cores` - (String) The host available Processor units. - - `id` - (String) The host identifier. - - `memory`- (String) The host available RAM memory in GiB. + Nested scheme for `systems`: + - `cores` - (String) The host available Processor units. + - `id` - (String) The host identifier. + - `memory`- (String) The host available RAM memory in GiB. - `type` - (String) Type of system hardware. diff --git a/website/docs/d/pi_volume.html.markdown b/website/docs/d/pi_volume.html.markdown index 1147a6d164..4d4ef82032 100644 --- a/website/docs/d/pi_volume.html.markdown +++ b/website/docs/d/pi_volume.html.markdown @@ -60,3 +60,4 @@ In addition to all argument reference list, you can access the following attribu - `state` - (String) The state of the volume. - `volume_pool` - (String) Volume pool, name of storage pool where the volume is located. - `wwn` - (String) The world wide name of the volume. +- `io_throttle_rate` -(String) Amount of iops assigned to the volume. diff --git a/website/docs/d/pi_volume_clone.html.markdown b/website/docs/d/pi_volume_clone.html.markdown new file mode 100644 index 0000000000..995cac79ac --- /dev/null +++ b/website/docs/d/pi_volume_clone.html.markdown @@ -0,0 +1,55 @@ +--- + +subcategory: "Power Systems" +layout: "ibm" +page_title: "IBM: pi_volume_clone" +description: |- + Manages IBM Volume Clone in the Power Virtual Server cloud. +--- + +# ibm_pi_volume_clone +Retrieves information about a volume clone. For more information, about managing volume clone, see [getting started with IBM Power Systems Virtual Servers](https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-getting-started). + +## Example usage +The following example retrieves information about the volume clone task that is present in Power Systems Virtual Server. 
+ +```terraform +data "ibm_pi_volume_clone" "ds_volume_clone" { + pi_cloud_instance_id = "" + pi_volume_clone_task_id = "" +} +``` + +**Note** +* Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. +* If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: + * `region` - `lon` + * `zone` - `lon04` + + Example usage: + + ```terraform + provider "ibm" { + region = "lon" + zone = "lon04" + } + ``` + +## Argument reference +Review the argument references that you can specify for your resource. + +- `pi_cloud_instance_id` - (Required, String) The GUID of the service instance associated with an account. +- `pi_volume_clone_task_id` - (Required, String) The ID of the volume clone task. + +## Attribute reference +In addition to all argument reference list, you can access the following attribute reference after your resource is created. + +- `cloned_volumes` - (List of objects) The List of cloned volumes. + + Nested scheme for `cloned_volumes`: + - `clone_volume_id` - (String) The ID of the newly cloned volume. + - `source_volume_id` - (String) The ID of the source volume. +- `failure_reason` - (String) The reason for the failure of the clone volume task. +- `id` - (String) The unique identifier of the volume clone task. +- `percent_complete` - (Integer) The completion percentage of the volume clone task. +- `status` - (String) The status of the volume clone task. diff --git a/website/docs/d/pi_workspace.html.markdown b/website/docs/d/pi_workspace.html.markdown index 1b3195f016..6134e2a18c 100644 --- a/website/docs/d/pi_workspace.html.markdown +++ b/website/docs/d/pi_workspace.html.markdown @@ -1,5 +1,4 @@ --- - subcategory: "Power Systems" layout: "ibm" page_title: "IBM: pi_workspace" @@ -8,26 +7,22 @@ description: |- --- # ibm_pi_workspace - Retrieve information about your Power Systems account workspace. ## Example usage - ```terraform data "ibm_pi_workspace" "workspace" { pi_cloud_instance_id = "99fba9c9-66f9-99bc-9999-aca999ee9d9b" } ``` -## Notes - +**Notes** - Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. - If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: - `region` - `lon` - `zone` - `lon04` Example usage: - ```terraform provider "ibm" { region = "lon" @@ -36,13 +31,11 @@ Example usage: ``` ## Argument reference - Review the argument references that you can specify for your data source. - `pi_cloud_instance_id` - (Required, String) Cloud Instance ID of a PCloud Instance under your account. ## Attribute reference - In addition to all argument reference listed, you can access the following attribute references after your data source is created. - `id` - (String) Workspace ID. @@ -54,14 +47,14 @@ In addition to all argument reference listed, you can access the following attri - `pi_workspace_details` - (Map) Workspace information. Nested schema for `pi_workspace_details`: - - `creation_date` - (String) Workspace creation date. + - `creation_date` - (String) Date of workspace creation. - `crn` - (String) Workspace crn. - `pi_workspace_location` - (Map) Workspace location. Nested schema for `Workspace location`: - - `region` - (String) The Workspace location region zone. - - `type` - (String) The Workspace location region type. - - `url`- (String) The Workspace location region url. -- `pi_workspace_name` - (String) The Workspace name. 
-- `pi_workspace_status` - (String) The Workspace status, `active`, `critical`, `failed`, `provisioning`. -- `pi_workspace_type` - (String) The Workspace type, `off-premises` or `on-premises`. + - `region` - (String) Workspace location region zone. + - `type` - (String) Workspace location region type. + - `url`- (String) Workspace location region url. +- `pi_workspace_name` - (String) Workspace name. +- `pi_workspace_status` - (String) Workspace status, `active`, `critical`, `failed`, `provisioning`. +- `pi_workspace_type` - (String) Workspace type, `off-premises` or `on-premises`. diff --git a/website/docs/d/pi_workspaces.html.markdown b/website/docs/d/pi_workspaces.html.markdown index 12ca699246..0c5f6e5af2 100644 --- a/website/docs/d/pi_workspaces.html.markdown +++ b/website/docs/d/pi_workspaces.html.markdown @@ -1,5 +1,4 @@ --- - subcategory: "Power Systems" layout: "ibm" page_title: "IBM: pi_workspaces" @@ -8,26 +7,22 @@ description: |- --- # ibm_pi_workspaces - Retrieve information about Power Systems workspaces. ## Example usage - ```terraform data "ibm_pi_workspaces" "workspaces" { pi_cloud_instance_id = "99fba9c9-66f9-99bc-9999-aca999ee9d9b" } ``` -## Notes - +**Notes** - Please find [supported Regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for endpoints. - If a Power cloud instance is provisioned at `lon04`, The provider level attributes should be as follows: - `region` - `lon` - `zone` - `lon04` Example usage: - ```terraform provider "ibm" { region = "lon" @@ -36,34 +31,32 @@ Example usage: ``` ## Argument reference - Review the argument references that you can specify for your data source. -- `pi_cloud_instance_id` - (Required, String) Cloud Instance ID of a PCloud Instance. +- `pi_cloud_instance_id` - (Required, String) The GUID of the service instance associated with an account. ## Attribute reference - In addition to all argument reference listed, you can access the following attribute references after your data source is created. -- `workspaces` - List of all Workspaces. +- `workspaces` - (List) List of all Workspaces. Nested schema for `workspaces` - `pi_workspace_capabilities` - (Map) Workspace Capabilities. Capabilities are `true` or `false`. - Some of `pi_workspace_capabilities` are: - - `cloud-connections` ,`power-edge-router`, `power-vpn-connections`, `transit-gateway-connection` + Some of `pi_workspace_capabilities` are: + - `cloud-connections`, `power-edge-router`, `power-vpn-connections`, `transit-gateway-connection` - `pi_workspace_details` - (Map) Workspace information. - Nested schema for `pi_workspace_details`: - - `creation_date` - (String) Workspace creation date. - - `crn` - (String) Workspace crn. + Nested schema for `pi_workspace_details`: + - `creation_date` - (String) Date of workspace creation. + - `crn` - (String) Workspace crn. - `pi_workspace_id` - (String) Workspace ID. - `pi_workspace_location` - (Map) Workspace location. - Nested schema for `Workspace location`: - - `region` - (String) The Workspace location region zone. - - `type` - (String) The Workspace location region type. - - `url`- (String) The Workspace location region url. - - `pi_workspace_name` - (String) The Workspace name. - - `pi_workspace_status` - (String) The Workspace status, `active`, `critical`, `failed`, `provisioning`. - - `pi_workspace_type` - (String) The Workspace type, `off-premises` or `on-premises`. + Nested schema for `Workspace location`: + - `region` - (String) Workspace location region zone. + - `type` - (String) Workspace location region type. 
+ - `url`- (String) Workspace location region url. + - `pi_workspace_name` - (String) Workspace name. + - `pi_workspace_status` - (String) Workspace status, `active`, `critical`, `failed`, `provisioning`. + - `pi_workspace_type` - (String) Workspace type, `off-premises` or `on-premises`. diff --git a/website/docs/d/project.html.markdown b/website/docs/d/project.html.markdown index 6cac923733..d8db66d883 100644 --- a/website/docs/d/project.html.markdown +++ b/website/docs/d/project.html.markdown @@ -37,9 +37,11 @@ Nested schema for **configs**: * `definition` - (List) The name and description of a project configuration. Nested schema for **definition**: * `description` - (String) A project configuration description. - * Constraints: The maximum length is `1024` characters. The minimum length is `0` characters. The value must match regular expression `/^$|^(?!\\s)(?!.*\\s$)[^\\x00-\\x1F]*$/`. + * Constraints: The default value is ``. The maximum length is `1024` characters. The minimum length is `0` characters. The value must match regular expression `/^$|^(?!\\s)(?!.*\\s$)[^\\x00-\\x1F]*$/`. * `name` - (String) The configuration name. It is unique within the account across projects and regions. * Constraints: The maximum length is `128` characters. The minimum length is `1` character. The value must match regular expression `/^[a-zA-Z0-9][a-zA-Z0-9-_ ]*$/`. + * `deployment_model` - (String) The configuration type. + * Constraints: Allowable values are: `project_deployed`, `user_deployed`. * `href` - (String) A URL. * Constraints: The maximum length is `256` characters. The minimum length is `1` character. The value must match regular expression `/^(http(s)?:\/\/)[a-zA-Z0-9\\$\\-_\\.+!\\*'\\(\\),=&?\/]+$/`. * `id` - (String) The ID of the configuration. If this parameter is empty, an ID is automatically created for the configuration. @@ -58,7 +60,7 @@ Nested schema for **configs**: * `id` - (String) The unique ID. * Constraints: The maximum length is `128` characters. The value must match regular expression `/^[\\.\\-0-9a-zA-Z]+$/`. * `state` - (String) The state of the configuration. - * Constraints: Allowable values are: `approved`, `deleted`, `deleting`, `deleting_failed`, `discarded`, `draft`, `deployed`, `deploying_failed`, `deploying`, `superseded`, `undeploying`, `undeploying_failed`, `validated`, `validating`, `validating_failed`. + * Constraints: Allowable values are: `approved`, `deleted`, `deleting`, `deleting_failed`, `discarded`, `draft`, `deployed`, `deploying_failed`, `deploying`, `superseded`, `undeploying`, `undeploying_failed`, `validated`, `validating`, `validating_failed`, `applied`, `apply_failed`. * `version` - (Integer) The version of the configuration. * `created_at` - (String) A date and time value in the format YYYY-MM-DDTHH:mm:ssZ or YYYY-MM-DDTHH:mm:ss.sssZ, matching the date and time format as specified by RFC 3339. @@ -83,7 +85,7 @@ Nested schema for **cumulative_needs_attention_view**: * `definition` - (List) The definition of the project. Nested schema for **definition**: * `description` - (String) A brief explanation of the project's use in the configuration of a deployable architecture. It is possible to create a project without providing a description. - * Constraints: The maximum length is `1024` characters. The minimum length is `0` characters. The value must match regular expression `/^$|^(?!\\s)(?!.*\\s$)[^\\x00-\\x1F]*$/`. + * Constraints: The default value is ``. The maximum length is `1024` characters. The minimum length is `0` characters. 
The value must match regular expression `/^$|^(?!\\s)(?!.*\\s$)[^\\x00-\\x1F]*$/`. * `destroy_on_delete` - (Boolean) The policy that indicates whether the resources are destroyed or not when a project is deleted. * `name` - (String) The name of the project. It is unique within the account across regions. * Constraints: The maximum length is `128` characters. The minimum length is `1` character. The value must match regular expression `/^(?!\\s)(?!.*\\s$)[^'"`<>{}\\x00-\\x1F]+$/`. @@ -95,7 +97,7 @@ Nested schema for **environments**: * `definition` - (List) The environment definition used in the project collection. Nested schema for **definition**: * `description` - (String) The description of the environment. - * Constraints: The maximum length is `1024` characters. The minimum length is `0` characters. The value must match regular expression `/^$|^(?!\\s)(?!.*\\s$)[^\\x00-\\x1F]*$/`. + * Constraints: The default value is ``. The maximum length is `1024` characters. The minimum length is `0` characters. The value must match regular expression `/^$|^(?!\\s)(?!.*\\s$)[^\\x00-\\x1F]*$/`. * `name` - (String) The name of the environment. It is unique within the account across projects and regions. * Constraints: The maximum length is `128` characters. The minimum length is `1` character. The value must match regular expression `/^(?!\\s)(?!.*\\s$)[^'"`<>{}\\x00-\\x1F]+$/`. * `href` - (String) A URL. @@ -118,6 +120,9 @@ Nested schema for **environments**: * `event_notifications_crn` - (String) The CRN of the event notifications instance if one is connected to this project. * Constraints: The maximum length is `512` characters. The minimum length is `0` characters. The value must match regular expression `/^$|^crn:v[0-9](:([A-Za-z0-9\\-._~!$&'()*+,;=@\/]|%[0-9A-Z]{2})*){8}$/`. +* `href` - (String) A URL. + * Constraints: The maximum length is `256` characters. The minimum length is `1` character. The value must match regular expression `/^(http(s)?:\/\/)[a-zA-Z0-9\\$\\-_\\.+!\\*'\\(\\),=&?\/]+$/`. + * `location` - (Forces new resource, String) The IBM Cloud location where a resource is deployed. * Constraints: The maximum length is `64` characters. The minimum length is `0` characters. The value must match regular expression `/^(?!\\s)(?!.*\\s$)[^'"`<>{}\\x00-\\x1F]*$/`. diff --git a/website/docs/d/project_config.html.markdown b/website/docs/d/project_config.html.markdown index 8a3b1e183b..233f91b9d7 100644 --- a/website/docs/d/project_config.html.markdown +++ b/website/docs/d/project_config.html.markdown @@ -35,7 +35,7 @@ After your data source is created, you can read values from the following attrib * `id` - The unique identifier of the project_config. * `created_at` - (String) A date and time value in the format YYYY-MM-DDTHH:mm:ssZ or YYYY-MM-DDTHH:mm:ss.sssZ, matching the date and time format as specified by RFC 3339. -* `definition` - (List) The name and description of a project configuration. +* `definition` - (List) Nested schema for **definition**: * `authorizations` - (List) The authorization details. You can authorize by using a trusted profile or an API key in Secrets Manager. Nested schema for **authorizations**: @@ -58,7 +58,7 @@ Nested schema for **definition**: * `profile_name` - (String) The name of the compliance profile. * Constraints: The maximum length is `1024` characters. The minimum length is `0` characters. The value must match regular expression `/^(?!\\s)(?!.*\\s$)[^`<>\\x00-\\x1F]*$/`. * `description` - (String) A project configuration description. 
- * Constraints: The maximum length is `1024` characters. The minimum length is `0` characters. The value must match regular expression `/^$|^(?!\\s)(?!.*\\s$)[^\\x00-\\x1F]*$/`. + * Constraints: The default value is ``. The maximum length is `1024` characters. The minimum length is `0` characters. The value must match regular expression `/^$|^(?!\\s)(?!.*\\s$)[^\\x00-\\x1F]*$/`. * `environment_id` - (String) The ID of the project environment. * Constraints: The maximum length is `128` characters. The value must match regular expression `/^[\\.\\-0-9a-zA-Z]+$/`. * `inputs` - (Map) The input variables for configuration definition and environment. @@ -66,8 +66,13 @@ Nested schema for **definition**: * Constraints: The maximum length is `512` characters. The minimum length is `1` character. The value must match regular expression `/^(?!\\s)(?!.*\\s$)[\\.0-9a-z-A-Z_-]+$/`. * `name` - (String) The configuration name. It is unique within the account across projects and regions. * Constraints: The maximum length is `128` characters. The minimum length is `1` character. The value must match regular expression `/^[a-zA-Z0-9][a-zA-Z0-9-_ ]*$/`. + * `resource_crns` - (List) The CRNs of resources associated with this configuration. + * Constraints: The list items must match regular expression `/(?!\\s)(?!.*\\s$)^(crn)[^'"`<>{}\\s\\x00-\\x1F]*/`. The maximum length is `110` items. The minimum length is `0` items. * `settings` - (Map) Schematics environment variables to use to deploy the configuration. Settings are only available if they were specified when the configuration was initially created. +* `href` - (String) A URL. + * Constraints: The maximum length is `256` characters. The minimum length is `1` character. The value must match regular expression `/^(http(s)?:\/\/)[a-zA-Z0-9\\$\\-_\\.+!\\*'\\(\\),=&?\/]+$/`. + * `is_draft` - (Boolean) The flag that indicates whether the version of the configuration is draft, or active. * `last_saved_at` - (String) A date and time value in the format YYYY-MM-DDTHH:mm:ssZ or YYYY-MM-DDTHH:mm:ss.sssZ, matching the date and time format as specified by RFC 3339. @@ -84,7 +89,7 @@ Nested schema for **outputs**: * Constraints: The maximum length is `1024` characters. The minimum length is `0` characters. The value must match regular expression `/^$|^(?!\\s)(?!.*\\s$)[^\\x00-\\x1F]*$/`. * `name` - (String) The variable name. * Constraints: The maximum length is `256` characters. The minimum length is `1` character. The value must match regular expression `/^(?!\\s)(?!.*\\s$).+$/`. - * `value` - (String) Can be any value - a string, number, boolean, array, or object. + * `value` - (Map) Can be any value - a string, number, boolean, array, or object. * `project` - (List) The project referenced by this resource. Nested schema for **project**: @@ -153,7 +158,7 @@ Nested schema for **schematics**: * Constraints: The maximum length is `512` characters. The minimum length is `4` characters. The value must match regular expression `/(?!\\s)(?!.*\\s$)^(crn)[^'"`<>{}\\s\\x00-\\x1F]*/`. * `state` - (String) The state of the configuration. - * Constraints: Allowable values are: `approved`, `deleted`, `deleting`, `deleting_failed`, `discarded`, `draft`, `deployed`, `deploying_failed`, `deploying`, `superseded`, `undeploying`, `undeploying_failed`, `validated`, `validating`, `validating_failed`. 
+ * Constraints: Allowable values are: `approved`, `deleted`, `deleting`, `deleting_failed`, `discarded`, `draft`, `deployed`, `deploying_failed`, `deploying`, `superseded`, `undeploying`, `undeploying_failed`, `validated`, `validating`, `validating_failed`, `applied`, `apply_failed`. * `update_available` - (Boolean) The flag that indicates whether a configuration update is available. diff --git a/website/docs/d/project_environment.html.markdown b/website/docs/d/project_environment.html.markdown index e7c6fd774c..d06f95d38e 100644 --- a/website/docs/d/project_environment.html.markdown +++ b/website/docs/d/project_environment.html.markdown @@ -58,11 +58,14 @@ Nested schema for **definition**: * `profile_name` - (String) The name of the compliance profile. * Constraints: The maximum length is `1024` characters. The minimum length is `0` characters. The value must match regular expression `/^(?!\\s)(?!.*\\s$)[^`<>\\x00-\\x1F]*$/`. * `description` - (String) The description of the environment. - * Constraints: The maximum length is `1024` characters. The minimum length is `0` characters. The value must match regular expression `/^$|^(?!\\s)(?!.*\\s$)[^\\x00-\\x1F]*$/`. + * Constraints: The default value is ``. The maximum length is `1024` characters. The minimum length is `0` characters. The value must match regular expression `/^$|^(?!\\s)(?!.*\\s$)[^\\x00-\\x1F]*$/`. * `inputs` - (Map) The input variables for configuration definition and environment. * `name` - (String) The name of the environment. It is unique within the account across projects and regions. * Constraints: The maximum length is `128` characters. The minimum length is `1` character. The value must match regular expression `/^(?!\\s)(?!.*\\s$)[^'"`<>{}\\x00-\\x1F]+$/`. +* `href` - (String) A URL. + * Constraints: The maximum length is `256` characters. The minimum length is `1` character. The value must match regular expression `/^(http(s)?:\/\/)[a-zA-Z0-9\\$\\-_\\.+!\\*'\\(\\),=&?\/]+$/`. + * `modified_at` - (String) A date and time value in the format YYYY-MM-DDTHH:mm:ssZ or YYYY-MM-DDTHH:mm:ss.sssZ, matching the date and time format as specified by RFC 3339. * `project` - (List) The project referenced by this resource. diff --git a/website/docs/d/resource_instance.html.markdown b/website/docs/d/resource_instance.html.markdown index c24ecf6848..ba6cbbc1c0 100644 --- a/website/docs/d/resource_instance.html.markdown +++ b/website/docs/d/resource_instance.html.markdown @@ -40,5 +40,6 @@ In addition to all argument reference list, you can access the following attribu - `extensions` - (String) The extended metadata as a map associated with the resource instance. - `guid`- (String) The GUID of the resource instance. - `id` - (String) The unique identifier of the resource instance. +- `parameters_json` - (String) The parameters associated with the instance in json format. - `plan` - (String) The plan for the service offering used by this resource instance. - `status` - (String) The status of resource instance. diff --git a/website/docs/d/schematics_workspace.html.markdown b/website/docs/d/schematics_workspace.html.markdown index 2f21acc368..4aa3a50e45 100644 --- a/website/docs/d/schematics_workspace.html.markdown +++ b/website/docs/d/schematics_workspace.html.markdown @@ -45,11 +45,7 @@ Nested scheme for **catalog_ref**: * `item_url` - (String) The URL to the software template in the IBM Cloud catalog. * `launch_url` - (String) The URL to the dashboard to access your software. 
* `offering_version` - (String) The version of the software template that you chose to install from the IBM Cloud catalog. - * `service_extensions` - (List) List of service data -Nested scheme for **service_extensions**: - * `name` - (String) Name of the Service Data. - * `value` - (String) Value of the Service Data. - * `type` - (String) Type of the value string, int, bool. + * `service_extensions` - (String) Service extensions defined as string of json * `created_at` - (String) The timestamp when the workspace was created. diff --git a/website/docs/d/secrets_manager_secret.html.markdown b/website/docs/d/secrets_manager_secret.html.markdown index 6f92ce83b9..26cda83ab2 100644 --- a/website/docs/d/secrets_manager_secret.html.markdown +++ b/website/docs/d/secrets_manager_secret.html.markdown @@ -6,63 +6,4 @@ description: |- Get information about secrets manager secret --- -# ibm_secrets_manager_secret (Deprecated) -Retrieve information about the secrets manager secret data sources. For more information, about getting started with secrets manager, see [about secrets manager](https://cloud.ibm.com/docs/secrets-manager?topic=secrets-manager-getting-started). - -## Example usage - -```terraform -data "ibm_secrets_manager_secret" "secrets_manager_secret" { - instance_id = "36401ffc-6280-459a-ba98-456aba10d0c7" - secret_type = "arbitrary" - secret_id = "7dd2022c-5f54-f96d-4c32-87309e887e5" -} -``` - -## Argument reference -Review the argument references that you can specify for your data source. - -- `instance_id` - (Required, String) The secrets manager instance GUID. -- `secret_type` - (Required, String) The secret type. Supported options are `arbitrary`, `iam_credentials`, `username_password`,`imported_cert`,`public_cert`,`private_cert`,`kv`. -- `secret_id` - (Required, String) The v4 UUID that uniquely identifies the secret. -- `endpoint_type` - (Optional, String) The type of the endpoint used to fetch secret. Supported options are `public`, and `private`. The default value is `public`. - -## Attribute reference -In addition to all argument reference list, you can access the following attribute references after your data source is created. - -- `access_groups` - (String) The access groups that define the capabilities of the service ID and API key that are generated for an `iam_credentials` secret. **Tip** To find the ID of an access group, go to **Manage > Access (IAM) > Access groups** in the IBM Cloud console. Select the access group to inspect, and click **Details** to view its ID. -- `api_key` - (String) The API key that is generated for this secret.After the secret reaches the end of its lease (see the `ttl` field), the API key is deleted automatically. -- `crn` - (String) The Cloud Resource Name (CRN) that uniquely identifies your secrets manager resource. -- `creation_date` - (String) The date the secret was created. The date format follows `RFC 3339`. -- `created_by` - (String) The unique identifier for the entity that created the secret. -- `description` - (String) An extended description of your secret. To protect your privacy, do not use personal data, such as your name or location, as a description for your secret. -- `expiration_date` - (String) The date the secret material expires. The date format follows `RFC 3339` format. You can set an expiration date on supported secret types at their creation. If you create a secret without specifying an expiration date, the secret does not expire. 
The `expiration_date` field is supported for the following secret types `arbitrary`, and `username_password`. -- `id` - (String) The unique identifier of the secrets manager secret. -- `labels` - (String) Labels that you can use to filter for secrets in your instance. Only 30 labels can be created. Labels can be between `2-30` characters, including spaces. Special characters are not permitted include the angled bracket, comma, colon, ampersand, and vertical pipe character. To protect your privacy, do not use personal data, such as your name or location, as a label for your secret. -- `last_update_date` - (String) Updates when the actual secret is modified. The date format follows `RFC 3339`. -- `metadata` - (String) The metadata that describes the resource array. Nested `metadata` blocks have the following structure. - - Nested scheme for `metadata`: - - `collection_type` - (String) The type of resources in the resource array. - - `collection_total` - (String) The number of elements in the resource array. -- `name` - (String) A human readable alias to assign to your secret. To protect your privacy, do not use personal data, such as your name or location, as an alias for your secret. -- `next_rotation_date` - (String) The date that the secret is scheduled for automatic rotation. The service automatically creates a new version of the secret on its next rotation date. This field exists only for secrets that can be auto rotated and an existing rotation policy. -- `password` - (String) The password assigned to an `username_password` secret. -- `payload` - (String) The secret data assigned to an `arbitrary` secret. -- `reuse_api_key` - (String) (IAM credentials) Reuse the service ID and API key for future read operations. -- `secret_data` - (String) Map of username, password if secret_type is `username_password` else map of payload if secret_type is `arbitrary`. -- `secret_group_id` - (String) The `v4` UUID that uniquely identifies the secret group to assign to this secret. If you omit this parameter, your secret is assigned to the default secret group. -- `service_id` - (String) The service ID in which the API key (see the `api_key` field) is created. This service ID is added to the access groups that you assign for this secret. -- `state` - (String) The secret state based on `NIST SP 800-57`. States are integers and correspond to the `Pre-activation = 0`, `Active = 1`, `Suspended = 2`, `Deactivated = 3`, and `Destroyed = 5` values. -- `state_description` - (String) A text representation of the secret state. -- `ttl` - (String) The time-to-live (`TTL`) or lease duration to assign to generated credentials. For `iam_credentials` secrets, the `TTL` defines for how long each generated API key remains valid. The value can be either an integer that specifies the number of seconds, or the string representation of a duration, such as 120 minutes or 24 hours. -- `type` - (String) The `MIME` type that represents the secret. -- `username` - (String) The username assigned to an `username_password` secret. -- `versions` - (String) An array that contains metadata for each secret version. Nested `versions` blocks have the following structure. - - Nested scheme for `versions`: - - `id` - (String) The ID of the secret version. - - `creation_date` - (String) The date that the version of the secret was created. - - `created_by` - (String) The unique identifier for the entity that created the secret. - - `auto_rotated` - (String) Indicates whether the version of the secret created by automatic rotation. 
- +# ibm_secrets_manager_secret (Removed) \ No newline at end of file diff --git a/website/docs/d/secrets_manager_secrets.html.markdown b/website/docs/d/secrets_manager_secrets.html.markdown index 9652a961a5..27899b85fe 100644 --- a/website/docs/d/secrets_manager_secrets.html.markdown +++ b/website/docs/d/secrets_manager_secrets.html.markdown @@ -6,66 +6,5 @@ description: |- Get information about secrets manager secrets. --- -# ibm_secrets_manager_secrets (Deprecated) -Retrieve information about the secrets manager secret data sources. For more information, about getting started with secrets manager, see [about secrets manager](https://cloud.ibm.com/docs/secrets-manager?topic=secrets-manager-getting-started). - -## Example usage - -```terraform -data "ibm_secrets_manager_secrets" "secrets_manager_secrets" { - instance_id = "36401ffc-6280-459a-ba98-456aba10d0c7" -} -``` - -## Argument reference -Review the argument references that you can specify for your data source. - -- `endpoint_type` - (Optional, String) The type of the endpoint used to fetch secret. Supported options are `public`, and `private`. Default is `public`. -- `instance_id` - (Required, String) The secrets manager instance GUID. -- `secret_type` - (Optional, String) The secret type. Supported options are `arbitrary`, `iam_credentials`, `username_password`. - -## Attribute reference -In addition to all argument reference list, you can access the following attribute references after your data source is created. - - -- `id` - (String) The unique identifier of the secrets manager secrets. -- `metadata` - (String) The metadata that describes the resource array. Nested `metadata` blocks have the following structure. - - Nested scheme for `metadata`: - - `collection_type` - (String) The type of resources in the resource array. - - `collection_total` - (String) The number of elements in the resource array. -- `secrets`- (String) A collection of secrets. Nested `secrets` blocks have the following structure. - - Nested scheme for `secrets`: - - `access_groups` - (String) The access groups that define the capabilities of the service ID and API key that are generated for an `iam_credentials` secret. **Tip** To find the ID of an access group, go to **Manage > Access (IAM) > Access groups** in the IBM Cloud console. Select the access group to inspect, and click **Details** to view its ID. - - `api_key` - (String) The API key that is generated for this secret.After the secret reaches the end of its lease (see the `ttl` field), the API key is deleted automatically. If you want to continue to use the same API key for future read operations, see the `reuse_api_key` field. - - `crn` - (String) The Cloud Resource Name (CRN) that uniquely identifies your secrets manager resource. - - `creation_date` - (String) The date the secret was created. The date format follows `RFC 3339`. - - `created_by` - (String) The unique identifier for the entity that created the secret. - - `description` - (String) An extended description of your secret. To protect your privacy, do not use personal data, such as your name or location, as a description for your secret. - - `expiration_date` - (String) The date the secret material expires. The date format follows `RFC 3339` format. You can set an expiration date on supported secret types at their creation. If you create a secret without specifying an expiration date, the secret does not expire. The `expiration_date` field is supported for the following secret types `arbitrary`, and `username_password`. 
- - `labels` - (String) Labels that you can use to filter for secrets in your instance. Only 30 labels can be created. Labels can be between `2-30` characters, including spaces. Special characters are not permitted include the angled bracket, comma, colon, ampersand, and vertical pipe character (`- `). To protect your privacy, do not use personal data, such as your name or location, as a label for your secret. - - `last_update_date` - (String) Updates when the actual secret is modified. The date format follows `RFC 3339`. - - `name` - (String) A human readable alias to assign to your secret. To protect your privacy, do not use personal data, such as your name or location, as an alias for your secret. - - `secret_group_id` - (String) The `v4` UUID that uniquely identifies the secret group to assign to this secret. If you omit this parameter, your secret is assigned to the default secret group. - - `secret_id ` - (String) The `v4` UUID that uniquely identifies the secret. - - `state` - (String) The secret state based on `NIST SP 800-57`. States are integers and correspond to the `Pre-activation = 0`, `Active = 1`, `Suspended = 2`, `Deactivated = 3`, and `Destroyed = 5` values. - - `state_description` - (String) A text representation of the secret state. - - `secret_type`- (String) The secret type. - - `type` - (String) The `MIME` type that represents the secret. - - `versions` - (String) An array that contains metadata for each secret version. Nested `versions` blocks have the following structure. - - Nested scheme for `versions`: - - `auto_rotated` - (String) Indicates whether the version of the secret created by automatic rotation. - - `creation_date` - (String) The date that the version of the secret was created. - - `created_by` - (String) The unique identifier for the entity that created the secret. - - `id` - (String) The ID of the secret version. - - `next_rotation_date` - (String) The date that the secret is scheduled for automatic rotation. The service automatically creates a new version of the secret on its next rotation date. This field exists only for secrets that can be auto rotated and an existing rotation policy. - - `payload` - (String) The secret data assigned to an `arbitrary` secret. - - `password` - (String) The password assigned to an `username_password` secret. - - `reuse_api_key` - (String) (IAM credentials) Reuse the service ID and API key for future read operations. - - `secret_data` - (String) Map of username, password if secret_type is `username_password` else map of payload if secret_type is `arbitrary`. - - `service_id` - (String) The service ID in which the API key (see the `api_key` field) is created. This service ID is added to the access groups that you assign for this secret. - - `ttl` - (String) The time-to-live (`TTL`) or lease duration to assign to generated credentials. For `iam_credentials` secrets, the `TTL` defines for how long each generated API key remains valid. The value can be either an integer that specifies the number of seconds, or the string representation of a duration, such as 120 minutes or 24 hours. - - `username` - (String) The username assigned to an `username_password` secret. 
+# ibm_secrets_manager_secrets (Removed) diff --git a/website/docs/d/sm_iam_credentials_secret.html.markdown b/website/docs/d/sm_iam_credentials_secret.html.markdown index 8355a081f0..79fe4ce3c0 100644 --- a/website/docs/d/sm_iam_credentials_secret.html.markdown +++ b/website/docs/d/sm_iam_credentials_secret.html.markdown @@ -93,7 +93,6 @@ Nested scheme for **rotation**: * `auto_rotate` - (Boolean) Determines whether Secrets Manager rotates your secret automatically.Default is `false`. If `auto_rotate` is set to `true` the service rotates your secret based on the defined interval. * `interval` - (Integer) The length of the secret rotation time interval. * Constraints: The minimum value is `1`. - * `rotate_keys` - (Boolean) Determines whether Secrets Manager rotates the private key for your public certificate automatically.Default is `false`. If it is set to `true`, the service generates and stores a new private key for your rotated certificate. * `unit` - (String) The units for the secret rotation time interval. * Constraints: Allowable values are: `day`, `month`. diff --git a/website/docs/d/sm_iam_credentials_secret_metadata.html.markdown b/website/docs/d/sm_iam_credentials_secret_metadata.html.markdown index e429c1892f..1db8123221 100644 --- a/website/docs/d/sm_iam_credentials_secret_metadata.html.markdown +++ b/website/docs/d/sm_iam_credentials_secret_metadata.html.markdown @@ -74,7 +74,6 @@ Nested scheme for **rotation**: * `auto_rotate` - (Boolean) Determines whether Secrets Manager rotates your secret automatically.Default is `false`. If `auto_rotate` is set to `true` the service rotates your secret based on the defined interval. * `interval` - (Integer) The length of the secret rotation time interval. * Constraints: The minimum value is `1`. - * `rotate_keys` - (Boolean) Determines whether Secrets Manager rotates the private key for your public certificate automatically.Default is `false`. If it is set to `true`, the service generates and stores a new private key for your rotated certificate. * `unit` - (String) The units for the secret rotation time interval. * Constraints: Allowable values are: `day`, `month`. diff --git a/website/docs/d/sm_private_certificate.html.markdown b/website/docs/d/sm_private_certificate.html.markdown index 22bc586770..abecd98dc0 100644 --- a/website/docs/d/sm_private_certificate.html.markdown +++ b/website/docs/d/sm_private_certificate.html.markdown @@ -119,7 +119,6 @@ In addition to all argument references listed, you can access the following attr * `auto_rotate` - (Boolean) Determines whether Secrets Manager rotates your secret automatically.Default is `false`. If `auto_rotate` is set to `true` the service rotates your secret based on the defined interval. * `interval` - (Integer) The length of the secret rotation time interval. * Constraints: The minimum value is `1`. - * `rotate_keys` - (Boolean) Determines whether Secrets Manager rotates the private key for your public certificate automatically.Default is `false`. If it is set to `true`, the service generates and stores a new private key for your rotated certificate. * `unit` - (String) The units for the secret rotation time interval. * Constraints: Allowable values are: `day`, `month`. 
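The hunks above document the `rotation` attributes (`auto_rotate`, `interval`, `unit`) returned by the newer `ibm_sm_*` data sources that replace the removed `ibm_secrets_manager_secret`/`ibm_secrets_manager_secrets` lookups. As a hedged sketch only (the instance GUID, region, and secret ID below are placeholders, and it assumes `rotation` is exposed as a single-element list, as the attribute tables above suggest), a lookup might be consumed like this:

```terraform
# Placeholders only: substitute your own Secrets Manager instance GUID, region, and secret ID.
data "ibm_sm_iam_credentials_secret" "example" {
  instance_id = "6ebc4224-e983-496a-8a54-f40a0bfa9175"
  region      = "us-south"
  secret_id   = "0b5571f7-21e6-42b7-91c5-3f5ac9793a46"
}

output "rotation_interval" {
  # Assumes `rotation` is returned as a single-element list of objects.
  value = data.ibm_sm_iam_credentials_secret.example.rotation[0].interval
}
```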
diff --git a/website/docs/d/sm_private_certificate_metadata.html.markdown b/website/docs/d/sm_private_certificate_metadata.html.markdown index caf491c86c..4d16d2b1a8 100644 --- a/website/docs/d/sm_private_certificate_metadata.html.markdown +++ b/website/docs/d/sm_private_certificate_metadata.html.markdown @@ -90,7 +90,6 @@ Nested scheme for **rotation**: * `auto_rotate` - (Boolean) Determines whether Secrets Manager rotates your secret automatically.Default is `false`. If `auto_rotate` is set to `true` the service rotates your secret based on the defined interval. * `interval` - (Integer) The length of the secret rotation time interval. * Constraints: The minimum value is `1`. - * `rotate_keys` - (Boolean) Determines whether Secrets Manager rotates the private key for your public certificate automatically.Default is `false`. If it is set to `true`, the service generates and stores a new private key for your rotated certificate. * `unit` - (String) The units for the secret rotation time interval. * Constraints: Allowable values are: `day`, `month`. diff --git a/website/docs/d/sm_username_password_secret.html.markdown b/website/docs/d/sm_username_password_secret.html.markdown index c18a55df9c..64768e067f 100644 --- a/website/docs/d/sm_username_password_secret.html.markdown +++ b/website/docs/d/sm_username_password_secret.html.markdown @@ -88,7 +88,6 @@ Nested scheme for **rotation**: * `auto_rotate` - (Boolean) Determines whether Secrets Manager rotates your secret automatically.Default is `false`. If `auto_rotate` is set to `true` the service rotates your secret based on the defined interval. * `interval` - (Integer) The length of the secret rotation time interval. * Constraints: The minimum value is `1`. - * `rotate_keys` - (Boolean) Determines whether Secrets Manager rotates the private key for your public certificate automatically.Default is `false`. If it is set to `true`, the service generates and stores a new private key for your rotated certificate. * `unit` - (String) The units for the secret rotation time interval. * Constraints: Allowable values are: `day`, `month`. diff --git a/website/docs/d/sm_username_password_secret_metadata.html.markdown b/website/docs/d/sm_username_password_secret_metadata.html.markdown index e4a10d832b..781f524f1a 100644 --- a/website/docs/d/sm_username_password_secret_metadata.html.markdown +++ b/website/docs/d/sm_username_password_secret_metadata.html.markdown @@ -69,7 +69,6 @@ Nested scheme for **rotation**: * `auto_rotate` - (Boolean) Determines whether Secrets Manager rotates your secret automatically.Default is `false`. If `auto_rotate` is set to `true` the service rotates your secret based on the defined interval. * `interval` - (Integer) The length of the secret rotation time interval. * Constraints: The minimum value is `1`. - * `rotate_keys` - (Boolean) Determines whether Secrets Manager rotates the private key for your public certificate automatically.Default is `false`. If it is set to `true`, the service generates and stores a new private key for your rotated certificate. * `unit` - (String) The units for the secret rotation time interval. * Constraints: Allowable values are: `day`, `month`. diff --git a/website/docs/r/cbr_rule.html.markdown b/website/docs/r/cbr_rule.html.markdown index 82399a036b..e08049b341 100644 --- a/website/docs/r/cbr_rule.html.markdown +++ b/website/docs/r/cbr_rule.html.markdown @@ -101,7 +101,7 @@ resource "ibm_cbr_rule" "cbr_rule" { Review the argument reference that you can specify for your resource. 
* `contexts` - (Optional, List) The contexts this rule applies to. - * Constraints: The maximum length is `1000` items. The minimum length is `1` item. + * Constraints: The maximum length is `1000` items. The minimum length is `0` items. Nested scheme for **contexts**: * `attributes` - (Required, List) The attributes. * Constraints: The minimum length is `1` item. diff --git a/website/docs/r/cbr_zone.html.markdown b/website/docs/r/cbr_zone.html.markdown index 71a9db498c..1fa3dc850f 100644 --- a/website/docs/r/cbr_zone.html.markdown +++ b/website/docs/r/cbr_zone.html.markdown @@ -47,7 +47,7 @@ Review the argument reference that you can specify for your resource. * `account_id` - (Optional, String) The id of the account owning this zone. * Constraints: The maximum length is `128` characters. The minimum length is `1` character. The value must match regular expression `/^[a-zA-Z0-9\-]+$/`. * `addresses` - (Optional, List) The list of addresses in the zone. - * Constraints: The maximum length is `1000` items. The minimum length is `1` item. + * Constraints: The maximum length is `1000` items. The minimum length is `0` items. Nested scheme for **addresses**: * `ref` - (Optional, List) A service reference value. Nested scheme for **ref**: diff --git a/website/docs/r/cd_toolchain_tool_securitycompliance.html.markdown b/website/docs/r/cd_toolchain_tool_securitycompliance.html.markdown index 47011c756e..45378f9847 100644 --- a/website/docs/r/cd_toolchain_tool_securitycompliance.html.markdown +++ b/website/docs/r/cd_toolchain_tool_securitycompliance.html.markdown @@ -35,7 +35,7 @@ You can specify the following arguments for this resource. Nested schema for **parameters**: * `attachment_id` - (Optional, String) An attachment ID. An attachment is configured under a profile to define how a scan will be run. To find the attachment ID, in the browser, in the attachments list, click on the attachment link, and a panel appears with a button to copy the attachment ID. This parameter is only relevant when the `use_profile_attachment` parameter is `enabled`. * Constraints: The value must match regular expression `/^[-0-9a-f]{32,36}$/`. - * `evidence_namespace` - (Optional, String) The kind of pipeline evidence to be displayed in Security and Compliance Center for this toolchain. The values are; `cd` which will use evidence generated by a Continuous Deployment pipeline, or `cc` which will use evidence generated by a Continuous Compliance pipeline. + * `evidence_namespace` - (Optional, String) The kind of pipeline evidence to be displayed in Security and Compliance Center for this toolchain. The values are; `cd` which will use evidence generated by a Continuous Deployment (CD) pipeline, or `cc` which will use evidence generated by a Continuous Compliance (CC) pipeline. The default behavior is to use the CD evidence. * Constraints: Allowable values are: `cd`, `cc`. * `evidence_repo_url` - (Required, String) The URL to a Git repository evidence locker. The DevSecOps toolchain templates will collect and store evidence for scans and tasks in an evidence repository. This evidence URL should match the `repo_url` for a Git tool integration in this toolchain. The DevSecOps toolchain goals in the Security and Compliance Center will check the evidence repository for the pass or fail results for those goals. * `instance_crn` - (Optional, String) The Security and Compliance Center service instance CRN (Cloud Resource Name). 
It is recommended to provide an instance CRN, but when absent, the oldest service instance will be used. This parameter is only relevant when the `use_profile_attachment` parameter is `enabled`. @@ -44,7 +44,7 @@ Nested schema for **parameters**: * `profile_name` - (Optional, String) The name of a Security and Compliance Center profile. Usually, use the "IBM Cloud Framework for Financial Services" predefined profile, which contains the DevSecOps Toolchain rules. Or use a user-authored customized profile that has been configured to contain those rules. This parameter is only relevant when the `use_profile_attachment` parameter is `enabled`. * `profile_version` - (Optional, String) The version of a Security and Compliance Center profile, in SemVer format, like '0.0.0'. This parameter is only relevant when the `use_profile_attachment` parameter is `enabled`. * `scc_api_key` - (Optional, String) The IBM Cloud API key used to access the Security and Compliance Center service, for the use profile with attachment setting. This parameter is only relevant when the `use_profile_attachment` parameter is `enabled`. You can use a toolchain secret reference for this parameter. For more information, see [Protecting your sensitive data in Continuous Delivery](https://cloud.ibm.com/docs/ContinuousDelivery?topic=ContinuousDelivery-cd_data_security#cd_secure_credentials). - * `use_profile_attachment` - (Optional, String) Set to `enabled` to enable use profile with attachment, so that the scripts in the pipeline can interact with the Security and Compliance Center service. When enabled, other parameters become relevant; `scc_api_key`, `instance_crn`, `profile_name`, `profile_version`, `attachment_id`. + * `use_profile_attachment` - (Optional, String) Set to `enabled` to enable use profile with attachment, so that the scripts in the pipeline can interact with the Security and Compliance Center service to perform pre-deploy validation against compliance rules for Continuous Deployment (CD) and compliance monitoring for Continuous Compliance (CC). When enabled, other parameters become relevant; `scc_api_key`, `instance_crn`, `profile_name`, `profile_version`, `attachment_id`. * Constraints: Allowable values are: `disabled`, `enabled`. * `toolchain_id` - (Required, Forces new resource, String) ID of the toolchain to bind the tool to. * Constraints: The maximum length is `36` characters. The minimum length is `36` characters. The value must match regular expression `/^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-4[a-fA-F0-9]{3}-[89abAB][a-fA-F0-9]{3}-[a-fA-F0-9]{12}$/`. diff --git a/website/docs/r/container_cluster.html.markdown b/website/docs/r/container_cluster.html.markdown index 9770c96065..d830e4fe38 100644 --- a/website/docs/r/container_cluster.html.markdown +++ b/website/docs/r/container_cluster.html.markdown @@ -195,13 +195,13 @@ The `ibm_container_alb` provides the following [Timeouts](https://www.terraform. Review the argument references that you can specify for your resource. - `datacenter` - (Required, Forces new resource, String) The datacenter where you want to provision the worker nodes. The zone that you choose must be supported in the region where you want to create the cluster. To find supported zones, run `ibmcloud ks zones` [command line](https://cloud.ibm.com/docs/cli?topic=cloud-cli-getting-started). -- `default_pool_size` - (Optional, Integer) The number of worker nodes that you want to add to the default worker pool. 
-- `disk_encryption` - (Optional, Forces new resource, Bool) If set to **true**, the worker node disks are set up with an AES 256-bit encryption. If set to **false**, the disk encryption for the worker node is disabled. For more information, see [Encrypted disks for worker node](https://cloud.ibm.com/docs/containers?topic=containers-security#workernodes). +- `default_pool_size` - (Optional, Integer) The number of worker nodes that you want to add to the default worker pool on cluster creation. This field only affects cluster creation, to manage the default worker pool, create a dedicated worker pool resource. +- `disk_encryption` - (Optional, Bool) If set to **true**, the disks for the workers in the default worker pool are set up with an AES 256-bit encryption, otherwise they are not encrypted. For more information, see [Encrypted disks for worker node](https://cloud.ibm.com/docs/containers?topic=containers-security#workernodes). This field only affects cluster creation, to manage the default worker pool, create a dedicated worker pool resource. - `entitlement` - (Optional, String) If you purchased an IBM Cloud Cloud Pak that includes an entitlement to run worker nodes that are installed with OpenShift Container Platform, enter `entitlement` to create your cluster with that entitlement so that you are not charged twice for the OpenShift license. Note that this option can be set only when you create the cluster. After the cluster is created, the cost for the OpenShift license occurred and you cannot disable this charge. **Note** 1. Set only for the first time creation of the cluster, modification do not have any impacts. 2. Set this argument to `cloud_pak` only if you use this cluster with a Cloud Pak that has an OpenShift entitlement. - `force_delete_storage` - (Optional, Bool) If set to **true**,force the removal of persistent storage associated with the cluster during cluster deletion. Default value is **false**. **NOTE** If `force_delete_storage` parameter is used after provisioning the cluster, then, you need to execute `terraform apply` before `terraform destroy` for `force_delete_storage` parameter to take effect. -- `hardware` - (Optional, Forces new resource, String) The level of hardware isolation for your worker node. Use `dedicated` to have available physical resources dedicated to you only, or `shared` to allow physical resources to be shared with other IBM customers. This option is available for virtual machine worker node flavors only. +- `hardware` - (Optional, String) The level of hardware isolation for worker nodes in the default worker pool. Use `dedicated` to have available physical resources dedicated to you only, or `shared` to allow physical resources to be shared with other IBM customers. This option is available for virtual machine worker node flavors only. This field only affects cluster creation, to manage the default worker pool, create a dedicated worker pool resource. - `image_security_enforcement` - (Optional, Bool) Set to **true** to enable image security enforcement policies in a cluster. - `gateway_enabled` - (Optional, Bool) Set to **true** if you want to automatically create a gateway-enabled cluster. If `gateway_enabled` is set to **true**, then `private_service_endpoint` must be set to **true** at the same time. - `kms_config` - (Optional, List) Used to attach a Key Protect instance to a cluster. Nested `kms_config` block has an `instance_id`, `crk_id`, `private_endpoint` and `account_id`. 
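As a hedged illustration of the `kms_config` block described just above (the nested `account_id` and remaining fields continue in the next hunk), the sketch below shows how such a block might be wired into `ibm_container_cluster`; every ID and VLAN value is a placeholder, not a real instance:

```terraform
# Sketch only: the Key Protect instance GUID, root key ID, and VLAN IDs are placeholders.
resource "ibm_container_cluster" "example" {
  name            = "my-classic-cluster"
  datacenter      = "dal10"
  machine_type    = "b3c.4x16"
  hardware        = "shared"
  public_vlan_id  = "2234945"
  private_vlan_id = "2234946"

  kms_config {
    instance_id      = "12043812-757f-4e1e-8436-6af3245e6a69" # Key Protect instance GUID (placeholder)
    crk_id           = "0792853c-b9f9-4b35-9d9e-ffceab51d3c1" # customer root key ID (placeholder)
    private_endpoint = false
  }
}
```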
@@ -213,22 +213,22 @@ Review the argument references that you can specify for your resource. - `account_id` - (Optional, String) Account ID of KMS instance holder - if not provided, defaults to the account in use. - `kube_version` - (Optional, String) The Kubernetes or OpenShift version that you want to set up in your cluster. If the version is not specified, the default version in [IBM Cloud Kubernetes Service](https://cloud.ibm.com/docs/containers?topic=containers-cs_versions) or [Red Hat OpenShift on IBM Cloud](https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#version_types) is used. For example, to specify Kubernetes version 1.16, enter `1.16`. For OpenShift clusters, you can specify version `3.11_openshift` or `4.3.1_openshift`. - `labels`- (Optional, Map) Labels on all the workers in the default worker pool. -- `machine_type` - (Optional, Forces new resource, String) The machine type for your worker node. The machine type determines the amount of memory, CPU, and disk space that is available to the worker node. For an overview of supported machine types, see [Planning your worker node setup](https://cloud.ibm.com/docs/containers?topic=containers-planning_worker_nodes). You can retrieve the value by executing the `ibmcloud ks machine-types ` command in the IBM Cloud CLI. +- `machine_type` - (Optional, String) The machine type for the worker nodes in the default worker pool. The machine type determines the amount of memory, CPU, and disk space that is available to the worker node. For an overview of supported machine types, see [Planning your worker node setup](https://cloud.ibm.com/docs/containers?topic=containers-planning_worker_nodes). You can retrieve the value by executing the `ibmcloud ks flavor ls --zone ` command in the IBM Cloud CLI. This field only affects cluster creation, to manage the default worker pool, create a dedicated worker pool resource. - `name` - (Required, Forces new resource, String) The name of the cluster. The name must start with a letter, can contain letters, numbers, and hyphen (-), and must be 35 characters or fewer. Use a name that is unique across regions. The cluster name and the region in which the cluster is deployed form the fully qualified domain name for the Ingress subdomain. To ensure that the Ingress subdomain is unique within a region, the cluster name might be truncated and appended with a random value within the Ingress domain name. - `no_subnet` - (Optional, Forces new resource, Bool) If set to **true**, no portable subnet is created during cluster creation. The portable subnet is used to provide portable IP addresses for the Ingress subdomain and Kubernetes load balancer services. If set to **false**, a portable subnet is created by default. The default is **false**. -- `operating_system` - (Optional, Forces new resource, String) The operating system of the workers in the default worker pool. For supported options, see [Red Hat OpenShift on IBM Cloud version information](https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions) or [IBM Cloud Kubernetes Service version information](https://cloud.ibm.com/docs/containers?topic=containers-cs_versions). +- `operating_system` - (Optional, String) The operating system of the workers in the default worker pool. 
For supported options, see [Red Hat OpenShift on IBM Cloud version information](https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions) or [IBM Cloud Kubernetes Service version information](https://cloud.ibm.com/docs/containers?topic=containers-cs_versions). This field only affects cluster creation, to manage the default worker pool, create a dedicated worker pool resource. - `patch_version` - (Optional, String) Updates the worker nodes with the required patch version. The patch_version should be in the format: `patch_version_fixpack_version`. For more information, about Kubernetes version information and update, see [Kubernetes version update](https://cloud.ibm.com/docs/containers?topic=containers-cs_versions). **NOTE:** To update the patch or fix pack versions of the worker nodes, run the command `ibmcloud ks workers -c output json`. Fetch the required patch & fix pack versions from `kubeVersion.target` and set the `patch_version` parameter. - `public_service_endpoint` - (Optional, Forces new resource, Bool) If set to **true**, your cluster is set up with a public service endpoint. You can use the public service endpoint to access the Kubernetes master from the public network. To use service endpoints, your account must be enabled for [Virtual Routing and Forwarding (VRF)](https://cloud.ibm.com/docs/account?topic=account-vrf-service-endpoint#vrf). For more information, see [Worker-to-master and user-to-master communication: Service endpoints](https://cloud.ibm.com/docs/containers?topic=containers-plan_clusters#workeruser-master). If set to **false**, the public service endpoint is disabled for your cluster. -- `public_vlan_id` - (Optional, Forces new resource, String) The ID of the public VLAN that you want to use for your worker nodes. You can retrieve the VLAN ID with the `ibmcloud ks vlans --zone ` command.

* **Standard clusters**: If you create a standard cluster and you have an existing public VLAN ID for the zone where you plan to set up worker nodes, you must enter the VLAN ID. To retrieve the ID, run `ibmcloud ks vlans --zone `. If you do not have an existing public VLAN ID, or you want to connect your cluster to a private VLAN only, do not specify this option. **Note**: The prerequisite for using service endpoints, account must be enabled for [Virtual Routing and Forwarding (VRF)](https://cloud.ibm.com/docs/infrastructure/direct-link/vrf-on-ibm-cloud.html#overview-of-virtual-routing-and-forwarding-vrf-on-ibm-cloud). Account must be enabled for connectivity to service endpoints. Use the resource `ibm_container_cluster_feature` to update the `public_service_endpoint` and `private_service_endpoint`. +- `public_vlan_id` - (Optional, String) The ID of the public VLAN that you want to use for the worker nodes in the default worker pool. You can retrieve the VLAN ID with the `ibmcloud ks vlans --zone ` command.

* **Free clusters**: If you create a standard cluster and you have an existing public VLAN ID for the zone where you plan to set up worker nodes, you must enter the VLAN ID. To retrieve the ID, run `ibmcloud ks vlans --zone `. If you do not have an existing public VLAN ID, or you want to connect your cluster to a private VLAN only, do not specify this option. **Note**: The prerequisite for using service endpoints, account must be enabled for [Virtual Routing and Forwarding (VRF)](https://cloud.ibm.com/docs/infrastructure/direct-link/vrf-on-ibm-cloud.html#overview-of-virtual-routing-and-forwarding-vrf-on-ibm-cloud). Account must be enabled for connectivity to service endpoints. Use the resource `ibm_container_cluster_feature` to update the `public_service_endpoint` and `private_service_endpoint`. This field only affects cluster creation, to manage the default worker pool, create a dedicated worker pool resource. - `private_service_endpoint` - (Optional, Forces new resource, Bool) If set to **true**, your cluster is set up with a private service endpoint. When the private service endpoint is enabled, communication between the Kubernetes and the worker nodes is established over the private network. If you enable the private service endpoint, you cannot disable it later. To use service endpoints, your account must be enabled for [Virtual Routing and Forwarding (VRF)](https://cloud.ibm.com/docs/account?topic=account-vrf-service-endpoint#vrf). For more information, see [Worker-to-master and user-to-master communication: Service endpoints](https://cloud.ibm.com/docs/containers?topic=containers-plan_clusters#workeruser-master). If set to **false**, the private service endpoint is disabled and all communication to the Kubernetes master must go through the public network. -- `private_vlan_id` - (Optional, Forces new resource, String) The ID of the private VLAN that you want to use for your worker nodes. You can retrieve the VLAN ID with the `ibmcloud ks vlans --zone ` command.

* **Standard clusters**: If you create a standard cluster and you have an existing private VLAN ID for the zone where you plan to set up worker nodes, you must enter the VLAN ID. To retrieve the ID, run `ibmcloud ks vlans --zone `. If you do not have an existing private VLAN ID, do not specify this option. A private VLAN is created automatically for you. +- `private_vlan_id` - (Optional, String) The ID of the private VLAN that you want to use for the worker nodes in your default worker pool. You can retrieve the VLAN ID with the `ibmcloud ks vlans --zone ` command.

* * **Standard clusters**: If you create a standard cluster and you have an existing private VLAN ID for the zone where you plan to set up worker nodes, you must enter the VLAN ID. To retrieve the ID, run `ibmcloud ks vlans --zone `. If you do not have an existing private VLAN ID, do not specify this option. A private VLAN is created automatically for you. This field only affects cluster creation, to manage the default worker pool, create a dedicated worker pool resource. - `pod_subnet`- (Optional, String) Specify a custom subnet CIDR to provide private IP addresses for pods. The subnet must be at least `/23` or more. For more information, refer to [Pod subnet](https://cloud.ibm.com/docs/containers?topic=containers-cli-plugin-kubernetes-service-cli#pod-subnet).Yes- - `resource_group_id` - (Optional, String) The ID of the resource group where you want to provision your cluster. To retrieve the ID, use the `ibm_resource_group` data source. If no value is provided, the cluster is automatically provisioned into the `default` resource group. - `retry_patch_version` - (Optional, Integer) This argument retries the update of `patch_version` if the previous update fails. Increment the value to retry the update of `patch_version` on worker nodes. - `subnet_id` - (Optional, String) The ID of an existing subnet that you want to use for your worker nodes. To find existing subnets, run `ibmcloud ks subnets`. - `service_subnet`- (Optional, Forces new resource, String) Specify a custom subnet CIDR to provide private IP addresses for services. The subnet should be at least `/24` or more. For more information, refer to [Subnet service](https://cloud.ibm.com/docs/containers?topic=containers-cli-plugin-kubernetes-service-cli#service-subnet). - `tags` - (Optional, Array of string) A list of tags that you want to add to your cluster. Tags can help find a cluster more quickly. **Note**: For users on account to add tags to a resource, they must be assigned the appropriate [permissions](https://cloud.ibm.com/docs/resources?topic=resources-access). -- `taints` - (Optional, Set) A nested block that sets or removes Kubernetes taints for all worker nodes in a worker pool +- `taints` - (Optional, Set) A nested block that sets or removes Kubernetes taints for all worker nodes in a worker pool. This field only affects cluster creation, to manage the default worker pool, create a dedicated worker pool resource. Nested scheme for `taints`: - `key` - (Required, String) Key for taint. @@ -242,7 +242,6 @@ Review the argument references that you can specify for your resource. Nested scheme for `workers_info`: - `id` - (Optional, String) The ID of the worker node that you want to update. - `version` - (Optional, String) The Kubernetes version that you want to update your worker nodes to. -- `worker_num`- (Optional, Integer) The number of worker nodes in your cluster. This attribute creates a worker node that is not associated with a worker pool. **Note**: Conflicts with `workers`. - `wait_for_worker_update` - (Optional, Bool) Set to **true** to wait and update the Kubernetes version of worker nodes. **NOTE** Setting wait_for_worker_update to **false** is not recommended. Setting **false** results in upgrading all the worker nodes in the cluster at the same time causing the cluster downtime. - `wait_till` - (Optional, String) The cluster creation happens in multi-stages. To avoid the longer wait times for resource execution. This argument in the resource will wait for the specified stage and complete the execution. 
The default stage value is `IngressReady`. The supported stages are `MasterNodeReady` Resource waits till the master node is ready. `OneWorkerNodeReady` Resource waits till one worker node is in to ready state. `Normal` Terraform marks the creation of your cluster complete when the cluster is in a [Normal](https://cloud.ibm.com/docs/containers?topic=containers-cluster-states-reference#cluster-state-normal) state. If you plan to do reading on the cluster from a datasource, use `Normal`. At the moment wait_till `Normal` also ignores the critical and warning states the are temporary happen during cluster creation, but cannot distinguish it from actual critical or warning states. `IngressReady` Resource waits till the ingress-host and ingress-secret are available. @@ -262,6 +261,7 @@ Review the argument references that you can specify for your resource. - `region` - (Deprecated, Forces new resource, string) The region where the cluster is provisioned. If the region is not specified it will be defaulted to provider region(IC_REGION/IBMCLOUD_REGION). To get the list of supported regions please access this [link](https://containers.bluemix.net/v1/regions) and use the alias. - `wait_time_minutes` - (Deprecated, integer) The duration, expressed in minutes, to wait for the cluster to become available before declaring it as created. It is also the same amount of time waited for no active transactions before proceeding with an update or deletion. The default value is `90`. - `workers` - (Deprecated) The worker nodes that you want to add to the cluster. **Note** Conflicts with `worker_num`. Nested `workers` blocks have the following structure: +- `worker_num`- (Deprecated, Optional, Integer) The number of worker nodes in your cluster. This attribute creates a worker node that is not associated with a worker pool. **Note**: Conflicts with `workers`. Nested scheme for `workers`: - `action` - valid actions are add, reboot and reload. diff --git a/website/docs/r/container_vpc_cluster.html.markdown b/website/docs/r/container_vpc_cluster.html.markdown index ee040451a7..f64c602fba 100644 --- a/website/docs/r/container_vpc_cluster.html.markdown +++ b/website/docs/r/container_vpc_cluster.html.markdown @@ -174,10 +174,11 @@ Review the argument references that you can specify for your resource. - `disable_public_service_endpoint` - (Optional, Bool) Disable the public service endpoint to prevent public access to the Kubernetes master. Default value is `false`. - `entitlement` - (Optional, String) Entitlement reduces additional OCP Licence cost in OpenShift clusters. Use Cloud Pak with OCP Licence entitlement to create the OpenShift cluster. **Note**
  • It is set only when the first time creation of the cluster, further modifications are not impacted.
  • Set this argument to `cloud_pak` only if you use the cluster with a Cloud Pak that has an OpenShift entitlement.
. - `force_delete_storage` - (Optional, Bool) If set to **true**,force the removal of persistent storage associated with the cluster during cluster deletion. Default value is **false**. **Note** If `force_delete_storage` parameter is used after provisioning the cluster, then, you need to execute `terraform apply` before `terraform destroy` for `force_delete_storage` parameter to take effect. -- `flavor` - (Required, Forces new resource, String) The flavor of the VPC worker node that you want to use. +- `flavor` - (Required, String) The flavor of the VPC worker nodes in the default worker pool. This field only affects cluster creation, to manage the default worker pool, create a dedicated worker pool resource. - `image_security_enforcement` - (Optional, Bool) Set to **true** to enable image security enforcement policies in a cluster. - `name` - (Required, Forces new resource, String) The name of the cluster. - `kms_config` - (Optional, String) Use to attach a Key Protect instance to a cluster. Nested `kms_config` block has an `instance_id`, `crk_id`, `private_endpoint` and `account_id`. +- `host_pool_id` - (Optional, String) If provided, the default worker pool will be associated with a dedicated host pool identified by this ID. This field only affects cluster creation, to manage the default worker pool, create a dedicated worker pool resource. Nested scheme for `kms_config`: - `crk_id` - (Optional, String) The ID of the customer root key (CRK). @@ -186,13 +187,13 @@ Review the argument references that you can specify for your resource. - `account_id` - (Optional, String) Account ID of KMS instance holder - if not provided, defaults to the account in use. - `host_pool_id` - (Optional, String) If provided, the cluster will be associated with a dedicated host pool identified by this ID. - `kube_version` - (Optional, String) Specify the Kubernetes version, including the major.minor version. If you do not include this flag, the default version is used. To see available versions, run `ibmcloud ks versions`. -- `operating_system` - (Optional, Forces new resource, String) The operating system of the workers in the default worker pool. For supported options, see [Red Hat OpenShift on IBM Cloud version information](https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions) or [IBM Cloud Kubernetes Service version information](https://cloud.ibm.com/docs/containers?topic=containers-cs_versions). -- `secondary_storage` - (Optional, Forces new resource, String) The secondary storage option for the default worker pool. +- `operating_system` - (Optional, String) The operating system of the workers in the default worker pool. For supported options, see [Red Hat OpenShift on IBM Cloud version information](https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions) or [IBM Cloud Kubernetes Service version information](https://cloud.ibm.com/docs/containers?topic=containers-cs_versions). This field only affects cluster creation, to manage the default worker pool, create a dedicated worker pool resource. +- `secondary_storage` - (Optional, String) The secondary storage option for the workers in the default worker pool. This field only affects cluster creation, to manage the default worker pool, create a dedicated worker pool resource. - `patch_version` - (Optional, String) Updates the worker nodes with the required patch version. The patch_version should be in the format: `patch_version_fixpack_version`. 
For more information, about Kubernetes version information and update, see [Kubernetes version update](https://cloud.ibm.com/docs/containers?topic=containers-cs_versions). **Note** To update the patch or fix pack versions of the worker nodes, run the command `ibmcloud ks workers -c output json`. Fetch the required patch & fix pack versions from `kubeVersion.target` and set the `patch_version` parameter. - `pod_subnet` - (Optional, Forces new resource, String) Specify a custom subnet CIDR to provide private IP addresses for pods. The subnet must have a CIDR of at least `/23` or larger. For more information, see the [documentation](https://cloud.ibm.com/docs/containers?topic=containers-cli-plugin-kubernetes-service-cli#cs_subnets). Default value is `172.30.0.0/16`. - `retry_patch_version` - (Optional, Integer) This argument retries the update of `patch_version` if the previous update fails. Increment the value to retry the update of `patch_version` on worker nodes. - `service_subnet` - (Optional, Forces new resource, String) Specify a custom subnet CIDR to provide private IP addresses for services. The subnet must be at least ’/24’ or larger. For more information, see the [documentation](https://cloud.ibm.com/docs/containers?topic=containers-cli-plugin-kubernetes-service-cli#cs_messages). Default value is `172.21.0.0/16`. -- `taints` - (Optional, Set) A nested block that sets or removes Kubernetes taints for all worker nodes in a worker pool +- `taints` - (Optional, Set) A nested block that sets or removes Kubernetes taints for all worker nodes in a worker pool. This field only affects cluster creation, to manage the default worker pool, create a dedicated worker pool resource. Nested scheme for `taints`: - `key` - (Required, String) Key for taint. @@ -201,13 +202,13 @@ Review the argument references that you can specify for your resource. - `wait_for_worker_update` - (Optional, Bool) Set to **true** to wait and update the Kubernetes version of worker nodes. **NOTE** Setting wait_for_worker_update to **false** is not recommended. Setting **false** results in upgrading all the worker nodes in the cluster at the same time causing the cluster downtime. - `wait_till` - (Optional, String) The creation of a cluster can take a few minutes (for virtual servers) or even hours (for Bare Metal servers) to complete. To avoid long wait times when you run your Terraform code, you can specify the stage when you want Terraform to mark the cluster resource creation as completed. Depending on what stage you choose, the cluster creation might not be fully completed and continues to run in the background. However, your Terraform code can continue to run without waiting for the cluster to be fully created. Supported stages are:
  • `Normal`: Terraform marks the creation of your cluster complete when the cluster is in a [Normal](https://cloud.ibm.com/docs/containers?topic=containers-cluster-states-reference#cluster-state-normal) state. If you plan to do reading on the cluster from a datasource, use `Normal`. At the moment wait_till `Normal` also ignores the critical and warning states that occasionally happen during cluster creation, but cannot distinguish it from actual critical or warning states.
  • `MasterNodeReady`: Terraform marks the creation of your cluster complete when the cluster master is in a ready state.
  • `OneWorkerNodeReady`: Terraform marks the creation of your cluster complete when the master and at least one worker node are in a ready state.
  • `IngressReady`: Terraform marks the creation of your cluster complete when the cluster master and all worker nodes are in a ready state, and the Ingress subdomain is fully set up.
If you do not specify this option, `IngressReady` is used by default. You can set this option only when the cluster is created. If this option is set during a cluster update or deletion, the parameter is ignored by the Terraform provider. -- `worker_count` - (Optional, Forces new resource, Integer) The number of worker nodes per zone in the default worker pool. Default value `1`. **Note** If the requested number of worker nodes is fewer than the minimum 2 worker nodes that are required for an OpenShift cluster, cluster creation does not happen. -- `worker_labels` (Optional, Map) Labels on all the workers in the default worker pool. +- `worker_count` - (Optional, Integer) The number of worker nodes per zone in the default worker pool. Default value `1`. **Note** If the requested number of worker nodes is fewer than the minimum 2 worker nodes that are required for an OpenShift cluster, cluster creation will be rejected. This field only affects cluster creation, to manage the default worker pool, create a dedicated worker pool resource. +- `worker_labels` (Optional, Map) Labels on all the workers in the default worker pool. This field only affects cluster creation, to manage the default worker pool, create a dedicated worker pool resource. - `resource_group_id` - (Optional, Forces new resource, String) The ID of the resource group. You can retrieve the value by running `ibmcloud resource groups` or by using the `ibm_resource_group` data source. If no value is provided, the `default` resource group is used. - `tags` (Optional, Array of Strings) A list of tags that you want to associate with your VPC cluster. **Note** For users on account to add tags to a resource, they must be assigned the [appropriate permissions]/docs/account?topic=account-access). - `update_all_workers` - (Optional, Bool) Set to true, if you want to update workers Kubernetes version with the cluster kube_version. -- `vpc_id` - (Required, Forces new resource, String) The ID of the VPC that you want to use for your cluster. To list available VPCs, run `ibmcloud is vpcs`. -- `zones` - (Required, List) A nested block describes the zones of this VPC cluster's default worker pool. +- `vpc_id` - (Required, String) The ID of the VPC that you want to use for your cluster. To list available VPCs, run `ibmcloud is vpcs`. +- `zones` - (Required, List) A nested block describes the zones of this VPC cluster's default worker pool. This field only affects cluster creation, to manage the default worker pool, create a dedicated worker pool resource. Nested scheme for `zones`: - `name` - (Required, Forces new resource, String) The zone name for the default worker pool in a multizone cluster. @@ -216,6 +217,7 @@ Review the argument references that you can specify for your resource. - `crk` - (Optional, String) Root Key ID for boot volume encryption. - `kms_instance_id` - (Optional, String) Instance ID for boot volume encryption. - `kms_account_id` - (Optional, String) Account ID for boot volume encryption, if other account is providing the kms. +- `security_groups` - (Optional, List) Enables users to define specific security groups for their workers. **Note** diff --git a/website/docs/r/container_vpc_worker_pool.html.markdown b/website/docs/r/container_vpc_worker_pool.html.markdown index fa9359a1a8..693e4611a6 100644 --- a/website/docs/r/container_vpc_worker_pool.html.markdown +++ b/website/docs/r/container_vpc_worker_pool.html.markdown @@ -104,6 +104,7 @@ Review the argument references that you can specify for your resource. 
- `crk` - Root Key ID for boot volume encryption. - `kms_instance_id` - Instance ID for boot volume encryption. - `kms_account_id` - Account ID for boot volume encryption, if other account is providing the kms. +- `security_groups` - (Optional, List) Enables users to define specific security groups for their workers. ## Attribute reference In addition to all argument reference list, you can access the following attribute reference after your resource is created. diff --git a/website/docs/r/database.html.markdown b/website/docs/r/database.html.markdown index b27f42deab..6e8815f1d3 100644 --- a/website/docs/r/database.html.markdown +++ b/website/docs/r/database.html.markdown @@ -118,6 +118,53 @@ output "ICD Etcd database connection string" { ``` +### Sample database instance by using `host_flavor` attribute +An example to configure and deploy database by using `host_flavor` attribute. + +```terraform +data "ibm_resource_group" "group" { + name = "" +} + +resource "ibm_database" "" { + name = "" + plan = "standard" + location = "eu-gb" + service = "databases-for-etcd" + resource_group_id = data.ibm_resource_group.group.id + tags = ["tag1", "tag2"] + + adminpassword = "password12" + + group { + group_id = "member" + + host_flavor { + id = "b3c.8x32.encrypted" + } + + disk { + allocation_mb = 256000 + } + } + + users { + name = "user123" + password = "password12" + } + + allowlist { + address = "172.168.1.1/32" + description = "desc" + } +} + +output "ICD Etcd database connection string" { + value = "http://${ibm_database.test_acc.ibm_database_connection.icd_conn}" +} + +``` + ### Sample database instance by using `point_in_time_recovery` An example for configuring `point_in_time_recovery` time by using `ibm_database` resource. @@ -667,6 +714,16 @@ Review the argument reference that you can specify for your resource. - Nested scheme for `cpu`: - `allocation_count` - (Optional, Integer) Allocated dedicated CPU per-member. + - `host_flavor` (Set, Optional) + - Nested scheme for `host_flavor`: + - `id` - (Optional, String) **Beta feature:** The hosting infrastructure identifier. Selecting `multitenant` places your database on a logically separated, multi-tenant machine. With this identifier, minimum resource configurations apply. Alternatively, setting the identifier to any of the following host sizes places your database on the specified host size with no other tenants. + - `b3c.4x16.encrypted` + - `b3c.8x32.encrypted` + - `m3c.8x64.encrypted` + - `b3c.16x64.encrypted` + - `b3c.32x128.encrypted` + - `m3c.30x240.encrypted` + - `name` - (Required, String) A descriptive name that is used to identify the database instance. The name must not include spaces. - `offline_restore` - (Optional, Boolean) Enable or disable the Offline Restore option while performing a Point-in-time Recovery for MongoDB EE in a disaster recovery scenario when the source region is unavailable, see [Point-in-time Recovery](https://cloud.ibm.com/docs/databases-for-mongodb?topic=databases-for-mongodb-pitr&interface=api#pitr-offline-restore) - `plan` - (Required, Forces new resource, String) The name of the service plan that you choose for your instance. All databases use `standard`. `enterprise` is supported only for elasticsearch (`databases-for-elasticsearch`), cassandra (`databases-for-cassandra`), and mongodb(`databases-for-mongodb`). `platinum` is supported for elasticsearch (`databases-for-elasticsearch`). @@ -684,7 +741,7 @@ Review the argument reference that you can specify for your resource. 
- `name` - (Required, String) The user name to add to the database instance. The user name must be in the range 5 - 32 characters. - `password` - (Required, String) The password for the user. Passwords must be between 15 and 32 characters in length and contain a letter and a number. Users with an `ops_manager` user type must have a password containing a special character `~!@#$%^&*()=+[]{}|;:,.<>/?_-` as well as a letter and a number. Other user types may only use special characters `-_`. - `type` - (Optional, String) The type for the user. Examples: `database`, `ops_manager`, `read_only_replica`. The default value is `database`. - - `role` - (Optional, String) The role for the user. Only available for `ops_manager` user type. Examples: `group_read_only`, `group_data_access_admin`. + - `role` - (Optional, String) The role for the user. Only available for `ops_manager` user type or Redis 6.0 and above. Example roles for `ops_manager`: `group_read_only`, `group_data_access_admin`. For, Redis 6.0 and above, `role` must be in Redis ACL syntax for adding and removing command categories i.e. `+@category` or `-@category`. Allowed command categories are `all`, `admin`, `read`, `write`. Example Redis `role`: `-@all +@read` - `allowlist` - (Optional, List of Objects) A list of allowed IP addresses for the database. Multiple blocks are allowed. diff --git a/website/docs/r/iam_authorization_policy.html.markdown b/website/docs/r/iam_authorization_policy.html.markdown index 2758bb8b9d..bac30292cc 100644 --- a/website/docs/r/iam_authorization_policy.html.markdown +++ b/website/docs/r/iam_authorization_policy.html.markdown @@ -66,9 +66,9 @@ resource "ibm_resource_instance" "instance2" { resource "ibm_iam_authorization_policy" "policy" { source_service_name = "cloud-object-storage" - source_resource_instance_id = ibm_resource_instance.instance1.id + source_resource_instance_id = ibm_resource_instance.instance1.guid target_service_name = "kms" - target_resource_instance_id = ibm_resource_instance.instance2.id + target_resource_instance_id = ibm_resource_instance.instance2.guid roles = ["Reader"] } @@ -156,6 +156,40 @@ resource "ibm_iam_authorization_policy" "policy" { } ``` +### Authorization policy between all resource groups in an account and a target service using resource attributes + +```terraform + +resource "ibm_resource_group" "source_resource_group" { + name = "123123" +} + +resource "ibm_iam_authorization_policy" "policy" { + roles = [ + "Reader", + ] + + resource_attributes { + name = "accountId" + operator = "stringEquals" + value = "12345" + } + resource_attributes { + name = "serviceName" + operator = "stringEquals" + value = "cloud-object-storage" + } + + subject_attributes { + name = "accountId" + value = "12345" + } + subject_attributes { + name = "resourceGroupId" + value = "*" + } +} +``` ### Authorization policy between source service and target resource type "resource-group" using resource attributes diff --git a/website/docs/r/is_share.html.markdown b/website/docs/r/is_share.html.markdown index ffaa72f235..2319e75337 100644 --- a/website/docs/r/is_share.html.markdown +++ b/website/docs/r/is_share.html.markdown @@ -8,7 +8,7 @@ subcategory: "VPC infrastructure" # ibm\_is_share -Provides a resource for Share. This allows Share to be created, updated and deleted. +Provides a resource for Share. This allows Share to be created, updated and deleted. For more information, about share replication, see [Share replication](https://cloud.ibm.com/docs/vpc?topic=vpc-file-storage-replication). 
~> **NOTE** New shares should be created with profile `dp2`. Old Tiered profiles will be deprecated soon. @@ -51,6 +51,25 @@ resource "ibm_is_share" "example-2" { } } ``` +## Example Usage (Create a cross regional replication) +```terraform +resource "ibm_is_share" "example-3" { + provider = ibm.syd + access_control_mode = "security_group" + name = "my-share" + size = 200 + profile = "dp2" + zone = "au-syd-2" +} +resource "ibm_is_share" "example-4" { + provider = ibm.ussouth + zone = "us-south-3" + source_share_crn = ibm_is_share.example-3.crn + name = "my-replica1" + profile = "dp2" + replication_cron_spec = "0 */5 * * *" +} +``` ## Argument Reference The following arguments are supported: @@ -130,6 +149,7 @@ The following arguments are supported: - `replication_cron_spec` - (Optional, String) The cron specification for the file share replication schedule. - `size` - (Required, Integer) The size of the file share rounded up to the next gigabyte. - `source_share` - (Optional, String) The ID of the source file share for this replica file share. The specified file share must not already have a replica, and must not be a replica. +- `source_share_crn` - (Optional, String) The CRN of the source file share. - `tags` - (Optional, List of Strings) The list of user tags to attach to the share. - `zone` - (Required, string) The globally unique name for this zone. @@ -146,7 +166,11 @@ The following attributes are exported: - `href` - (String) The URL for this share. - `id` - (String) The unique identifier of the Share. - `iops` - (Integer) The maximum input/output operation performance bandwidth per second for the file share. -- `last_sync_at` - (String) The date and time that the file share was last synchronized to its replica.This property will be present when the `replication_role` is `source`. +- `latest_sync` - (List) Information about the latest synchronization for this file share. +Nested `latest_sync` blocks have the following structure: + - `completed_at` - (String) The completed date and time of last synchronization between the replica share and its source. + - `data_transferred` - (Integer) The data transferred (in bytes) in the last synchronization between the replica and its source. + - `started_at` - (String) The start date and time of last synchronization between the replica share and its source. - `latest_job` - (List) The latest job associated with this file share.This property will be absent if no jobs have been created for this file share. Nested `latest_job` blocks have the following structure: - `status` - (String) The status of the file share job - `status_reasons` - (List) The reasons for the file share job status (if any). Nested `status_reasons` blocks have the following structure: diff --git a/website/docs/r/is_vpc.html.markdown b/website/docs/r/is_vpc.html.markdown index 0249e5778b..1ef48546e8 100644 --- a/website/docs/r/is_vpc.html.markdown +++ b/website/docs/r/is_vpc.html.markdown @@ -83,6 +83,22 @@ resource "ibm_is_vpc" "example-system" { } } +// delegated type resolver + +resource "ibm_is_vpc" "example-delegated" { + // required : add a dependency on ibm dns custom resolver of the hub vpc + depends_on = [ ibm_dns_custom_resolver.example-hub ] + name = "example-hub-false-delegated" + dns { + enable_hub = false + resolver { + type = "delegated" + vpc_id = ibm_is_vpc.example.id + dns_binding_name = "example-vpc-binding" + } + } +} + ``` ## Timeouts @@ -116,6 +132,9 @@ Review the argument references that you can specify for your resource. 
- `resolver` - (Optional, List) The zone list this backup policy plan will create snapshot clones in. Nested scheme for `resolver`: + - `dns_binding_id` - (String) The VPC dns binding id whose DNS resolver provides the DNS server addresses for this VPC. (If any) + - `dns_binding_name` - (Optional, String) The VPC dns binding name whose DNS resolver provides the DNS server addresses for this VPC. Only applicable for `delegated`, providing value would create binding with this name. + ~> **Note:** `manual_servers` must be set if and only if `dns.resolver.type` is manual. - `manual_servers` - (Optional, List) The DNS servers to use for this VPC, replacing any existing servers. All the DNS servers must either: **have a unique zone_affinity**, or **not have a zone_affinity**. @@ -139,7 +158,7 @@ Review the argument references that you can specify for your resource. ~> **Note:** Updating from `manual` requires dns resolver `manual_servers` to be specified as null.
Updating to `manual` requires dns resolver `manual_servers` to be specified and not empty.
- Updating from `delegated` requires `dns.resolver.vpc` to be specified as null. + Updating from `delegated` requires `dns.resolver.vpc` to be specified as null. If type is `delegated` while creation then `vpc_id` is required - `vpc_id` - (Optional, List) (update only) The VPC ID to provide DNS server addresses for this VPC. The specified VPC must be configured with a DNS Services custom resolver and must be in one of this VPC's DNS resolution bindings. Mutually exclusive with `vpc_crn` ~> **Note:** diff --git a/website/docs/r/is_vpc_routing_table.html.markdown b/website/docs/r/is_vpc_routing_table.html.markdown index e84b962497..ace815208c 100644 --- a/website/docs/r/is_vpc_routing_table.html.markdown +++ b/website/docs/r/is_vpc_routing_table.html.markdown @@ -38,11 +38,27 @@ resource "ibm_is_vpc_routing_table" "example" { ``` +## Example usage: Advertising routes +``` +resource "ibm_is_vpc" "example" { + name = "example-vpc" +} +resource "ibm_is_vpc_routing_table" "is_vpc_routing_table_instance" { + vpc = ibm_is_vpc.example.id + name = "example-vpc-routing-table" + route_direct_link_ingress = true + route_transit_gateway_ingress = false + route_vpc_zone_ingress = false + advertise_routes_to = ["direct_link", "transit_gateway"] + +} +``` # Example usage for accept_routes_from_resource_type ```terraform resource "ibm_is_vpc" "example" { name = "example-vpc" } + resource "ibm_is_vpc_routing_table" "example" { vpc = ibm_is_vpc.example.id name = "example-vpc-routing-table" @@ -51,10 +67,17 @@ resource "ibm_is_vpc_routing_table" "example" { route_vpc_zone_ingress = false accept_routes_from_resource_type = ["vpn_server"] } - ``` + + ## Argument reference Review the argument references that you can specify for your resource. + +- `advertise_routes_to` - (Optional, List) The ingress sources to advertise routes to. Routes in the table with `advertise` enabled will be advertised to these sources. + + ->**Options** An ingress source that routes can be advertised to:
+ **•** `direct_link` (requires `route_direct_link_ingress` be set to `true`)
+ **•** `transit_gateway` (requires `route_transit_gateway_ingress` be set to `true`) - `accept_routes_from_resource_type` - (Optional, List) The resource type filter specifying the resources that may create routes in this routing table. Ex: `vpn_server`, `vpn_gateway` - `created_at` - (Timestamp) The date and time when the routing table was created. - `name` - (Optional, String) The routing table name. diff --git a/website/docs/r/is_vpc_routing_table_route.html.markdown b/website/docs/r/is_vpc_routing_table_route.html.markdown index 234faacca6..b17eec3634 100644 --- a/website/docs/r/is_vpc_routing_table_route.html.markdown +++ b/website/docs/r/is_vpc_routing_table_route.html.markdown @@ -30,7 +30,8 @@ resource "ibm_is_vpc" "example" { resource "ibm_is_vpc_routing_table" "example" { vpc = ibm_is_vpc.example.id name = "example-routing-table" - route_direct_link_ingress = false + advertise_routes_to = ["direct_link", "transit_gateway"] + route_direct_link_ingress = true route_transit_gateway_ingress = false route_vpc_zone_ingress = false } @@ -41,6 +42,7 @@ resource "ibm_is_vpc_routing_table_route" "example" { name = "custom-route-2" destination = "192.168.4.0/24" action = "deliver" + advertise = true next_hop = ibm_is_vpn_gateway_connection.example.gateway_connection // Example value "10.0.0.4" } ``` @@ -63,6 +65,7 @@ resource "ibm_is_vpc_routing_table_route" "example" { Review the argument references that you can specify for your resource. - `action` - (Optional, String) The action to perform with a packet matching the route `delegate`, `delegate_vpc`, `deliver`, `drop`. +- `advertise` - (Optional, Bool) Indicates whether this route will be advertised to the ingress sources specified by the `advertise_routes_to` routing table's property. - `destination` - (Required, Forces new resource, String) The destination of the route. - `name` - (Optional, String) The user-defined name of the route. If unspecified, the name will be a hyphenated list of randomly selected words. You need to provide unique name within the VPC routing table the route resides in. - `next_hop` - (Required, String) The next hop of the route. It accepts IP address or a VPN gateway connection ID (`ibm_is_vpn_gateway_connection`) of a VPN Gateway (`ibm_is_vpn_gateway`) with the `mode = "route"` argument and in the same VPC as the route table for this route for an egress route. For action other than deliver, you must specify `0.0.0.0`. diff --git a/website/docs/r/is_vpn_gateway.html.markdown b/website/docs/r/is_vpn_gateway.html.markdown index e5b632a5a9..a0e030c34d 100644 --- a/website/docs/r/is_vpn_gateway.html.markdown +++ b/website/docs/r/is_vpn_gateway.html.markdown @@ -74,12 +74,10 @@ In addition to all argument reference list, you can access the following attribu - `address` - (String) The public IP address assigned to the VPN gateway member. - `private_address` - (String) The private IP address assigned to the VPN gateway member. - `role` - (String) The high availability role assigned to the VPN gateway member. - - `status` - (String) The status of the VPN gateway member. - `public_ip_address` - (String) The IP address assigned to this VPN gateway. - `public_ip_address2` - (String) The Second Public IP address assigned to this VPN gateway member. - `private_ip_address` - (String) The Private IP address assigned to this VPN gateway member. - `private_ip_address2` - (String) The Second Private IP address assigned to this VPN gateway. -- `status` - (String) The status of the VPN gateway. 
Supported values are **available**, **deleting**, **failed**, or **pending**. - `health_reasons` - (List) The reasons for the current health_state (if any). Nested scheme for `health_reasons`: diff --git a/website/docs/r/mqcloud_keystore_certificate.html.markdown b/website/docs/r/mqcloud_keystore_certificate.html.markdown index e3b1a0108c..b4740f2661 100644 --- a/website/docs/r/mqcloud_keystore_certificate.html.markdown +++ b/website/docs/r/mqcloud_keystore_certificate.html.markdown @@ -14,6 +14,7 @@ Create, update, and delete mqcloud_keystore_certificates with this resource. ```hcl resource "ibm_mqcloud_keystore_certificate" "mqcloud_keystore_certificate_instance" { + certificate_file = filebase64("certificate_file.data") label = "label" queue_manager_id = var.queue_manager_id service_instance_guid = var.service_instance_guid @@ -24,7 +25,9 @@ resource "ibm_mqcloud_keystore_certificate" "mqcloud_keystore_certificate_instan You can specify the following arguments for this resource. -* `label` - (Required, Forces new resource, String) Certificate label in queue manager store. +* `certificate_file` - (Required, Forces new resource, String) The filename and path of the certificate to be uploaded. + * Constraints: The maximum length is `65537` characters. The minimum length is `1500` characters. +* `label` - (Required, Forces new resource, String) The label to use for the certificate to be uploaded. * Constraints: The maximum length is `64` characters. The minimum length is `1` character. The value must match regular expression `/^[a-zA-Z0-9_.]*$/`. * `queue_manager_id` - (Required, Forces new resource, String) The id of the queue manager to retrieve its full details. * Constraints: The maximum length is `32` characters. The minimum length is `32` characters. The value must match regular expression `/^[0-9a-fA-F]{32}$/`. @@ -67,6 +70,10 @@ The `id` property can be formed from `service_instance_guid`, `queue_manager_id` * `queue_manager_id`: A string in the format `b8e1aeda078009cf3db74e90d5d42328`. The id of the queue manager to retrieve its full details. * `certificate_id`: A string. ID of the certificate. +> ### Important Note +> When configuring the `ibm_mqcloud_keystore_certificate` resource in the root module: +> Ensure to set the `certificate_file` value to an empty string (`certificate_file=""`). This step is crucial as we are not downloading the certificate to the local system. + # Syntax
 $ terraform import ibm_mqcloud_keystore_certificate.mqcloud_keystore_certificate <service_instance_guid>/<queue_manager_id>/<certificate_id>
diff --git a/website/docs/r/mqcloud_queue_manager.html.markdown b/website/docs/r/mqcloud_queue_manager.html.markdown
index b26b62631e..0994cfb932 100644
--- a/website/docs/r/mqcloud_queue_manager.html.markdown
+++ b/website/docs/r/mqcloud_queue_manager.html.markdown
@@ -13,11 +13,18 @@ Create, update, and delete mqcloud_queue_managers with this resource.
 ## Example Usage
 
 ```hcl
+resource "ibm_resource_instance" "mqcloud_instance" {
+    name     = "mqcloud-service-name"
+    service  = "mqcloud"
+    plan     = "default"
+    location = "eu-de"
+}
+
 resource "ibm_mqcloud_queue_manager" "mqcloud_queue_manager_instance" {
   display_name = "A test queue manager"
-  location = "reserved-eu-fr-cluster-f884"
+  location = "reserved-eu-de-cluster-f884"
   name = "testqm"
-  service_instance_guid = var.service_instance_guid
+  service_instance_guid = ibm_resource_instance.mqcloud_instance.guid
   size = "lite"
   version = "9.3.2_2"
 }
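
If the MQ on Cloud service instance already exists, a lookup can replace the `ibm_resource_instance` resource shown above. A minimal sketch, assuming the instance name and the `guid` attribute exposed by the `ibm_resource_instance` data source; adjust the names to your environment:

```hcl
# Look up an existing MQ on Cloud service instance (hypothetical name)
# instead of creating a new one.
data "ibm_resource_instance" "existing_mqcloud_instance" {
  name     = "mqcloud-service-name"
  service  = "mqcloud"
  location = "eu-de"
}

resource "ibm_mqcloud_queue_manager" "mqcloud_queue_manager_from_existing" {
  display_name          = "A test queue manager"
  location              = "reserved-eu-de-cluster-f884"
  name                  = "testqm"
  service_instance_guid = data.ibm_resource_instance.existing_mqcloud_instance.guid
  size                  = "lite"
  version               = "9.3.2_2"
}
```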
diff --git a/website/docs/r/mqcloud_truststore_certificate.html.markdown b/website/docs/r/mqcloud_truststore_certificate.html.markdown
index ebc00ef103..66d9ab191b 100644
--- a/website/docs/r/mqcloud_truststore_certificate.html.markdown
+++ b/website/docs/r/mqcloud_truststore_certificate.html.markdown
@@ -14,6 +14,7 @@ Create, update, and delete mqcloud_truststore_certificates with this resource.
 
 ```hcl
 resource "ibm_mqcloud_truststore_certificate" "mqcloud_truststore_certificate_instance" {
+  certificate_file = filebase64("certificate_file.data")
   label = "label"
   queue_manager_id = var.queue_manager_id
   service_instance_guid = var.service_instance_guid
@@ -24,7 +25,9 @@ resource "ibm_mqcloud_truststore_certificate" "mqcloud_truststore_certificate_in
 
 You can specify the following arguments for this resource.
 
-* `label` - (Required, Forces new resource, String) Certificate label in queue manager store.
+* `certificate_file` - (Required, Forces new resource, String) The filename and path of the certificate to be uploaded.
+  * Constraints: The maximum length is `65537` characters. The minimum length is `1500` characters.
+* `label` - (Required, Forces new resource, String) The label to use for the certificate to be uploaded.
   * Constraints: The maximum length is `64` characters. The minimum length is `1` character. The value must match regular expression `/^[a-zA-Z0-9_.]*$/`.
 * `queue_manager_id` - (Required, Forces new resource, String) The id of the queue manager to retrieve its full details.
   * Constraints: The maximum length is `32` characters. The minimum length is `32` characters. The value must match regular expression `/^[0-9a-fA-F]{32}$/`.
@@ -64,6 +67,10 @@ The `id` property can be formed from `service_instance_guid`, `queue_manager_id`
 * `queue_manager_id`: A string in the format `b8e1aeda078009cf3db74e90d5d42328`. The id of the queue manager to retrieve its full details.
 * `certificate_id`: A string. Id of the certificate.
 
+> ### Important Note
+> When configuring the `ibm_mqcloud_truststore_certificate` resource in the root module:
+> Ensure you set the `certificate_file` value to an empty string (`certificate_file=""`). This step is crucial because the certificate is not downloaded to the local system.
+
 # Syntax
 
 $ terraform import ibm_mqcloud_truststore_certificate.mqcloud_truststore_certificate <service_instance_guid>/<queue_manager_id>/<certificate_id>
diff --git a/website/docs/r/pi_image.html.markdown b/website/docs/r/pi_image.html.markdown
index ecdc35a01b..d7558bcea4 100644
--- a/website/docs/r/pi_image.html.markdown
+++ b/website/docs/r/pi_image.html.markdown
@@ -78,9 +78,8 @@ Review the argument references that you can specify for your resource.
   - `pi_image_bucket_region` is required with `pi_image_bucket_name`
 - `pi_image_secret_key` - (Optional, String, Sensitive) Cloud Object Storage secret key; required for buckets with private access.
   - `pi_image_secret_key` is required with `pi_image_access_key`
-- `pi_image_storage_pool` - (Optional, String) Storage pool where the image will be loaded, if provided then `pi_image_storage_type` and `pi_affinity_policy` will be ignored.
-- `pi_image_storage_type` - (Optional, String) Type of storage. Will be ignored if `pi_image_storage_pool` or `pi_affinity_policy` is provided. If only using `pi_image_storage_type` for storage selection then the storage pool with the most available space will be selected.
-
+- `pi_image_storage_pool` - (Optional, String) Storage pool where the image will be loaded; if provided, `pi_affinity_policy` is ignored. Used only when importing an image from cloud storage.
+- `pi_image_storage_type` - (Optional, String) Type of storage. If not provided, the storage type defaults to `tier3`. Used only when importing an image from cloud storage.
 
 ## Attribute reference
 In addition to all argument reference list, you can access the following attribute reference after your resource is created.
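
To make the cloud-storage import path above concrete, here is a hedged sketch that sets `pi_image_storage_type` explicitly; the bucket, object, and credential values are placeholders, and `pi_image_bucket_file_name` is assumed to be the argument that names the image object in the bucket.

```terraform
# Import a custom image from a COS bucket; the storage type applies only to
# this cloud-storage import path and would default to tier3 if omitted.
resource "ibm_pi_image" "cos_import" {
  pi_cloud_instance_id      = var.pi_cloud_instance_id
  pi_image_name             = "rhel-custom-image"
  pi_image_bucket_name      = "example-images-bucket"     # placeholder bucket
  pi_image_bucket_region    = "us-south"                  # placeholder region
  pi_image_bucket_file_name = "rhel-custom-image.ova.gz"  # placeholder object
  pi_image_access_key       = var.cos_access_key          # required for private buckets
  pi_image_secret_key       = var.cos_secret_key
  pi_image_storage_type     = "tier3"
}
```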
diff --git a/website/docs/r/pi_instance.html.markdown b/website/docs/r/pi_instance.html.markdown
index 1001d49f34..429320d53d 100644
--- a/website/docs/r/pi_instance.html.markdown
+++ b/website/docs/r/pi_instance.html.markdown
@@ -93,9 +93,9 @@ Review the argument references that you can specify for your resource.
   - Required only when creating SAP instances.
 - `pi_sap_deployment_type` - (Optional, String) Custom SAP deployment type information (For Internal Use Only).
 - `pi_shared_processor_pool` - (Optional, String) The shared processor pool for instance deployment. Conflicts with `pi_sap_profile_id`.
-- `pi_storage_pool` - (Optional, String) Storage Pool for server deployment; if provided then `pi_affinity_policy` and `pi_storage_type` will be ignored.
+- `pi_storage_pool` - (Optional, String) Storage pool for server deployment; if provided, `pi_affinity_policy` is ignored. Only valid when you deploy one of the IBM-supplied stock images. The storage pool for a custom image (an imported image or an image created from a VM capture) defaults to the storage pool the image was created in.
 - `pi_storage_pool_affinity` - (Optional, Bool) Indicates if all volumes attached to the server must reside in the same storage pool. The default value is `true`. To attach data volumes from a different storage pool (mixed storage) set to `false` and use `pi_volume_attach` resource. Once set to `false`, cannot be set back to `true` unless all volumes attached reside in the same storage type and pool.
-- `pi_storage_type` - (Optional, String) - Storage type for server deployment. Only valid when you deploy one of the IBM supplied stock images. Storage type for a custom image (an imported image or an image that is created from a VM capture) defaults to the storage type the image was created in
+- `pi_storage_type` - (Optional, String) Storage type for server deployment. If not provided, the storage type defaults to `tier3`.
 - `pi_storage_connection` - (Optional, String) - Storage Connectivity Group (SCG) for server deployment. Only supported value is `vSCSI`.
 - `pi_sys_type` - (Optional, String) The type of system on which to create the VM (s922/e880/e980/s1022).
   - Supported SAP system types are (e880/e980).
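
A hedged sketch of the behavior described above: setting `pi_storage_pool` pins the deployment to a specific pool (and `pi_affinity_policy` is then ignored), while leaving both `pi_storage_pool` and `pi_storage_type` unset falls back to `tier3`. The image, network, and pool values below are placeholders.

```terraform
# Stock-image deployment into an explicit storage pool; pi_affinity_policy
# would be ignored here because pi_storage_pool is set.
resource "ibm_pi_instance" "example" {
  pi_cloud_instance_id = var.pi_cloud_instance_id
  pi_instance_name     = "example-vm"
  pi_image_id          = var.stock_image_id   # placeholder stock image ID
  pi_memory            = 4
  pi_processors        = 0.25
  pi_proc_type         = "shared"
  pi_sys_type          = "s922"
  pi_storage_pool      = "Tier3-Flash-1"      # placeholder pool name

  pi_network {
    network_id = var.network_id               # placeholder network ID
  }
}
```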
diff --git a/website/docs/r/pi_volume.html.markdown b/website/docs/r/pi_volume.html.markdown
index 31ca91e018..60b0ac5cc0 100644
--- a/website/docs/r/pi_volume.html.markdown
+++ b/website/docs/r/pi_volume.html.markdown
@@ -57,10 +57,10 @@ Review the argument references that you can specify for your resource.
 - `pi_cloud_instance_id` - (Required, String) The GUID of the service instance associated with an account.
 - `pi_replication_enabled` - (Optional, Bool) Indicates if the volume should be replication enabled or not.
 - `pi_volume_name` - (Required, String) The name of the volume.
-- `pi_volume_pool` - (Optional, String) Volume pool where the volume will be created; if provided then `pi_volume_type` and `pi_affinity_policy` values will be ignored.
+- `pi_volume_pool` - (Optional, String) Volume pool where the volume will be created; if provided, the `pi_affinity_policy` value is ignored.
 - `pi_volume_shareable` - (Required, Bool) If set to **true**, the volume can be shared across Power Systems Virtual Server instances. If set to **false**, you can attach it only to one instance. 
 - `pi_volume_size`  - (Required, Integer) The size of the volume in gigabytes. 
-- `pi_volume_type` - (Optional, String) Type of Disk, required if `pi_affinity_policy` and `pi_volume_pool` not provided, otherwise ignored. Supported values are `ssd`, `standard`, `tier1`, and `tier3`.
+- `pi_volume_type` - (Optional, String) Type of disk. If not provided, the disk type defaults to `tier3`.
 
 ## Attribute reference
 In addition to all argument reference list, you can access the following attribute reference after your resource is created.
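
A minimal sketch of the default noted above: with `pi_volume_type` and `pi_volume_pool` omitted, the disk type falls back to `tier3`.

```terraform
# Minimal volume; no pi_volume_type or pi_volume_pool, so the disk type
# defaults to tier3.
resource "ibm_pi_volume" "example" {
  pi_cloud_instance_id = var.pi_cloud_instance_id
  pi_volume_name       = "example-volume"
  pi_volume_size       = 20
  pi_volume_shareable  = false
}
```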
diff --git a/website/docs/r/pi_volume_clone.html.markdown b/website/docs/r/pi_volume_clone.html.markdown
new file mode 100644
index 0000000000..616b6b2f39
--- /dev/null
+++ b/website/docs/r/pi_volume_clone.html.markdown
@@ -0,0 +1,79 @@
+---
+
+subcategory: "Power Systems"
+layout: "ibm"
+page_title: "IBM: pi_volume_clone"
+description: |-
+   Manages IBM Volume Clone in the Power Virtual Server cloud.
+---
+
+# ibm_pi_volume_clone
+Create a volume clone. For more information about managing volume clones, see [getting started with IBM Power Systems Virtual Servers](https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-getting-started).
+
+## Example usage
+The following example creates a volume clone.
+
+```terraform
+resource "ibm_pi_volume_clone" "testacc_volume_clone" {
+  pi_cloud_instance_id    = ""
+  pi_volume_clone_name    = "test-volume-clone"
+  pi_volume_ids           = [""]
+  pi_target_storage_tier  = ""
+  pi_replication_enabled  = true
+}
+```
+
+**Note**
+* See [supported regions](https://cloud.ibm.com/apidocs/power-cloud#endpoint) for the list of endpoints.
+* If a Power cloud instance is provisioned at `lon04`, the provider-level attributes should be as follows:
+  * `region` - `lon`
+  * `zone` - `lon04`
+
+  Example usage:
+  
+  ```terraform
+    provider "ibm" {
+      region    =   "lon"
+      zone      =   "lon04"
+    }
+  ```
+  
+## Timeouts
+
+ibm_pi_volume_clone provides the following [timeouts](https://www.terraform.io/docs/language/resources/syntax.html) configuration options:
+
+- **create** - (Default 15 minutes) Used for creating volume clone.
+- **delete** - (Default 15 minutes) Used for deleting volume clone.
+
+## Argument reference 
+Review the argument references that you can specify for your resource. 
+
+- `pi_cloud_instance_id` - (Required, String) The GUID of the service instance associated with an account.
+- `pi_replication_enabled` - (Optional, Boolean) Indicates whether the cloned volume should have replication enabled. If no value is provided, it will default to the replication status of the source volume(s).
+- `pi_target_storage_tier` - (Optional, String) The storage tier for the cloned volume(s).
+- `pi_volume_clone_name` - (Required, String) The base name of the newly cloned volume(s).
+- `pi_volume_ids` - (Required, Set of String) List of volumes to be cloned.
+
+## Attribute reference
+In addition to all argument reference list, you can access the following attribute reference after your resource is created.
+
+- `cloned_volumes` - (List of objects) The list of cloned volumes.
+  
+  Nested scheme for `cloned_volumes`:
+  - `clone_volume_id` - (String) The ID of the newly cloned volume.
+  - `source_volume_id` - (String) The ID of the source volume.
+- `failure_reason` - (String) The reason for the failure of the volume clone task.
+- `id` - (String) The unique identifier of the volume clone. The ID is composed of `<pi_cloud_instance_id>/<task_id>`.
+- `percent_complete` - (Integer) The completion percentage of the volume clone task.
+- `status` - (String) The status of the volume clone task.
+- `task_id` - (String) The ID of the volume clone task.
+
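+As a usage sketch (the output name is illustrative), the nested `cloned_volumes` objects can be flattened into a map from source volume ID to clone volume ID:
+
+```terraform
+output "clone_volume_map" {
+  description = "Maps each source volume ID to its cloned volume ID."
+  value = {
+    for v in ibm_pi_volume_clone.testacc_volume_clone.cloned_volumes :
+    v.source_volume_id => v.clone_volume_id
+  }
+}
+```
+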
+## Import
+
+The `ibm_pi_volume_clone` resource can be imported by using `pi_cloud_instance_id` and `task_id`.
+
+**Example**
+
+```
+$ terraform import ibm_pi_volume_clone.example d7bec597-4726-451f-8a63-e62e6f19c32c/cea6651a-bc0a-4438-9f8a-a0770bbf3ebb
+```
diff --git a/website/docs/r/pi_vpn_connection.html.markdown b/website/docs/r/pi_vpn_connection.html.markdown
index 06a2b1d428..1fc2a0117a 100644
--- a/website/docs/r/pi_vpn_connection.html.markdown
+++ b/website/docs/r/pi_vpn_connection.html.markdown
@@ -8,7 +8,7 @@ description: |-
 ---
 
 # ibm_pi_vpn_connection
-Create, update, or delete a VPN connection. For more information, about IBM power virtual server cloud, see [getting started with IBM Power Systems Virtual Servers](https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-getting-started).
+Create, update, delete a VPN connection. For more information, about IBM power virtual server cloud, see [getting started with IBM Power Systems Virtual Servers](https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-getting-started).
 
 ## Example usage
 The following example creates a VPN Connection.
diff --git a/website/docs/r/project.html.markdown b/website/docs/r/project.html.markdown
index b447a1d627..9db8f477ef 100644
--- a/website/docs/r/project.html.markdown
+++ b/website/docs/r/project.html.markdown
@@ -30,8 +30,8 @@ You can specify the following arguments for this resource.
 
 * `definition` - (Required, List) The definition of the project.
 Nested schema for **definition**:
-	* `description` - (Optional, String) A brief explanation of the project's use in the configuration of a deployable architecture. It is possible to create a project without providing a description.
-	  * Constraints: The maximum length is `1024` characters. The minimum length is `0` characters. The value must match regular expression `/^$|^(?!\\s)(?!.*\\s$)[^\\x00-\\x1F]*$/`.
+	* `description` - (Required, String) A brief explanation of the project's use in the configuration of a deployable architecture. It is possible to create a project without providing a description.
+	  * Constraints: The default value is ``. The maximum length is `1024` characters. The minimum length is `0` characters. The value must match regular expression `/^$|^(?!\\s)(?!.*\\s$)[^\\x00-\\x1F]*$/`.
 	* `destroy_on_delete` - (Required, Boolean) The policy that indicates whether the resources are destroyed or not when a project is deleted.
 	* `name` - (Required, String) The name of the project.  It is unique within the account across regions.
 	  * Constraints: The maximum length is `128` characters. The minimum length is `1` character. The value must match regular expression `/^(?!\\s)(?!.*\\s$)[^'"`<>{}\\x00-\\x1F]+$/`.
@@ -52,9 +52,11 @@ Nested schema for **configs**:
 	* `definition` - (List) The name and description of a project configuration.
 	Nested schema for **definition**:
 		* `description` - (String) A project configuration description.
-		  * Constraints: The maximum length is `1024` characters. The minimum length is `0` characters. The value must match regular expression `/^$|^(?!\\s)(?!.*\\s$)[^\\x00-\\x1F]*$/`.
+		  * Constraints: The default value is ``. The maximum length is `1024` characters. The minimum length is `0` characters. The value must match regular expression `/^$|^(?!\\s)(?!.*\\s$)[^\\x00-\\x1F]*$/`.
 		* `name` - (String) The configuration name. It is unique within the account across projects and regions.
 		  * Constraints: The maximum length is `128` characters. The minimum length is `1` character. The value must match regular expression `/^[a-zA-Z0-9][a-zA-Z0-9-_ ]*$/`.
+	* `deployment_model` - (String) The configuration type.
+	  * Constraints: Allowable values are: `project_deployed`, `user_deployed`.
 	* `href` - (String) A URL.
 	  * Constraints: The maximum length is `256` characters. The minimum length is `1` character. The value must match regular expression `/^(http(s)?:\/\/)[a-zA-Z0-9\\$\\-_\\.+!\\*'\\(\\),=&?\/]+$/`.
 	* `id` - (String) The ID of the configuration. If this parameter is empty, an ID is automatically created for the configuration.
@@ -73,7 +75,7 @@ Nested schema for **configs**:
 		* `id` - (String) The unique ID.
 		  * Constraints: The maximum length is `128` characters. The value must match regular expression `/^[\\.\\-0-9a-zA-Z]+$/`.
 	* `state` - (String) The state of the configuration.
-	  * Constraints: Allowable values are: `approved`, `deleted`, `deleting`, `deleting_failed`, `discarded`, `draft`, `deployed`, `deploying_failed`, `deploying`, `superseded`, `undeploying`, `undeploying_failed`, `validated`, `validating`, `validating_failed`.
+	  * Constraints: Allowable values are: `approved`, `deleted`, `deleting`, `deleting_failed`, `discarded`, `draft`, `deployed`, `deploying_failed`, `deploying`, `superseded`, `undeploying`, `undeploying_failed`, `validated`, `validating`, `validating_failed`, `applied`, `apply_failed`.
 	* `version` - (Integer) The version of the configuration.
 * `created_at` - (String) A date and time value in the format YYYY-MM-DDTHH:mm:ssZ or YYYY-MM-DDTHH:mm:ss.sssZ, matching the date and time format as specified by RFC 3339.
 * `crn` - (String) An IBM Cloud resource name, which uniquely identifies a resource.
@@ -97,7 +99,7 @@ Nested schema for **environments**:
 	* `definition` - (List) The environment definition used in the project collection.
 	Nested schema for **definition**:
 		* `description` - (String) The description of the environment.
-		  * Constraints: The maximum length is `1024` characters. The minimum length is `0` characters. The value must match regular expression `/^$|^(?!\\s)(?!.*\\s$)[^\\x00-\\x1F]*$/`.
+		  * Constraints: The default value is ``. The maximum length is `1024` characters. The minimum length is `0` characters. The value must match regular expression `/^$|^(?!\\s)(?!.*\\s$)[^\\x00-\\x1F]*$/`.
 		* `name` - (String) The name of the environment.  It is unique within the account across projects and regions.
 		  * Constraints: The maximum length is `128` characters. The minimum length is `1` character. The value must match regular expression `/^(?!\\s)(?!.*\\s$)[^'"`<>{}\\x00-\\x1F]+$/`.
 	* `href` - (String) A URL.
@@ -118,6 +120,8 @@ Nested schema for **environments**:
 		  * Constraints: The maximum length is `128` characters. The value must match regular expression `/^[\\.\\-0-9a-zA-Z]+$/`.
 * `event_notifications_crn` - (String) The CRN of the event notifications instance if one is connected to this project.
   * Constraints: The maximum length is `512` characters. The minimum length is `0` characters. The value must match regular expression `/^$|^crn:v[0-9](:([A-Za-z0-9\\-._~!$&'()*+,;=@\/]|%[0-9A-Z]{2})*){8}$/`.
+* `href` - (String) A URL.
+  * Constraints: The maximum length is `256` characters. The minimum length is `1` character. The value must match regular expression `/^(http(s)?:\/\/)[a-zA-Z0-9\\$\\-_\\.+!\\*'\\(\\),=&?\/]+$/`.
 * `resource_group_id` - (String) The resource group id where the project's data and tools are created.
   * Constraints: The maximum length is `64` characters. The minimum length is `0` characters. The value must match regular expression `/^[0-9a-zA-Z]+$/`.
 * `state` - (String) The project status value.
diff --git a/website/docs/r/project_config.html.markdown b/website/docs/r/project_config.html.markdown
index 86be477768..9459f221fa 100644
--- a/website/docs/r/project_config.html.markdown
+++ b/website/docs/r/project_config.html.markdown
@@ -23,7 +23,7 @@ resource "ibm_project_config" "project_config_instance" {
     }
     locator_id = "1082e7d2-5e2f-0a11-a3bc-f88a8e1931fc.145be7c1-9ec4-4719-b586-584ee52fbed0-global"
     inputs = {
-      name = "app_repo_name"
+      app_repo_name = "static-website-repo"
     }
   }
   project_id = ibm_project.project_instance.id
@@ -34,7 +34,7 @@ resource "ibm_project_config" "project_config_instance" {
 
 You can specify the following arguments for this resource.
 
-* `definition` - (Required, List) The name and description of a project configuration.
+* `definition` - (Required, List) The project configuration definition.
 Nested schema for **definition**:
 	* `authorizations` - (Optional, List) The authorization details. You can authorize by using a trusted profile or an API key in Secrets Manager.
 	Nested schema for **authorizations**:
@@ -57,14 +57,16 @@ Nested schema for **definition**:
 		* `profile_name` - (Optional, String) The name of the compliance profile.
 		  * Constraints: The maximum length is `1024` characters. The minimum length is `0` characters. The value must match regular expression `/^(?!\\s)(?!.*\\s$)[^`<>\\x00-\\x1F]*$/`.
 	* `description` - (Optional, String) A project configuration description.
-	  * Constraints: The maximum length is `1024` characters. The minimum length is `0` characters. The value must match regular expression `/^$|^(?!\\s)(?!.*\\s$)[^\\x00-\\x1F]*$/`.
+	  * Constraints: The default value is ``. The maximum length is `1024` characters. The minimum length is `0` characters. The value must match regular expression `/^$|^(?!\\s)(?!.*\\s$)[^\\x00-\\x1F]*$/`.
 	* `environment_id` - (Optional, String) The ID of the project environment.
 	  * Constraints: The maximum length is `128` characters. The value must match regular expression `/^[\\.\\-0-9a-zA-Z]+$/`.
 	* `inputs` - (Optional, Map) The input variables for configuration definition and environment.
-	* `locator_id` - (Required, Forces new resource, String) A unique concatenation of catalogID.versionID that identifies the DA in the catalog. Either schematics.workspace_crn, definition.locator_id, or both must be specified.
+	* `locator_id` - (Optional, Forces new resource, String) A unique concatenation of catalogID.versionID that identifies the DA in the catalog. Either schematics.workspace_crn, definition.locator_id, or both must be specified.
 	  * Constraints: The maximum length is `512` characters. The minimum length is `1` character. The value must match regular expression `/^(?!\\s)(?!.*\\s$)[\\.0-9a-z-A-Z_-]+$/`.
 	* `name` - (Required, String) The configuration name. It is unique within the account across projects and regions.
 	  * Constraints: The maximum length is `128` characters. The minimum length is `1` character. The value must match regular expression `/^[a-zA-Z0-9][a-zA-Z0-9-_ ]*$/`.
+	* `resource_crns` - (Optional, List) The CRNs of resources associated with this configuration.
+	  * Constraints: The list items must match regular expression `/(?!\\s)(?!.*\\s$)^(crn)[^'"`<>{}\\s\\x00-\\x1F]*/`. The maximum length is `110` items. The minimum length is `0` items.
 	* `settings` - (Optional, Map) Schematics environment variables to use to deploy the configuration. Settings are only available if they were specified when the configuration was initially created.
 * `project_id` - (Required, Forces new resource, String) The unique project ID.
   * Constraints: The maximum length is `128` characters. The value must match regular expression `/^[\\.\\-0-9a-zA-Z]+$/`.
@@ -127,6 +129,8 @@ After your resource is created, you can read values from the listed arguments an
 
 * `id` - The unique identifier of the project_config.
 * `created_at` - (String) A date and time value in the format YYYY-MM-DDTHH:mm:ssZ or YYYY-MM-DDTHH:mm:ss.sssZ, matching the date and time format as specified by RFC 3339.
+* `href` - (String) A URL.
+  * Constraints: The maximum length is `256` characters. The minimum length is `1` character. The value must match regular expression `/^(http(s)?:\/\/)[a-zA-Z0-9\\$\\-_\\.+!\\*'\\(\\),=&?\/]+$/`.
 * `is_draft` - (Boolean) The flag that indicates whether the version of the configuration is draft, or active.
 * `last_saved_at` - (String) A date and time value in the format YYYY-MM-DDTHH:mm:ssZ or YYYY-MM-DDTHH:mm:ss.sssZ, matching the date and time format as specified by RFC 3339.
 * `modified_at` - (String) A date and time value in the format YYYY-MM-DDTHH:mm:ssZ or YYYY-MM-DDTHH:mm:ss.sssZ, matching the date and time format as specified by RFC 3339.
@@ -139,7 +143,7 @@ Nested schema for **outputs**:
 	  * Constraints: The maximum length is `1024` characters. The minimum length is `0` characters. The value must match regular expression `/^$|^(?!\\s)(?!.*\\s$)[^\\x00-\\x1F]*$/`.
 	* `name` - (String) The variable name.
 	  * Constraints: The maximum length is `256` characters. The minimum length is `1` character. The value must match regular expression `/^(?!\\s)(?!.*\\s$).+$/`.
-	* `value` - (String) Can be any value - a string, number, boolean, array, or object.
+	* `value` - (Map) Can be any value - a string, number, boolean, array, or object.
 * `project` - (List) The project referenced by this resource.
 Nested schema for **project**:
 	* `crn` - (String) An IBM Cloud resource name, which uniquely identifies a resource.
@@ -155,7 +159,7 @@ Nested schema for **project**:
 * `project_config_id` - (String) The ID of the configuration. If this parameter is empty, an ID is automatically created for the configuration.
   * Constraints: The maximum length is `128` characters. The value must match regular expression `/^[\\.\\-0-9a-zA-Z]+$/`.
 * `state` - (String) The state of the configuration.
-  * Constraints: Allowable values are: `approved`, `deleted`, `deleting`, `deleting_failed`, `discarded`, `draft`, `deployed`, `deploying_failed`, `deploying`, `superseded`, `undeploying`, `undeploying_failed`, `validated`, `validating`, `validating_failed`.
+  * Constraints: Allowable values are: `approved`, `deleted`, `deleting`, `deleting_failed`, `discarded`, `draft`, `deployed`, `deploying_failed`, `deploying`, `superseded`, `undeploying`, `undeploying_failed`, `validated`, `validating`, `validating_failed`, `applied`, `apply_failed`.
 * `update_available` - (Boolean) The flag that indicates whether a configuration update is available.
 * `version` - (Integer) The version of the configuration.
 
diff --git a/website/docs/r/project_environment.html.markdown b/website/docs/r/project_environment.html.markdown
index 99299f2fd4..db8000b55d 100644
--- a/website/docs/r/project_environment.html.markdown
+++ b/website/docs/r/project_environment.html.markdown
@@ -53,7 +53,7 @@ Nested schema for **definition**:
 		* `profile_name` - (Optional, String) The name of the compliance profile.
 		  * Constraints: The maximum length is `1024` characters. The minimum length is `0` characters. The value must match regular expression `/^(?!\\s)(?!.*\\s$)[^`<>\\x00-\\x1F]*$/`.
 	* `description` - (Optional, String) The description of the environment.
-	  * Constraints: The maximum length is `1024` characters. The minimum length is `0` characters. The value must match regular expression `/^$|^(?!\\s)(?!.*\\s$)[^\\x00-\\x1F]*$/`.
+	  * Constraints: The default value is ``. The maximum length is `1024` characters. The minimum length is `0` characters. The value must match regular expression `/^$|^(?!\\s)(?!.*\\s$)[^\\x00-\\x1F]*$/`.
 	* `inputs` - (Optional, Map) The input variables for configuration definition and environment.
 	* `name` - (Required, String) The name of the environment.  It is unique within the account across projects and regions.
 	  * Constraints: The maximum length is `128` characters. The minimum length is `1` character. The value must match regular expression `/^(?!\\s)(?!.*\\s$)[^'"`<>{}\\x00-\\x1F]+$/`.
@@ -66,6 +66,8 @@ After your resource is created, you can read values from the listed arguments an
 
 * `id` - The unique identifier of the project_environment.
 * `created_at` - (String) A date and time value in the format YYYY-MM-DDTHH:mm:ssZ or YYYY-MM-DDTHH:mm:ss.sssZ, matching the date and time format as specified by RFC 3339.
+* `href` - (String) A URL.
+  * Constraints: The maximum length is `256` characters. The minimum length is `1` character. The value must match regular expression `/^(http(s)?:\/\/)[a-zA-Z0-9\\$\\-_\\.+!\\*'\\(\\),=&?\/]+$/`.
 * `modified_at` - (String) A date and time value in the format YYYY-MM-DDTHH:mm:ssZ or YYYY-MM-DDTHH:mm:ss.sssZ, matching the date and time format as specified by RFC 3339.
 * `project` - (List) The project referenced by this resource.
 Nested schema for **project**:
diff --git a/website/docs/r/satellite_cluster.html.markdown b/website/docs/r/satellite_cluster.html.markdown
index b4d8a42038..37f8431980 100644
--- a/website/docs/r/satellite_cluster.html.markdown
+++ b/website/docs/r/satellite_cluster.html.markdown
@@ -33,6 +33,54 @@ resource "ibm_satellite_cluster" "create_cluster" {
 
 ```
 
+### Create satellite cluster with calico ip autodetection
+
+```terraform
+data "ibm_resource_group" "rg_cluster" {
+  name = var.resource_group
+}
+
+resource "ibm_satellite_cluster" "create_cluster" {
+  count = var.create_cluster ? 1 : 0
+
+  name                   = var.cluster
+  location               = var.location
+  resource_group_id      = data.ibm_resource_group.rg_cluster.id
+  enable_config_admin    = true
+  kube_version           = var.kube_version
+  wait_for_worker_update = (var.wait_for_worker_update != null ? var.wait_for_worker_update : true)
+  worker_count           = (var.worker_count != null ? var.worker_count : null)
+  host_labels            = (var.host_labels != null ? var.host_labels : null)
+  operating_system       = var.operating_system
+
+  dynamic "zones" {
+    for_each = var.zones != null ? var.zones : [] # a dynamic block's for_each cannot be null
+    content {
+      id = zones.value
+    }
+  }
+
+  default_worker_pool_labels = (var.default_worker_pool_labels != null ? var.default_worker_pool_labels : null)
+  tags                       = (var.tags != null ? var.tags : null)
+  calico_ip_autodetection    = (var.calico_ip_autodetection != null ? var.calico_ip_autodetection : null)
+
+  timeouts {
+    create = (var.create_timeout != null ? var.create_timeout : null)
+    update = (var.update_timeout != null ? var.update_timeout : null)
+    delete = (var.delete_timeout != null ? var.delete_timeout : null)
+  }
+
+}
+```
+
+Example value for `calico_ip_autodetection`:
+
+```terraform
+calico_ip_autodetection = {
+  "can-reach" = "www.ibm.com",
+}
+```
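+
+If the hosts cannot reach an external address, Calico also supports interface-based autodetection; the following is a hypothetical sketch that assumes the RHCOS hosts expose interface names matching `ens.*`:
+
+```terraform
+calico_ip_autodetection = {
+  "interface" = "ens.*",
+}
+```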
+
 ## Timeouts
 
 The `ibm_satellite_cluster` provides the following [Timeouts](https://www.terraform.io/docs/language/resources/syntax.html) configuration options:
@@ -72,6 +120,7 @@ Review the argument references that you can specify for your resource.
 - `tags` - (Optional, Array of Strings) Tags associated with the container cluster instance.
 - `pod_subnet` - Specify a custom subnet CIDR to provide private IP addresses for pods. The subnet must be at least `/23` or larger. For more information, see [Configuring VPC subnets](https://cloud.ibm.com/docs/containers?topic=containers-vpc-subnets).
 - `service_subnet` -  Specify a custom subnet CIDR to provide private IP addresses for services. The subnet must be at least `/24` or larger. For more information, see [Configuring VPC subnets](https://cloud.ibm.com/docs/containers?topic=containers-vpc-subnets#vpc_basics).
+- `calico_ip_autodetection` - (Optional, Map) Set IP autodetection to select the correct interface for Calico. This setting works only with RHCOS.
 
 
 ## Attributes reference
diff --git a/website/docs/r/satellite_location.html.markdown b/website/docs/r/satellite_location.html.markdown
index e5241a334a..7ccb830858 100644
--- a/website/docs/r/satellite_location.html.markdown
+++ b/website/docs/r/satellite_location.html.markdown
@@ -44,6 +44,23 @@ resource "ibm_satellite_location" "create_location" {
 }
 ```
 
+### Sample to create a location and specify pod and service subnets
+
+```terraform
+data "ibm_resource_group" "group" {
+    name = "Default"
+}
+
+resource "ibm_satellite_location" "create_location" {
+  location          = var.location
+  zones             = var.location_zones
+  managed_from      = var.managed_from
+  resource_group_id = data.ibm_resource_group.group.id
+  pod_subnet        = var.pod_subnet // "10.42.0.0/16"
+  service_subnet    = var.service_subnet // "192.168.42.0/24"
+}
+```
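+
+For completeness, a minimal sketch of the two subnet variable declarations that the example assumes; the defaults mirror the inline comments and are illustrative:
+
+```terraform
+variable "pod_subnet" {
+  type    = string
+  default = "10.42.0.0/16"
+}
+
+variable "service_subnet" {
+  type    = string
+  default = "192.168.42.0/24"
+}
+```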
+
 ## Timeouts
 
 The `ibm_satellite_location` provides the following [Timeouts](https://www.terraform.io/docs/language/resources/syntax.html) configuration options:
@@ -74,6 +91,8 @@ Review the argument references that you can specify for your resource.
 - `logging_account_id` - (Optional, String) The account ID for IBM Log Analysis with LogDNA log forwarding.
 - `managed_from` - (Required, String) The IBM Cloud regions that you can choose from to manage your Satellite location. To list available multizone regions, run `ibmcloud ks locations`. For more information, refer to [supported IBM Cloud locations](https://cloud.ibm.com/docs/satellite?topic=satellite-sat-regions).
 - `zones`- Array of Strings - Optional- The names for the host zones. For high availability, allocate your hosts across these three zones based on your infrastructure provider zones. For example, `us-east-1`, `us-east-2`, `us-east-3` .
+- `service_subnet` - (Optional, String) Custom subnet CIDR to provide private IP addresses for services.
+- `pod_subnet` - (Optional, String) Custom subnet CIDR to provide private IP addresses for pods.
 
 
 ## Attribute reference
diff --git a/website/docs/r/scc_instance.html.markdown b/website/docs/r/scc_instance.html.markdown
new file mode 100644
index 0000000000..1347256fb6
--- /dev/null
+++ b/website/docs/r/scc_instance.html.markdown
@@ -0,0 +1,41 @@
+---
+layout: "ibm"
+page_title: "IBM : ibm_scc_instance"
+description: |-
+  Manages scc_instance.
+subcategory: "Security and Compliance Center"
+---
+
+# ibm_scc_instance
+
+Create, update, and delete scc_instance with this resource.
+
+~> NOTE: This document describes how to use the `ibm_resource_instance` resource to target the `Security and Compliance Center` service. For more information about the Terraform resource, see the [`ibm_resource_instance` documentation](https://registry.terraform.io/providers/IBM-Cloud/ibm/latest/docs/resources/resource_instance).
+
+## Example Usage
+
+```hcl
+data "ibm_resource_group" "group" {
+  name = "test"
+}
+
+resource "ibm_resource_instance" "scc_instance" {
+  name              = "test"
+  service           = "compliance"
+  plan              = "security-compliance-center-standard-plan" # security-compliance-center-trial-plan is also supported
+  location          = "us-south"
+  resource_group_id = data.ibm_resource_group.group.id
+  tags              = ["tag1", "tag2"]
+}
+```
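+
+Other Security and Compliance Center resources expect the instance GUID rather than the full CRN as `instance_id`; a hedged sketch of exposing it for reuse (the output name is illustrative):
+
+```terraform
+output "scc_instance_id" {
+  value = ibm_resource_instance.scc_instance.guid
+}
+```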
+
+## Argument reference
+Review the argument references that you can specify for your resource. 
+
+- `location` - (Required, Forces new resource, String) Target location or environment to create the resource instance.
+- `plan` - (Required, String) The name of the plan type supported by the service. You can retrieve the value by running the `ibmcloud catalog service ` command.
+- `name` - (Required, String) A descriptive name used to identify the resource instance.
+- `resource_group_id` - (Optional, Forces new resource, String) The ID of the resource group where you want to create the service. You can retrieve the value from the `ibm_resource_group` data source. If not provided, the service is created in the `default` resource group.
+- `tags` - (Optional, Array of Strings) Tags associated with the instance.
+- `service` - (Required, Forces new resource, String) The name of the service offering.
+
diff --git a/website/docs/r/scc_instance_settings.html.markdown b/website/docs/r/scc_instance_settings.html.markdown
new file mode 100644
index 0000000000..98bd28df3b
--- /dev/null
+++ b/website/docs/r/scc_instance_settings.html.markdown
@@ -0,0 +1,65 @@
+---
+layout: "ibm"
+page_title: "IBM : ibm_scc_instance_settings"
+description: |-
+  Manages scc_instance_settings.
+subcategory: "Security and Compliance Center"
+---
+
+# ibm_scc_instance_settings
+
+Create, update, and delete scc_instance_settings with this resource.
+
+## Example Usage
+
+```hcl
+resource "ibm_scc_instance_settings" "scc_instance_settings_instance" {
+  instance_id = "00000000-1111-2222-3333-444444444444"
+  event_notifications {
+		instance_crn = ""
+  }
+  object_storage {
+		instance_crn = ""
+		bucket = ""
+  }
+}
+```
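+
+The example above uses empty strings as placeholders; the following is a hedged sketch of wiring real instances in, where the referenced Event Notifications instance, Cloud Object Storage instance, and bucket are hypothetical resources defined elsewhere in your configuration:
+
+```terraform
+resource "ibm_scc_instance_settings" "scc_instance_settings_connected" {
+  instance_id = ibm_resource_instance.scc_instance.guid
+
+  event_notifications {
+    instance_crn = ibm_resource_instance.event_notifications.crn
+  }
+
+  object_storage {
+    instance_crn = ibm_resource_instance.cos.crn
+    bucket       = ibm_cos_bucket.scc_bucket.bucket_name
+  }
+}
+```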
+
+## Argument Reference
+
+You can specify the following arguments for this resource.
+
+* `instance_id` - (Required, Forces new resource, String) The ID of the SCC instance in a particular region.
+* `event_notifications` - (Optional, List) The Event Notifications settings.
+Nested schema for **event_notifications**:
+	* `instance_crn` - (Optional, String) The Event Notifications instance CRN.
+	  * Constraints: The maximum length is `512` characters. The minimum length is `0` characters. The value must match regular expression `/^crn:v[0-9](:([A-Za-z0-9-._~!$&'()*+,;=@\/]|%[0-9A-Z]{2})*){8}|$/`.
+	* `source_id` - (Computed, String) The connected Security and Compliance Center instance CRN.
+	  * Constraints: The maximum length is `512` characters. The minimum length is `1` character. The value must match regular expression `/([A-Za-z0-9]+(:[A-Za-z0-9]+)+)/`.
+	* `updated_on` - (Optional, String) The date when the Event Notifications connection was updated.
+* `object_storage` - (Optional, List) The Cloud Object Storage settings.
+Nested schema for **object_storage**:
+	* `bucket` - (Optional, String) The connected Cloud Object Storage bucket name.
+	  * Constraints: The maximum length is `64` characters. The minimum length is `0` characters. The value must match regular expression `/[A-Za-z]+|/`.
+	* `bucket_endpoint` - (Computed, String) The connected Cloud Object Storage bucket endpoint.
+	  * Constraints: The maximum length is `512` characters. The minimum length is `1` character. The value must match regular expression `/([A-Za-z0-9-]+)/`.
+	* `bucket_location` - (Computed, String) The connected Cloud Object Storage bucket location.
+	  * Constraints: The maximum length is `32` characters. The minimum length is `0` characters. The value must match regular expression `/[A-Za-z]+/`.
+	* `instance_crn` - (Optional, String) The connected Cloud Object Storage instance CRN.
+	  * Constraints: The maximum length is `512` characters. The minimum length is `0` characters. The value must match regular expression `/^crn:v[0-9](:([A-Za-z0-9-._~!$&'()*+,;=@\/]|%[0-9A-Z]{2})*){8}|$/`.
+	* `updated_on` - (Computed, String) The date when the bucket connection was updated.
+
+## Attribute Reference
+
+After your resource is created, you can read values from the listed arguments and the following attributes.
+
+* `id` - The unique identifier of the scc_instance_settings.
+
+## Import
+
+You can import the `ibm_scc_instance_settings` resource by using `instance_id`, the unique identifier of the scc_instance_settings.
+
+# Syntax
+```
+$ terraform import ibm_scc_instance_settings.scc_instance_settings <instance_id>
+```
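+
+**Example** (the GUID is illustrative and matches the usage example above):
+
+```
+$ terraform import ibm_scc_instance_settings.scc_instance_settings 00000000-1111-2222-3333-444444444444
+```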
diff --git a/website/docs/r/schematics_agent.html.markdown b/website/docs/r/schematics_agent.html.markdown
index 4b04b38e6a..802bc9aa2d 100644
--- a/website/docs/r/schematics_agent.html.markdown
+++ b/website/docs/r/schematics_agent.html.markdown
@@ -33,7 +33,8 @@ resource "ibm_schematics_agent" "schematics_agent_instance" {
   name = "MyDevAgent"
   resource_group = "Default"
   schematics_location = "us-south"
-  version = "1.0.0-beta2"
+  tags = ["agent-MyDevAgent"]
+  version = "1.0.0"
 }
 ```
 
@@ -50,15 +51,6 @@ Nested scheme for **agent_infrastructure**:
 	* `cos_instance_name` - (Optional, String) The COS instance name to store the agent logs.
 	* `infra_type` - (Optional, String) Type of target agent infrastructure.
 	  * Constraints: Allowable values are: `ibm_kubernetes`, `ibm_openshift`, `ibm_satellite`.
-* `agent_kpi` - (Optional, List) Schematics Agent key performance indicators.
-Nested scheme for **agent_kpi**:
-	* `application_indicators` - (Optional, List) Agent application key performance indicators.
-	* `availability_indicator` - (Optional, String) Overall availability indicator reported by the agent.
-	  * Constraints: Allowable values are: `available`, `unavailable`, `error`.
-	* `infra_indicators` - (Optional, List) Agent infrastructure key performance indicators.
-	* `lifecycle_indicator` - (Optional, String) Overall lifecycle indicator reported by the agents.
-	  * Constraints: Allowable values are: `consistent`, `inconsistent`, `obselete`.
-	* `percent_usage_indicator` - (Optional, String) Percentage usage of the agent resources.
 * `agent_location` - (Required, String) The location where agent is deployed in the user environment.
 * `agent_metadata` - (Optional, List) The metadata of an agent.
 Nested scheme for **agent_metadata**:
@@ -76,7 +68,7 @@ Nested scheme for **user_state**:
 	* `set_by` - (Computed, String) Name of the User who set the state of the Object.
 	* `state` - (Optional, String) User-defined states  * `enable`  Agent is enabled by the user.  * `disable` Agent is disbaled by the user.
 	  * Constraints: Allowable values are: `enable`, `disable`.
-* `version` - (Required, String) Agent version. Available Versions are 1.0.0-beta1, 1.0.0-beta2
+* `version` - (Required, String) Agent version.
 
 ## Attribute Reference
 
@@ -84,6 +76,15 @@ In addition to all argument references listed, you can access the following attr
 
 * `id` - The unique identifier of the schematics_agent.
 * `agent_crn` - (String) The agent crn, obtained from the Schematics agent deployment configuration.
+* `agent_kpi` - (List) Schematics Agent key performance indicators.
+Nested scheme for **agent_kpi**:
+	* `application_indicators` - (List) Agent application key performance indicators.
+	* `availability_indicator` - (String) Overall availability indicator reported by the agent.
+	  * Constraints: Allowable values are: `available`, `unavailable`, `error`.
+	* `infra_indicators` - (List) Agent infrastructure key performance indicators.
+	* `lifecycle_indicator` - (String) Overall lifecycle indicator reported by the agents.
+	  * Constraints: Allowable values are: `consistent`, `inconsistent`, `obselete`.
+	* `percent_usage_indicator` - (String) Percentage usage of the agent resources.
 * `created_at` - (String) The agent creation date-time.
 * `creation_by` - (String) The email address of an user who created the agent.
 * `recent_deploy_job` - (List) Post-installations checks for Agent health.
diff --git a/website/docs/r/sm_iam_credentials_secret.html.markdown b/website/docs/r/sm_iam_credentials_secret.html.markdown
index 2f3e78f8f6..3c108eb82c 100644
--- a/website/docs/r/sm_iam_credentials_secret.html.markdown
+++ b/website/docs/r/sm_iam_credentials_secret.html.markdown
@@ -55,7 +55,6 @@ Nested scheme for **rotation**:
 	* `auto_rotate` - (Optional, Boolean) Determines whether Secrets Manager rotates your secret automatically.Default is `false`. If `auto_rotate` is set to `true` the service rotates your secret based on the defined interval.
 	* `interval` - (Optional, Integer) The length of the secret rotation time interval.
 	  * Constraints: The minimum value is `1`.
-	* `rotate_keys` - (Optional, Boolean) Determines whether Secrets Manager rotates the private key for your public certificate automatically.Default is `false`. If it is set to `true`, the service generates and stores a new private key for your rotated certificate.
 	* `unit` - (Optional, String) The units for the secret rotation time interval.
 	  * Constraints: Allowable values are: `day`, `month`.
 * `secret_group_id` - (Optional, Forces new resource, String) A v4 UUID identifier, or `default` secret group.
diff --git a/website/docs/r/sm_private_certificate.html.markdown b/website/docs/r/sm_private_certificate.html.markdown
index 64f1bb83b7..98c1d2c122 100644
--- a/website/docs/r/sm_private_certificate.html.markdown
+++ b/website/docs/r/sm_private_certificate.html.markdown
@@ -56,7 +56,6 @@ Nested scheme for **rotation**:
     * `auto_rotate` - (Optional, Boolean) Determines whether Secrets Manager rotates your secret automatically.Default is `false`. If `auto_rotate` is set to `true` the service rotates your secret based on the defined interval.
     * `interval` - (Optional, Integer) The length of the secret rotation time interval.
       * Constraints: The minimum value is `1`.
-    * `rotate_keys` - (Optional, Boolean) Determines whether Secrets Manager rotates the private key for your public certificate automatically.Default is `false`. If it is set to `true`, the service generates and stores a new private key for your rotated certificate.
     * `unit` - (Optional, String) The units for the secret rotation time interval.
       * Constraints: Allowable values are: `day`, `month`.
 * `secret_group_id` - (Optional, Forces new resource, String) A v4 UUID identifier, or `default` secret group.
diff --git a/website/docs/r/sm_private_certificate_configuration_template.html.markdown b/website/docs/r/sm_private_certificate_configuration_template.html.markdown
index d34b20e904..00f6d033fd 100644
--- a/website/docs/r/sm_private_certificate_configuration_template.html.markdown
+++ b/website/docs/r/sm_private_certificate_configuration_template.html.markdown
@@ -67,6 +67,7 @@ Review the argument reference that you can specify for your resource.
   * Constraints: The list items must match regular expression `/^[a-zA-Z]+$/`. The maximum length is `100` items. The minimum length is `0` items.
 * `locality` - (Optional, Forces new resource, List) The Locality (L) values to define in the subject field of the resulting certificate.
   * Constraints: The list items must match regular expression `/(.*?)/`. The maximum length is `10` items. The minimum length is `0` items.
+* `max_ttl` - (Optional, String) The maximum time-to-live (TTL) for certificates that are created by this template.
 * `name` - (Required, String) A human-readable unique name to assign to your configuration.
 * `organization` - (Optional, Forces new resource, List) The Organization (O) values to define in the subject field of the resulting certificate.
   * Constraints: The list items must match regular expression `/(.*?)/`. The maximum length is `10` items. The minimum length is `0` items.
@@ -84,6 +85,7 @@ Review the argument reference that you can specify for your resource.
   * Constraints: The maximum length is `64` characters. The minimum length is `32` characters. The value must match regular expression `/[^a-fA-F0-9]/`.
 * `street_address` - (Optional, Forces new resource, List) The street address values to define in the subject field of the resulting certificate.
   * Constraints: The list items must match regular expression `/(.*?)/`. The maximum length is `10` items. The minimum length is `0` items.
+* `ttl` - (Optional, String) The requested time-to-live (TTL) for certificates that are created by this template. This field's value can't be longer than the `max_ttl` limit.
 * `use_csr_common_name` - (Optional, Boolean) When used with the `private_cert_configuration_action_sign_csr` action, this field determines whether to use the common name (CN) from a certificate signing request (CSR) instead of the CN that's included in the data of the certificate.Does not include any requested Subject Alternative Names (SANs) in the CSR. To use the alternative names, include the `use_csr_sans` property.
 * `use_csr_sans` - (Optional, Boolean) When used with the `private_cert_configuration_action_sign_csr` action, this field determines whether to use the Subject Alternative Names(SANs) from a certificate signing request (CSR) instead of the SANs that are included in the data of the certificate.Does not include the common name in the CSR. To use the common name, include the `use_csr_common_name` property.
 
diff --git a/website/docs/r/sm_public_certificate_configuration_ca_lets_encrypt.html.markdown b/website/docs/r/sm_public_certificate_configuration_ca_lets_encrypt.html.markdown
index 6c8cebb18c..e1e9cc1af3 100644
--- a/website/docs/r/sm_public_certificate_configuration_ca_lets_encrypt.html.markdown
+++ b/website/docs/r/sm_public_certificate_configuration_ca_lets_encrypt.html.markdown
@@ -32,8 +32,8 @@ Review the argument reference that you can specify for your resource.
   * Constraints: Allowable values are: `private`, `public`.
 * `lets_encrypt_environment` - (Required, String) The configuration of the Let's Encrypt CA environment.
   * Constraints: Allowable values are: `production`, `staging`.
-* `lets_encrypt_preferred_chain` - (Optional, String) Prefer the chain with an issuer matching this Subject Common Name.
-  * Constraints: The maximum length is `30` characters. The minimum length is `2` characters. The value must match regular expression `/(.*?)/`.
+* `lets_encrypt_preferred_chain` - (Optional, String) This field supports only the chains that Let's Encrypt provides. Keep empty to use the default or supply a valid Let's Encrypt-provided value. For a list of supported chains, see: https://letsencrypt.org/certificates/.
+  * Constraints: The value must match regular expression `/(.*?)/`.
 * `lets_encrypt_private_key` - (Required, String) The PEM encoded private key of your Lets Encrypt account.
   * Constraints: The maximum length is `100000` characters. The minimum length is `50` characters. The value must match regular expression `/(^-----BEGIN PRIVATE KEY-----.*?)/`.
 * `name` - (Required, String) A human-readable unique name to assign to your configuration.
diff --git a/website/docs/r/sm_service_credentials_secret.html.markdown b/website/docs/r/sm_service_credentials_secret.html.markdown
index cdf60c3e9f..12b43a166b 100644
--- a/website/docs/r/sm_service_credentials_secret.html.markdown
+++ b/website/docs/r/sm_service_credentials_secret.html.markdown
@@ -43,6 +43,7 @@ resource "ibm_sm_service_credentials_secret" "sm_service_credentials_secret" {
 
 ```terraform
 resource "ibm_sm_service_credentials_secret" "sm_service_credentials_secret" {
+  instance_id   = ibm_resource_instance.sm_instance.guid
   region        = "us-south"
   name 			= "secret-name"
   source_service {
@@ -89,11 +90,11 @@ Nested scheme for **rotation**:
 	  * Constraints: Allowable values are: `day`, `month`.
 * `secret_group_id` - (Optional, Forces new resource, String) A v4 UUID identifier, or `default` secret group.
   * Constraints: The maximum length is `36` characters. The minimum length is `7` characters. The value must match regular expression `/^([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}|default)$/`.
-* `source_service` - (Optional, List) The properties required for creating the service credentials for the specified source service instance.
+* `source_service` - (Required, List) The properties required for creating the service credentials for the specified source service instance.
 Nested scheme for **source_service**:
-    * `instance` - (Optional, List) The source service instance identifier.
+    * `instance` - (Required, List) The source service instance identifier.
       Nested scheme for **instance**:
-          * `crn` - (Optional, String) A CRN that uniquely identifies a service credentials source.
+          * `crn` - (Required, String) A CRN that uniquely identifies a service credentials source.
     * `role` - (Optional, List) The service-specific custom role object, CRN role is accepted. Refer to the service’s documentation for supported roles.
       Nested scheme for **role**:
           * `crn` - (Optional, String) The service role CRN.
diff --git a/website/docs/r/sm_username_password_secret.html.markdown b/website/docs/r/sm_username_password_secret.html.markdown
index b8a07c3af0..fdb4605f15 100644
--- a/website/docs/r/sm_username_password_secret.html.markdown
+++ b/website/docs/r/sm_username_password_secret.html.markdown
@@ -55,7 +55,6 @@ Nested scheme for **rotation**:
 	* `auto_rotate` - (Optional, Boolean) Determines whether Secrets Manager rotates your secret automatically.Default is `false`. If `auto_rotate` is set to `true` the service rotates your secret based on the defined interval.
 	* `interval` - (Optional, Integer) The length of the secret rotation time interval.
 	  * Constraints: The minimum value is `1`.
-	* `rotate_keys` - (Optional, Boolean) Determines whether Secrets Manager rotates the private key for your public certificate automatically.Default is `false`. If it is set to `true`, the service generates and stores a new private key for your rotated certificate.
 	* `unit` - (Optional, String) The units for the secret rotation time interval.
 	  * Constraints: Allowable values are: `day`, `month`.
 * `secret_group_id` - (Optional, Forces new resource, String) A v4 UUID identifier, or `default` secret group.