From f5c220fa6ad3296346a5d7b83c2e3b9235d2c695 Mon Sep 17 00:00:00 2001 From: shiyuhang0 <52435083+shiyuhang0@users.noreply.github.com> Date: Fri, 16 Sep 2022 15:56:17 +0800 Subject: [PATCH] Implement terraform-provider-tidbcloud (#4) Co-authored-by: Xiang Zhang --- .github/CODEOWNERS | 1 - .github/CODE_OF_CONDUCT.md | 5 - .github/ISSUE_TEMPLATE/bug_report.md | 23 + .github/ISSUE_TEMPLATE/feature_request.md | 18 + .github/dependabot.yml | 6 +- .github/pull_request_template.md | 27 + .github/workflows/add-content-to-project.yml | 40 - .github/workflows/test.yml | 4 + LICENSE | 574 +++---- Notice | 39 + README.md | 1399 ++++++++++++++++- docs/data-sources/backup.md | 73 + docs/data-sources/cluster_spec.md | 130 ++ docs/data-sources/project.md | 65 + docs/data-sources/restore.md | 55 + docs/data-sources/scaffolding_example.md | 32 - docs/index.md | 12 +- docs/resources/backup.md | 58 + docs/resources/cluster.md | 166 ++ docs/resources/restore.md | 163 ++ docs/resources/scaffolding_example.md | 32 - .../scaffolding_example/data-source.tf | 3 - .../tidbcloud_backup/data-source.tf | 23 + .../tidbcloud_cluster_spec/data-source.tf | 19 + .../tidbcloud_project/data-source.tf | 21 + .../tidbcloud_restore/data_source.tf | 20 + examples/provider/provider.tf | 7 +- .../resources/scaffolding_example/resource.tf | 3 - .../resources/tidbcloud_backup/resource.tf | 19 + .../resources/tidbcloud_cluster/resource.tf | 55 + .../resources/tidbcloud_restore/resource.tf | 38 + go.mod | 14 +- go.sum | 71 +- internal/README.md | 57 + internal/provider/backup_data_source.go | 173 ++ internal/provider/backup_resource.go | 225 +++ internal/provider/cluster_resource.go | 626 ++++++++ internal/provider/cluster_spec_data_source.go | 296 ++++ internal/provider/example_data_source.go | 85 - internal/provider/example_resource.go | 157 -- internal/provider/example_resource_test.go | 52 - internal/provider/project_data_source.go | 155 ++ internal/provider/provider.go | 117 +- 
internal/provider/provider_test.go | 22 - internal/provider/restore_data_source.go | 201 +++ internal/provider/restore_resource.go | 389 +++++ .../testmanually/cluster_update_test.md | 138 ++ .../testmanually/restore_resource_test.go | 77 + .../backup_data_source_test.go | 31 + .../testwithcluster/backup_resource_test.go | 68 + .../provider/testwithcluster/basic_test.go | 38 + .../testwithproject/cluster_resource_test.go | 114 ++ .../cluster_spec_data_source_test.go} | 13 +- .../project_data_source_test.go | 28 + .../provider/testwithproject/provider_test.go | 39 + .../restore_data_source_test.go | 30 + main.go | 10 +- tidbcloud/api.go | 196 +++ tidbcloud/client.go | 81 + tidbcloud/type.go | 192 +++ 60 files changed, 5886 insertions(+), 939 deletions(-) delete mode 100644 .github/CODEOWNERS delete mode 100644 .github/CODE_OF_CONDUCT.md create mode 100644 .github/ISSUE_TEMPLATE/bug_report.md create mode 100644 .github/ISSUE_TEMPLATE/feature_request.md create mode 100644 .github/pull_request_template.md delete mode 100644 .github/workflows/add-content-to-project.yml create mode 100644 Notice create mode 100644 docs/data-sources/backup.md create mode 100644 docs/data-sources/cluster_spec.md create mode 100644 docs/data-sources/project.md create mode 100644 docs/data-sources/restore.md delete mode 100644 docs/data-sources/scaffolding_example.md create mode 100644 docs/resources/backup.md create mode 100644 docs/resources/cluster.md create mode 100644 docs/resources/restore.md delete mode 100644 docs/resources/scaffolding_example.md delete mode 100644 examples/data-sources/scaffolding_example/data-source.tf create mode 100644 examples/data-sources/tidbcloud_backup/data-source.tf create mode 100644 examples/data-sources/tidbcloud_cluster_spec/data-source.tf create mode 100644 examples/data-sources/tidbcloud_project/data-source.tf create mode 100644 examples/data-sources/tidbcloud_restore/data_source.tf delete mode 100644 examples/resources/scaffolding_example/resource.tf 
create mode 100644 examples/resources/tidbcloud_backup/resource.tf create mode 100644 examples/resources/tidbcloud_cluster/resource.tf create mode 100644 examples/resources/tidbcloud_restore/resource.tf create mode 100644 internal/README.md create mode 100644 internal/provider/backup_data_source.go create mode 100644 internal/provider/backup_resource.go create mode 100644 internal/provider/cluster_resource.go create mode 100644 internal/provider/cluster_spec_data_source.go delete mode 100644 internal/provider/example_data_source.go delete mode 100644 internal/provider/example_resource.go delete mode 100644 internal/provider/example_resource_test.go create mode 100644 internal/provider/project_data_source.go delete mode 100644 internal/provider/provider_test.go create mode 100644 internal/provider/restore_data_source.go create mode 100644 internal/provider/restore_resource.go create mode 100644 internal/provider/testmanually/cluster_update_test.md create mode 100644 internal/provider/testmanually/restore_resource_test.go create mode 100644 internal/provider/testwithcluster/backup_data_source_test.go create mode 100644 internal/provider/testwithcluster/backup_resource_test.go create mode 100644 internal/provider/testwithcluster/basic_test.go create mode 100644 internal/provider/testwithproject/cluster_resource_test.go rename internal/provider/{example_data_source_test.go => testwithproject/cluster_spec_data_source_test.go} (54%) create mode 100644 internal/provider/testwithproject/project_data_source_test.go create mode 100644 internal/provider/testwithproject/provider_test.go create mode 100644 internal/provider/testwithproject/restore_data_source_test.go create mode 100644 tidbcloud/api.go create mode 100644 tidbcloud/client.go create mode 100644 tidbcloud/type.go diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS deleted file mode 100644 index 922ee27..0000000 --- a/.github/CODEOWNERS +++ /dev/null @@ -1 +0,0 @@ -* @hashicorp/terraform-devex diff --git 
a/.github/CODE_OF_CONDUCT.md b/.github/CODE_OF_CONDUCT.md deleted file mode 100644 index 0c8b092..0000000 --- a/.github/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,5 +0,0 @@ -# Code of Conduct - -HashiCorp Community Guidelines apply to you when interacting with the community here on GitHub and contributing code. - -Please read the full text at https://www.hashicorp.com/community-guidelines diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000..0e09eae --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,23 @@ +--- +name: "\U0001F41B Bug Report" +about: Create a report to help us improve +title: "[BUG] Title of Bug Report" +labels: type/bug +--- + +### Terraform CLI and Terraform TiDB Cloud Provider Version + + +### Describe the bug + + +### Steps to Reproduce + \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000..ec63fb3 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,18 @@ +--- +name: "\U0001F947 Feature Request" +about: Suggest an idea for this project +title: '' +labels: type/feature-request +--- + +### Is your feature request related to a problem? 
Please describe + + +### Describe the solution you'd like + + +### Describe alternatives you've considered + + +### Additional context + \ No newline at end of file diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 87fdf72..629d1fb 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -5,8 +5,10 @@ updates: - package-ecosystem: "github-actions" directory: "/" schedule: - interval: "daily" + interval: weekly + day: tuesday - package-ecosystem: "gomod" directory: "/" schedule: - interval: "daily" + interval: weekly + day: tuesday diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000..28099ca --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,27 @@ +### What problem does this PR solve? + + +### What is changed and how it works? + + +### Check List + +Tests + +- Unit test +- Integration test +- Manual test (add detailed scripts or steps below) +- No code + +Side effects + +- Possible performance regression +- Increased code complexity +- Breaking backward compatibility + +Related changes + +- Need to cherry-pick to the release branch +- Need to update the documentation +- Need to update the `tidb-ansible` repository +- Need to be included in the release note diff --git a/.github/workflows/add-content-to-project.yml b/.github/workflows/add-content-to-project.yml deleted file mode 100644 index 29015ec..0000000 --- a/.github/workflows/add-content-to-project.yml +++ /dev/null @@ -1,40 +0,0 @@ -# Based on https://github.com/leonsteinhaeuser/project-beta-automations - -name: "Add Issues/PRs to TF Provider DevEx team board" - -on: - issues: - types: [opened, reopened] - pull_request_target: - # NOTE: The way content is added to project board is equivalent to an "upsert". - # Calling it multiple times will be idempotent. 
- # - # See https://securitylab.github.com/research/github-actions-preventing-pwn-requests/ - # to see the reasoning behind using `pull_request_target` instead of `pull_request` - types: [opened, reopened, ready_for_review] - -jobs: - add-content-to-project: - name: "Add Content to project" - runs-on: ubuntu-latest - steps: - - name: "Set Issue to 'Priority = Triage Next'" - uses: leonsteinhaeuser/project-beta-automations@v1.3.0 - if: github.event_name == 'issues' - with: - gh_token: ${{ secrets.TF_DEVEX_PROJECT_GITHUB_TOKEN }} - organization: "hashicorp" - project_id: 99 #< https://github.com/orgs/hashicorp/projects/99 - resource_node_id: ${{ github.event.issue.node_id }} - operation_mode: custom_field - custom_field_values: '[{\"name\":\"Priority\",\"type\":\"single_select\",\"value\":\"Triage Next\"}]' - - name: "Set Pull Request to 'Priority = Triage Next'" - uses: leonsteinhaeuser/project-beta-automations@v1.3.0 - if: github.event_name == 'pull_request_target' - with: - gh_token: ${{ secrets.TF_DEVEX_PROJECT_GITHUB_TOKEN }} - organization: "hashicorp" - project_id: 99 #< https://github.com/orgs/hashicorp/projects/99 - resource_node_id: ${{ github.event.pull_request.node_id }} - operation_mode: custom_field - custom_field_values: '[{\"name\":\"Priority\",\"type\":\"single_select\",\"value\":\"Triage Next\"}]' diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index afe5cdd..6dec9a0 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -5,9 +5,13 @@ name: Tests # Optionally, you can turn it on using a schedule for regular testing. on: pull_request: + branches: + - main paths-ignore: - 'README.md' push: + branches: + - main paths-ignore: - 'README.md' diff --git a/LICENSE b/LICENSE index a612ad9..261eeb9 100644 --- a/LICENSE +++ b/LICENSE @@ -1,373 +1,201 @@ -Mozilla Public License Version 2.0 -================================== - -1. Definitions --------------- - -1.1. 
"Contributor" - means each individual or legal entity that creates, contributes to - the creation of, or owns Covered Software. - -1.2. "Contributor Version" - means the combination of the Contributions of others (if any) used - by a Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - means Source Code Form to which the initial Contributor has attached - the notice in Exhibit A, the Executable Form of such Source Code - Form, and Modifications of such Source Code Form, in each case - including portions thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - (a) that the initial Contributor has attached the notice described - in Exhibit B to the Covered Software; or - - (b) that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the - terms of a Secondary License. - -1.6. "Executable Form" - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - means a work that combines Covered Software with other material, in - a separate file or files, that is not Covered Software. - -1.8. "License" - means this document. - -1.9. "Licensable" - means having the right to grant, to the maximum extent possible, - whether at the time of the initial grant or subsequently, any and - all of the rights conveyed by this License. - -1.10. "Modifications" - means any of the following: - - (a) any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered - Software; or - - (b) any new file in Source Code Form that contains any Covered - Software. - -1.11. 
"Patent Claims" of a Contributor - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the - License, by the making, using, selling, offering for sale, having - made, import, or transfer of either its Contributions or its - Contributor Version. - -1.12. "Secondary License" - means either the GNU General Public License, Version 2.0, the GNU - Lesser General Public License, Version 2.1, the GNU Affero General - Public License, Version 3.0, or any later versions of those - licenses. - -1.13. "Source Code Form" - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that - controls, is controlled by, or is under common control with You. For - purposes of this definition, "control" means (a) the power, direct - or indirect, to cause the direction or management of such entity, - whether by contract or otherwise, or (b) ownership of more than - fifty percent (50%) of the outstanding shares or beneficial - ownership of such entity. - -2. License Grants and Conditions --------------------------------- - -2.1. Grants - -Each Contributor hereby grants You a world-wide, royalty-free, -non-exclusive license: - -(a) under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - -(b) under Patent Claims of such Contributor to make, use, sell, offer - for sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. 
Effective Date - -The licenses granted in Section 2.1 with respect to any Contribution -become effective for each Contribution on the date the Contributor first -distributes such Contribution. - -2.3. Limitations on Grant Scope - -The licenses granted in this Section 2 are the only rights granted under -this License. No additional rights or licenses will be implied from the -distribution or licensing of Covered Software under this License. -Notwithstanding Section 2.1(b) above, no patent license is granted by a -Contributor: - -(a) for any code that a Contributor has removed from Covered Software; - or - -(b) for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - -(c) under Patent Claims infringed by Covered Software in the absence of - its Contributions. - -This License does not grant any rights in the trademarks, service marks, -or logos of any Contributor (except as may be necessary to comply with -the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - -No Contributor makes additional grants as a result of Your choice to -distribute the Covered Software under a subsequent version of this -License (see Section 10.2) or under the terms of a Secondary License (if -permitted under the terms of Section 3.3). - -2.5. Representation - -Each Contributor represents that the Contributor believes its -Contributions are its original creation(s) or it has sufficient rights -to grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - -This License is not intended to limit any rights You have under -applicable copyright doctrines of fair use, fair dealing, or other -equivalents. - -2.7. Conditions - -Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted -in Section 2.1. - -3. Responsibilities -------------------- - -3.1. 
Distribution of Source Form - -All distribution of Covered Software in Source Code Form, including any -Modifications that You create or to which You contribute, must be under -the terms of this License. You must inform recipients that the Source -Code Form of the Covered Software is governed by the terms of this -License, and how they can obtain a copy of this License. You may not -attempt to alter or restrict the recipients' rights in the Source Code -Form. - -3.2. Distribution of Executable Form - -If You distribute Covered Software in Executable Form then: - -(a) such Covered Software must also be made available in Source Code - Form, as described in Section 3.1, and You must inform recipients of - the Executable Form how they can obtain a copy of such Source Code - Form by reasonable means in a timely manner, at a charge no more - than the cost of distribution to the recipient; and - -(b) You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter - the recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - -You may create and distribute a Larger Work under terms of Your choice, -provided that You also comply with the requirements of this License for -the Covered Software. If the Larger Work is a combination of Covered -Software with a work governed by one or more Secondary Licenses, and the -Covered Software is not Incompatible With Secondary Licenses, this -License permits You to additionally distribute such Covered Software -under the terms of such Secondary License(s), so that the recipient of -the Larger Work may, at their option, further distribute the Covered -Software under the terms of either this License or such Secondary -License(s). - -3.4. 
Notices - -You may not remove or alter the substance of any license notices -(including copyright notices, patent notices, disclaimers of warranty, -or limitations of liability) contained within the Source Code Form of -the Covered Software, except that You may alter any license notices to -the extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - -You may choose to offer, and to charge a fee for, warranty, support, -indemnity or liability obligations to one or more recipients of Covered -Software. However, You may do so only on Your own behalf, and not on -behalf of any Contributor. You must make it absolutely clear that any -such warranty, support, indemnity, or liability obligation is offered by -You alone, and You hereby agree to indemnify every Contributor for any -liability incurred by such Contributor as a result of warranty, support, -indemnity or liability terms You offer. You may include additional -disclaimers of warranty and limitations of liability specific to any -jurisdiction. - -4. Inability to Comply Due to Statute or Regulation ---------------------------------------------------- - -If it is impossible for You to comply with any of the terms of this -License with respect to some or all of the Covered Software due to -statute, judicial order, or regulation then You must: (a) comply with -the terms of this License to the maximum extent possible; and (b) -describe the limitations and the code they affect. Such description must -be placed in a text file included with all distributions of the Covered -Software under this License. Except to the extent prohibited by statute -or regulation, such description must be sufficiently detailed for a -recipient of ordinary skill to be able to understand it. - -5. Termination --------------- - -5.1. The rights granted under this License will terminate automatically -if You fail to comply with any of its terms. 
However, if You become -compliant, then the rights granted under this License from a particular -Contributor are reinstated (a) provisionally, unless and until such -Contributor explicitly and finally terminates Your grants, and (b) on an -ongoing basis, if such Contributor fails to notify You of the -non-compliance by some reasonable means prior to 60 days after You have -come back into compliance. Moreover, Your grants from a particular -Contributor are reinstated on an ongoing basis if such Contributor -notifies You of the non-compliance by some reasonable means, this is the -first time You have received notice of non-compliance with this License -from such Contributor, and You become compliant prior to 30 days after -Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent -infringement claim (excluding declaratory judgment actions, -counter-claims, and cross-claims) alleging that a Contributor Version -directly or indirectly infringes any patent, then the rights granted to -You by any and all Contributors for the Covered Software under Section -2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all -end user license agreements (excluding distributors and resellers) which -have been validly granted by You or Your distributors under this License -prior to termination shall survive termination. - -************************************************************************ -* * -* 6. Disclaimer of Warranty * -* ------------------------- * -* * -* Covered Software is provided under this License on an "as is" * -* basis, without warranty of any kind, either expressed, implied, or * -* statutory, including, without limitation, warranties that the * -* Covered Software is free of defects, merchantable, fit for a * -* particular purpose or non-infringing. The entire risk as to the * -* quality and performance of the Covered Software is with You. 
* -* Should any Covered Software prove defective in any respect, You * -* (not any Contributor) assume the cost of any necessary servicing, * -* repair, or correction. This disclaimer of warranty constitutes an * -* essential part of this License. No use of any Covered Software is * -* authorized under this License except under this disclaimer. * -* * -************************************************************************ - -************************************************************************ -* * -* 7. Limitation of Liability * -* -------------------------- * -* * -* Under no circumstances and under no legal theory, whether tort * -* (including negligence), contract, or otherwise, shall any * -* Contributor, or anyone who distributes Covered Software as * -* permitted above, be liable to You for any direct, indirect, * -* special, incidental, or consequential damages of any character * -* including, without limitation, damages for lost profits, loss of * -* goodwill, work stoppage, computer failure or malfunction, or any * -* and all other commercial damages or losses, even if such party * -* shall have been informed of the possibility of such damages. This * -* limitation of liability shall not apply to liability for death or * -* personal injury resulting from such party's negligence to the * -* extent applicable law prohibits such limitation. Some * -* jurisdictions do not allow the exclusion or limitation of * -* incidental or consequential damages, so this exclusion and * -* limitation may not apply to You. * -* * -************************************************************************ - -8. Litigation -------------- - -Any litigation relating to this License may be brought only in the -courts of a jurisdiction where the defendant maintains its principal -place of business and such litigation shall be governed by laws of that -jurisdiction, without reference to its conflict-of-law provisions. 
-Nothing in this Section shall prevent a party's ability to bring -cross-claims or counter-claims. - -9. Miscellaneous ----------------- - -This License represents the complete agreement concerning the subject -matter hereof. If any provision of this License is held to be -unenforceable, such provision shall be reformed only to the extent -necessary to make it enforceable. Any law or regulation which provides -that the language of a contract shall be construed against the drafter -shall not be used to construe this License against a Contributor. - -10. Versions of the License ---------------------------- - -10.1. New Versions - -Mozilla Foundation is the license steward. Except as provided in Section -10.3, no one other than the license steward has the right to modify or -publish new versions of this License. Each version will be given a -distinguishing version number. - -10.2. Effect of New Versions - -You may distribute the Covered Software under the terms of the version -of the License under which You originally received the Covered Software, -or under the terms of any subsequent version published by the license -steward. - -10.3. Modified Versions - -If you create software not governed by this License, and you want to -create a new license for such software, you may create and use a -modified version of this License if you rename the license and remove -any references to the name of the license steward (except to note that -such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary -Licenses - -If You choose to distribute Source Code Form that is Incompatible With -Secondary Licenses under the terms of this version of the License, the -notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice -------------------------------------------- - - This Source Code Form is subject to the terms of the Mozilla Public - License, v. 2.0. 
If a copy of the MPL was not distributed with this - file, You can obtain one at http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular -file, then You may include the notice in a location (such as a LICENSE -file in a relevant directory) where a recipient would be likely to look -for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice ---------------------------------------------------------- - - This Source Code Form is "Incompatible With Secondary Licenses", as - defined by the Mozilla Public License, v. 2.0. + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Notice b/Notice new file mode 100644 index 0000000..da90f32 --- /dev/null +++ b/Notice @@ -0,0 +1,39 @@ +// ------------------------------------------------------------------ +// NOTICE file corresponding to the section 4d of The Apache License, +// Version 2.0, in this case for +// ------------------------------------------------------------------ + +digest + Revision: 9fb4663eab3efe9542bde3773ec5435fb89e0dd9 + Address: https://github.com/icholy/digest + License: https://github.com/icholy/digest/blob/master/LICENSE + +resty + Revision: 3d08b3602d13ae8a44f246db2ec34402e18497c6 + Address: https://github.com/go-resty/resty + License: https://github.com/go-resty/resty/blob/master/LICENSE + +terraform-plugin-docs + Revision: ff6f844120050d19da921512bc94ee5b46d17b71 + Address: https://github.com/hashicorp/terraform-plugin-docs + License: https://github.com/hashicorp/terraform-plugin-docs/blob/main/LICENSE + +terraform-plugin-framework + Revision: 330a87f68e5c8fdbf2224e3111f9a430d26d9fb6 + Address: https://github.com/hashicorp/terraform-plugin-framework + License: https://github.com/hashicorp/terraform-plugin-framework/blob/main/LICENSE + +terraform-plugin-go + Revision: 1d4cacead6fb6e7f85810f94451c38baa2345ea2 + Address: https://github.com/hashicorp/terraform-plugin-go + License: https://github.com/hashicorp/terraform-plugin-go/blob/main/LICENSE + +terraform-plugin-log + 
Revision: 2a7a4a85e924b5cefe5189bd515220a207d90e30 + Address: https://github.com/hashicorp/terraform-plugin-log + License: https://github.com/hashicorp/terraform-plugin-log/blob/main/LICENSE + +terraform-plugin-sdk + Revision: a21509dacde9d30c1683b66b32d1ee0be1619c3d + Address: https://github.com/hashicorp/terraform-plugin-sdk + License: https://github.com/hashicorp/terraform-plugin-sdk/blob/main/LICENSE diff --git a/README.md b/README.md index da58802..0f79de8 100644 --- a/README.md +++ b/README.md @@ -1,64 +1,1403 @@ -# Terraform Provider Scaffolding (Terraform Plugin Framework) +# Terraform TiDB Cloud Provider -_This template repository is built on the [Terraform Plugin Framework](https://github.com/hashicorp/terraform-plugin-framework). The template repository built on the [Terraform Plugin SDK](https://github.com/hashicorp/terraform-plugin-sdk) can be found at [terraform-provider-scaffolding](https://github.com/hashicorp/terraform-provider-scaffolding). See [Which SDK Should I Use?](https://www.terraform.io/docs/plugin/which-sdk.html) in the Terraform documentation for additional information._ +[![License](https://img.shields.io/github/license/tidbcloud/terraform-provider-tidbcloud.svg)](https://github.com/tidbcloud/terraform-provider-tidbcloud/blob/master/LICENSE) -This repository is a *template* for a [Terraform](https://www.terraform.io) provider. It is intended as a starting point for creating Terraform providers, containing: +This is the repository for the terraform-provider-tidbcloud, which allows one to use Terraform with TiDB Cloud. Learn more about [TiDB Cloud](https://en.pingcap.com/tidb-cloud/) -- A resource and a data source (`internal/provider/`), -- Examples (`examples/`) and generated documentation (`docs/`), -- Miscellaneous meta files. +For general information about Terraform, visit the [official website](https://www.terraform.io) and the [GitHub project page](https://github.com/hashicorp/terraform). 
-These files contain boilerplate code that you will need to edit to create your own Terraform provider. Tutorials for creating Terraform providers can be found on the [HashiCorp Learn](https://learn.hashicorp.com/collections/terraform/providers) platform. _Terraform Plugin Framework specific guides are titled accordingly._ +## TOC -Please see the [GitHub template repository documentation](https://help.github.com/en/github/creating-cloning-and-archiving-repositories/creating-a-repository-from-a-template) for how to create a new repository from this template on GitHub. +- [Requirements](#requirements) +- [Support](#support) +- [Using the provider](#using-the-provider) + * [Set up](#set-up) + * [Create an API key](#create-an-api-key) + * [Get TiDB Cloud provider](#get-tidb-cloud-provider) + * [Config the provider](#config-the-provider) + * [Get projectId with project Data Source](#get-projectid-with-project-data-source) + * [Get cluster spec info with cluster-spec Data Source](#get-cluster-spec-info-with-cluster-spec-data-source) + * [Create a dedicated cluster with cluster resource](#create-a-dedicated-cluster-with-cluster-resource) + * [Change the dedicated cluster](#change-the-dedicated-cluster) + * [Create a backup with backup resource](#create-a-backup-with-backup-resource) + * [Create a restore task with restore resource](#create-a-restore-task-with-restore-resource) + * [Importing the restore cluster](#importing-the-restore-cluster) + * [Destroy the dedicated cluster](#destroy-the-dedicated-cluster) +- [Developing the Provider](#developing-the-provider) + * [Environment](#environment) + * [Building the provider from source](#building-the-provider-from-source) + * [Generate or update documentation in docs file](#generate-or-update-documentation-in-docs-file) + * [Running the acceptance test](#running-the-acceptance-test) + * [Debug the provider](#debug-the-provider) +- [Follow us](#follow-us) + * [Twitter](#twitter) +- [License](#license) -Once you've written 
your provider, you'll want to [publish it on the Terraform Registry](https://www.terraform.io/docs/registry/providers/publishing.html) so that others can use it. ## Requirements - [Terraform](https://www.terraform.io/downloads.html) >= 1.0 -- [Go](https://golang.org/doc/install) >= 1.17 +- [Go](https://golang.org/doc/install) >= 1.18 (if you want to build the provider plugin) -## Building The Provider +## Support -1. Clone the repository -1. Enter the repository directory -1. Build the provider using the Go `install` command: +Resources +- [cluster](./docs/resources/cluster.md) +- [backup](./docs/resources/backup.md) (not support update) +- [restore](./docs/resources/restore.md) (not support update and delete) + +DataSource +- [project](./docs/data-sources/project.md) +- [cluster spec](./docs/data-sources/cluster_spec.md) +- [restore](./docs/data-sources/restore.md) +- [backcup](./docs/data-sources/backup.md) + + +## Using the provider + +Documentation about the provider usage and the corresponding specific configuration options can be found on the [official's website](https://www.terraform.io/language/providers). + +Here we just give an example to show how to use the TiDB Cloud provider. + +In this example, you will create and manage a dedicated cluster, create a backup for it and restore from the backup. + +### Set up + +TiDB Cloud provider has released to terraform registry. All you need to do is install terraform (>=1.0). + +For Mac user, you can install it with Homebrew. +First, install the HashiCorp tap, a repository of all our Homebrew packages. ```shell -go install +brew tap hashicorp/tap +``` +Now, install Terraform with hashicorp/tap/terraform. +```shell +brew install hashicorp/tap/terraform +``` + +See [official doc](https://learn.hashicorp.com/tutorials/terraform/install-cli?in=terraform/aws-get-started) for other installation methods. + +### Create an API key + +The TiDB Cloud API uses HTTP Digest Authentication. 
It protects your private key from being sent over the network. + +However, terraform-provider-tidbcloud does not support managing API key now. So you need to create the API key in the [console](https://tidbcloud.com/console/clusters). + +Turn to [TiDB Cloud API doc](https://docs.pingcap.com/tidbcloud/api/v1beta#section/Authentication/API-Key-Management) for help if you meet any problems. + +### Get TiDB Cloud provider + +Create a main.tf file to get the TiDB Cloud provider: + +``` +terraform { + required_providers { + tidbcloud = { + source = "tidbcloud/tidbcloud" + version = "~> 0.0.1" + } + } + required_version = ">= 1.0.0" +} ``` +- The `source` attribute defines the provider which will be downloaded from [Terraform Registry](https://registry.terraform.io/) by default +- The `version` attribute is optional which defines the version of the provider, it will use the latest version by default +- The `required_version` is optional which defines the version of the terraform, it will use the latest version by default -## Adding Dependencies +### Config the provider + +You need to config the provider like: + +``` +terraform { + required_providers { + tidbcloud = { + source = "tidbcloud/tidbcloud" + version = "~> 0.0.1" + } + } + required_version = ">= 1.0.0" +} -This provider uses [Go modules](https://github.com/golang/go/wiki/Modules). -Please see the Go documentation for the most up to date information about using Go modules. +provider "tidbcloud" { + username = "fake_username" + password = "fake_password" +} +``` + +username and password are the API key's public key and private key, you can also pass them with the environment: + +``` +export TIDBCLOUD_USERNAME = ${public_key} +export TIDBCLOUD_PASSWORD = ${private_key} +``` + +Now, you can use the tidbcloud provider! 
+ +### Get projectId with project Data Source + +Let us get all the projects by project data source first: +- Use `data` block to define the data source of tidbcloud, it consists of the data source type and the data source name. In this example, data source type is `tidbcloud_project` and the name is `example_project`. The prefix of the type maps to the name of the provider. +- Use `output` block to get the information, and expose information for other Terraform configurations to use. It is similar to return values in programming languages. See [official doc](https://www.terraform.io/language/values/outputs) for more detail + +Besides, you can find all the supported configs for the data source and resource [here](./docs) + +``` +terraform { + required_providers { + tidbcloud = { + source = "tidbcloud/tidbcloud" + version = "~> 0.0.1" + } + } + required_version = ">= 1.0.0" +} + +provider "tidbcloud" { + username = "fake_username" + password = "fake_password" +} + +data "tidbcloud_project" "example_project" { + page = 1 + page_size = 10 +} + +output "projects" { + value = data.tidbcloud_project.example_project.items +} +``` -To add a new dependency `github.com/author/dependency` to your Terraform provider: +Then you can apply the configuration with the `terraform apply`, you need to type `yes` at the confirmation prompt to proceed. Use `terraform apply --auto-approve` to skip the type. ```shell -go get github.com/author/dependency -go mod tidy +$ terraform apply --auto-approve +data.tidbcloud_project.example_project: Reading... 
+data.tidbcloud_project.example_project: Read complete after 1s [id=just for test] + +Changes to Outputs: + + projects = [ + + { + + cluster_count = 0 + + create_timestamp = "1649154426" + + id = "1372813089191121286" + + name = "test1" + + org_id = "1372813089189921287" + + user_count = 1 + }, + + { + + cluster_count = 1 + + create_timestamp = "1640602740" + + id = "1372813089189561287" + + name = "default project" + + org_id = "1372813089189921287" + + user_count = 1 + }, + ] + +You can apply this plan to save these new output values to the Terraform state, without changing any real infrastructure. + +Apply complete! Resources: 0 added, 0 changed, 0 destroyed. + +Outputs: + +projects = tolist([ + { + "cluster_count" = 0 + "create_timestamp" = "1649154426" + "id" = "1372813089191121286" + "name" = "test1" + "org_id" = "1372813089189921287" + "user_count" = 1 + }, + { + "cluster_count" = 1 + "create_timestamp" = "1640602740" + "id" = "1372813089189561287" + "name" = "default project" + "org_id" = "1372813089189921287" + "user_count" = 1 + }, +]) ``` -Then commit the changes to `go.mod` and `go.sum`. +Now, you get all the available projects, copy one of the id you need. Here we use the default project's ID. -## Using the provider +### Get cluster spec info with cluster-spec Data Source + +Before creating a TiDB cluster, you may need to get the available config values (providers, regions, etc.) by cluster-spec Data Source: + +``` +data "tidbcloud_cluster_spec" "example_cluster_spec" { +} + +output "cluster_spec" { + value = data.tidbcloud_cluster_spec.example_cluster_spec.items +} +``` + +Execute the `terraform apply --auto-approve`, we will get all the specifications. 
Here we show a part of the results: + +``` +{ + "cloud_provider" = "AWS" + "cluster_type" = "DEDICATED" + "region" = "eu-central-1" + "tidb" = tolist([ + { + "node_quantity_range" = { + "min" = 1 + "step" = 1 + } + "node_size" = "2C8G" + }, + { + "node_quantity_range" = { + "min" = 1 + "step" = 1 + } + "node_size" = "4C16G" + }, + { + "node_quantity_range" = { + "min" = 1 + "step" = 1 + } + "node_size" = "8C16G" + }, + { + "node_quantity_range" = { + "min" = 1 + "step" = 1 + } + "node_size" = "16C32G" + }, + ]) + "tiflash" = tolist([ + { + "node_quantity_range" = { + "min" = 0 + "step" = 1 + } + "node_size" = "8C64G" + "storage_size_gib_range" = { + "max" = 2048 + "min" = 500 + } + }, + { + "node_quantity_range" = { + "min" = 0 + "step" = 1 + } + "node_size" = "16C128G" + "storage_size_gib_range" = { + "max" = 2048 + "min" = 500 + } + }, + ]) + "tikv" = tolist([ + { + "node_quantity_range" = { + "min" = 3 + "step" = 3 + } + "node_size" = "2C8G" + "storage_size_gib_range" = { + "max" = 500 + "min" = 200 + } + }, + { + "node_quantity_range" = { + "min" = 3 + "step" = 3 + } + "node_size" = "4C16G" + "storage_size_gib_range" = { + "max" = 2048 + "min" = 200 + } + }, + { + "node_quantity_range" = { + "min" = 3 + "step" = 3 + } + "node_size" = "8C32G" + "storage_size_gib_range" = { + "max" = 4096 + "min" = 500 + } + }, + { + "node_quantity_range" = { + "min" = 3 + "step" = 3 + } + "node_size" = "8C64G" + "storage_size_gib_range" = { + "max" = 4096 + "min" = 500 + } + }, + { + "node_quantity_range" = { + "min" = 3 + "step" = 3 + } + "node_size" = "16C64G" + "storage_size_gib_range" = { + "max" = 4096 + "min" = 500 + } + }, + ]) + } +``` + +- `cloud_provider` is the cloud provider on which your TiDB cluster is hosted +- `region` is the region of cloud_provider +- `node_quantity_range` shows the min quantity of the node and the step if you want to scale the node +- `node_size` is the size of the node +- `storage_size_gib_range` shows the min and max storage you can set to 
the node + + +### Create a dedicated cluster with cluster resource + +> Make sure you have set a Project CIDR on TiDB Cloud console first. + +Now, you can create a dedicated cluster with the projectId and the spec info: +- Use `resource` block to define the resource of tidbcloud, it consists of the resource type and the resource name. In this example, resource type is `tidbcloud_cluster` and the name is `example_cluster` + +Once again, you can find all the supported configs for the data source and resource [here](./docs) + +Here I give an example for tidbcloud_cluster: + +``` +resource "tidbcloud_cluster" "example_cluster" { + project_id = "1372813089189561287" + name = "firstCluster" + cluster_type = "DEDICATED" + cloud_provider = "AWS" + region = "eu-central-1" + config = { + root_password = "Your_root_password1." + port = 4000 + components = { + tidb = { + node_size : "8C16G" + node_quantity : 1 + } + tikv = { + node_size : "8C32G" + storage_size_gib : 500, + node_quantity : 3 + } + } + } +} +``` + +Execute `terraform apply`, it is not recommended to use `terraform apply --auto-approve` when you apply a resource. + +```shell +$ terraform apply +data.tidbcloud_project.example_project: Reading... +data.tidbcloud_project.example_project: Read complete after 1s [id=just for test] + +Terraform used the selected providers to generate the following execution plan. 
Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + # tidbcloud_cluster.example_cluster will be created + + resource "tidbcloud_cluster" "example_cluster" { + + cloud_provider = "AWS" + + cluster_type = "DEDICATED" + + config = { + + components = { + + tidb = { + + node_quantity = 1 + + node_size = "8C16G" + } + + tikv = { + + node_quantity = 3 + + node_size = "8C32G" + + storage_size_gib = 500 + } + } + + ip_access_list = [ + + { + + cidr = "0.0.0.0/0" + + description = "all" + }, + ] + + port = 4000 + + root_password = "Your_root_password1." + } + + id = (known after apply) + + name = "firstCluster" + + project_id = "1372813089189561287" + + region = "eu-central-1" + + status = (known after apply) + } + +Plan: 1 to add, 0 to change, 0 to destroy. + +Do you want to perform these actions? + Terraform will perform the actions described above. + Only 'yes' will be accepted to approve. + + Enter a value: +``` + +Terraform will generate an execution plan for you: +- You can check the diff between the configuration and the state +- You can also see the results of this `apply`: it will add a new resource, and no resource will be changed or destroyed +- The `known after apply` shows that you will get the value after `apply` + +If everything is in your plan, type the `yes` to continue: + +``` +Do you want to perform these actions? + Terraform will perform the actions described above. + Only 'yes' will be accepted to approve. + + Enter a value: yes + +tidbcloud_cluster.example_cluster: Creating... +tidbcloud_cluster.example_cluster: Creation complete after 1s [id=1379661944630234067] + +Apply complete! Resources: 1 added, 0 changed, 0 destroyed. + +``` + +Use `terraform show` or `terraform state show tidbcloud_cluster.example_cluster` to inspect the state of your resource. The former will show all the states (all the resources and the data source). 
+ +```shell +$ terraform state show tidbcloud_cluster.example_cluster + +# tidbcloud_cluster.example_cluster: +resource "tidbcloud_cluster" "example_cluster" { + cloud_provider = "AWS" + cluster_type = "DEDICATED" + config = { + components = { + tidb = { + node_quantity = 1 + node_size = "8C16G" + } + tikv = { + node_quantity = 3 + node_size = "8C32G" + storage_size_gib = 500 + } + } + ip_access_list = [ + # (1 unchanged element hidden) + ] + port = 4000 + root_password = "Your_root_password1." + } + id = "1379661944630234067" + name = "firstCluster" + project_id = "1372813089189561287" + region = "eu-central-1" + status = "CREATING" +} +``` + +The status of the cluster is CREATING, we need to wait until it changes to `AVAILABLE`, it usually takes 10+ minutes. + +Once you want to check the status, execute `terraform refresh` to update the state, then use `terraform state show tidbcloud_cluster.example_cluster` to check the status. + +``` +$ terraform refresh + +tidbcloud_cluster.example_cluster: Refreshing state... [id=1379661944630234067] + +$ terraform state show tidbcloud_cluster.example_cluste + +# tidbcloud_cluster.example_cluster: +resource "tidbcloud_cluster" "example_cluster" { + cloud_provider = "AWS" + cluster_type = "DEDICATED" + config = { + components = { + tidb = { + node_quantity = 1 + node_size = "8C16G" + } + tikv = { + node_quantity = 3 + node_size = "8C32G" + storage_size_gib = 500 + } + } + ip_access_list = [ + # (1 unchanged element hidden) + ] + port = 4000 + root_password = "Your_root_password1." + } + id = "1379661944630234067" + name = "firstCluster" + project_id = "1372813089189561287" + region = "eu-central-1" + status = "AVAILABLE" +} +``` + +Congratulations! Your cluster is available now. + + +### Change the dedicated cluster + +We can also use terraform to manage the resource. 
As for cluster resource, we can: +- Increase TiFlash component for the dedicated cluster +- Scale the TiDB cluster +- Pause or resume the cluster + +**Increase TiFlash component** + +First, add tiflash config in components: + +``` + components = { + tidb = { + node_size : "8C16G" + node_quantity : 1 + } + tikv = { + node_size : "8C32G" + storage_size_gib : 500 + node_quantity : 3 + } + tiflash = { + node_size : "8C64G" + storage_size_gib : 500 + node_quantity : 1 + } + } +``` + +Then, execute `terraform apply`: + +``` +$ terraform apply + +tidbcloud_cluster.example_cluster: Refreshing state... [id=1379661944630234067] + +Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols: + ~ update in-place + +Terraform will perform the following actions: + + # tidbcloud_cluster.example_cluster will be updated in-place + ~ resource "tidbcloud_cluster" "example_cluster" { + ~ config = { + ~ components = { + + tiflash = { + + node_quantity = 1 + + node_size = "8C64G" + + storage_size_gib = 500 + } + # (2 unchanged attributes hidden) + } + # (3 unchanged attributes hidden) + } + id = "1379661944630234067" + name = "firstCluster" + ~ status = "AVAILABLE" -> (known after apply) + # (4 unchanged attributes hidden) + } + +Plan: 0 to add, 1 to change, 0 to destroy. + +Do you want to perform these actions? + Terraform will perform the actions described above. + Only 'yes' will be accepted to approve. + + Enter a value: + +``` + +Check the plan, you will find tiflash is added. And one resource will be changed after apply. Type `yes`: + +``` + Enter a value: yes + +tidbcloud_cluster.example_cluster: Modifying... [id=1379661944630234067] +tidbcloud_cluster.example_cluster: Modifications complete after 2s [id=1379661944630234067] + +Apply complete! Resources: 0 added, 1 changed, 0 destroyed. 
+``` + +Use `terraform state show tidbcloud_cluster.example_cluster` to see the status: + +``` +$ terraform state show tidbcloud_cluster.example_cluster + +# tidbcloud_cluster.example_cluster: +resource "tidbcloud_cluster" "example_cluster" { + cloud_provider = "AWS" + cluster_type = "DEDICATED" + config = { + components = { + tidb = { + node_quantity = 1 + node_size = "8C16G" + } + tiflash = { + node_quantity = 1 + node_size = "8C64G" + storage_size_gib = 500 + } + tikv = { + node_quantity = 3 + node_size = "8C32G" + storage_size_gib = 500 + } + } + ip_access_list = [ + # (1 unchanged element hidden) + ] + port = 4000 + root_password = "Your_root_password1." + } + id = "1379661944630234067" + name = "firstCluster" + project_id = "1372813089189561287" + region = "eu-central-1" + status = "MODIFYING" +} +``` + +The `MODIFYING` status shows the cluster is changing now. Wait for a moment, the status will be changed to `AVAILABLE`. + +**Scale the TiDB cluster** + +After the status is `AVAILABLE`, let us try to scale the TiDB cluster. + +Add one node for TiDB and TiFlash, TiKV needs to add at least 3 nodes for its step is 3. + +``` + components = { + tidb = { + node_size : "8C16G" + node_quantity : 2 + } + tikv = { + node_size : "8C32G" + storage_size_gib : 500 + node_quantity : 6 + } + tiflash = { + node_size : "8C64G" + storage_size_gib : 500 + node_quantity : 2 + } + } +``` + +Execute `terraform apply` and type `yes` after check: + +``` +$ terraform apply + +tidbcloud_cluster.example_cluster: Refreshing state... [id=1379661944630234067] + +Terraform used the selected providers to generate the following execution plan. 
Resource actions are indicated with the following symbols: + ~ update in-place + +Terraform will perform the following actions: + + # tidbcloud_cluster.example_cluster will be updated in-place + ~ resource "tidbcloud_cluster" "example_cluster" { + ~ config = { + ~ components = { + ~ tidb = { + ~ node_quantity = 1 -> 2 + # (1 unchanged attribute hidden) + } + ~ tiflash = { + ~ node_quantity = 1 -> 2 + # (2 unchanged attributes hidden) + } + ~ tikv = { + ~ node_quantity = 3 -> 6 + # (2 unchanged attributes hidden) + } + } + # (3 unchanged attributes hidden) + } + id = "1379661944630234067" + name = "firstCluster" + ~ status = "AVAILABLE" -> (known after apply) + # (4 unchanged attributes hidden) + } + +Plan: 0 to add, 1 to change, 0 to destroy. + +Do you want to perform these actions? + Terraform will perform the actions described above. + Only 'yes' will be accepted to approve. + + Enter a value: yes + +tidbcloud_cluster.example_cluster: Modifying... [id=1379661944630234067] +tidbcloud_cluster.example_cluster: Modifications complete after 2s [id=1379661944630234067] + +Apply complete! Resources: 0 added, 1 changed, 0 destroyed. +``` + +Wait for the status from `MODIFYING` to `AVAILABLE`. + +**Pause or resume the cluster** + +The cluster can also be paused when the status is `AVAILABLE` or be resumed when the status is `PAUSED`. +- set `paused = true ` to pause the cluster +- set `paused = false ` to resume the cluster + +``` +config = { + paused = true + root_password = "Your_root_password1." + port = 4000 + ... + } +``` + +execute `terraform apply` and type `yes` after check: + +``` +$ terraform apply + +tidbcloud_cluster.example_cluster: Refreshing state... [id=1379661944630234067] + +Terraform used the selected providers to generate the following execution plan. 
Resource actions are indicated with the following symbols: + ~ update in-place + +Terraform will perform the following actions: + + # tidbcloud_cluster.example_cluster will be updated in-place + ~ resource "tidbcloud_cluster" "example_cluster" { + ~ config = { + + paused = true + # (4 unchanged attributes hidden) + } + id = "1379661944630234067" + name = "firstCluster" + ~ status = "AVAILABLE" -> (known after apply) + # (4 unchanged attributes hidden) + } + +Plan: 0 to add, 1 to change, 0 to destroy. -Fill this in for each provider +Do you want to perform these actions? + Terraform will perform the actions described above. + Only 'yes' will be accepted to approve. + + Enter a value: yes + +tidbcloud_cluster.example_cluster: Modifying... [id=1379661944630234067] +tidbcloud_cluster.example_cluster: Modifications complete after 2s [id=1379661944630234067] + +Apply complete! Resources: 0 added, 1 changed, 0 destroyed. +``` + +Use the `terraform state show tidbcloud_cluster.example_cluster` to check the status: + +``` +$ terraform state show tidbcloud_cluster.example_cluster + +# tidbcloud_cluster.example_cluster: +resource "tidbcloud_cluster" "example_cluster" { + cloud_provider = "AWS" + cluster_type = "DEDICATED" + config = { + components = { + tidb = { + node_quantity = 2 + node_size = "8C16G" + } + tiflash = { + node_quantity = 2 + node_size = "8C64G" + storage_size_gib = 500 + } + tikv = { + node_quantity = 6 + node_size = "8C32G" + storage_size_gib = 500 + } + } + ip_access_list = [ + # (1 unchanged element hidden) + ] + paused = true + port = 4000 + root_password = "Your_root_password1." + } + id = "1379661944630234067" + name = "firstCluster" + project_id = "1372813089189561287" + region = "eu-central-1" + status = "PAUSED" +} +``` + +Now, resume the cluster by set `paused = false`: + +``` +config = { + paused = false + root_password = "Your_root_password1." + port = 4000 + ... 
+ } +``` + +After apply you will find the status turns to `RESUMING`: + +``` +# tidbcloud_cluster.example_cluster: +resource "tidbcloud_cluster" "example_cluster" { + cloud_provider = "AWS" + cluster_type = "DEDICATED" + config = { + components = { + tidb = { + node_quantity = 2 + node_size = "8C16G" + } + tiflash = { + node_quantity = 2 + node_size = "8C64G" + storage_size_gib = 500 + } + tikv = { + node_quantity = 6 + node_size = "8C32G" + storage_size_gib = 500 + } + } + ip_access_list = [ + # (1 unchanged element hidden) + ] + paused = false + port = 4000 + root_password = "Your_root_password1." + } + id = "1379661944630234067" + name = "firstCluster" + project_id = "1372813089189561287" + region = "eu-central-1" + status = "RESUMING" +} +``` + +Wait for a moment, the status will be changed to `AVAILABLE` again. + +### Create a backup with backup resource + +You have created and managed a dedicated cluster with terraform now. + +Next, you will create a backup for the cluster by the backup resource. + +First, copy the following config: + +``` +resource "tidbcloud_backup" "example_backup" { + project_id = "1372813089189561287" + cluster_id = "1379661944630234067" + name = "firstBackup" + description = "create by terraform" +} +``` + +You can also get project_id and cluster_id from the cluster resource like: + +``` +resource "tidbcloud_backup" "example_backup" { + project_id = tidbcloud_cluster.example_cluster.project_id + cluster_id = tidbcloud_cluster.example_cluster.id + name = "firstBackup" + description = "create by terraform" +} +``` + +Here we use the second config and execute `terraform apply`: + +``` +$ terraform apply + +tidbcloud_cluster.example_cluster: Refreshing state... [id=1379661944630234067] + +Terraform used the selected providers to generate the following execution plan. 
Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + # tidbcloud_backup.example_backup will be created + + resource "tidbcloud_backup" "example_backup" { + + cluster_id = "1379661944630234067" + + create_timestamp = (known after apply) + + description = "create by terraform" + + id = (known after apply) + + name = "firstBackup" + + project_id = "1372813089189561287" + + size = (known after apply) + + status = (known after apply) + + type = (known after apply) + } + +Plan: 1 to add, 0 to change, 0 to destroy. + +Do you want to perform these actions? + Terraform will perform the actions described above. + Only 'yes' will be accepted to approve. + + Enter a value: +``` + +Type `yes` to create a backup: + +``` + Enter a value: yes + +tidbcloud_backup.example_backup: Creating... +tidbcloud_backup.example_backup: Creation complete after 2s [id=1350048] + +Apply complete! Resources: 1 added, 0 changed, 0 destroyed. + +``` + +Use `terraform state show tidbcloud_backup.example_backup` to check the state of the backup: + +``` +$ terraform state show tidbcloud_backup.example_backup + +# tidbcloud_backup.example_backup: +resource "tidbcloud_backup" "example_backup" { + cluster_id = "1379661944630234067" + create_timestamp = "2022-08-26T07:56:10Z" + description = "create by terraform" + id = "1350048" + name = "firstBackup" + project_id = "1372813089189561287" + size = "0" + status = "PENDING" + type = "MANUAL" +} +``` + +Wait for some minutes and use `terraform refersh` to update the states: + +``` +$ terraform refresh +tidbcloud_cluster.example_cluster: Refreshing state... [id=1379661944630234067] +tidbcloud_backup.example_backup: Refreshing state... 
[id=1350048] +$ terraform state show tidbcloud_backup.example_backup +# tidbcloud_backup.example_backup: +resource "tidbcloud_backup" "example_backup" { + cluster_id = "1379661944630234067" + create_timestamp = "2022-08-26T07:56:10Z" + description = "create by terraform" + id = "1350048" + name = "firstBackup" + project_id = "1372813089189561287" + size = "198775" + status = "SUCCESS" + type = "MANUAL" +} + +``` + +Congratulations! You have create a backup for your cluster. Pay attention that the backup can not be updated. + + +### Create a restore task with restore resource + +You have created a dedicated cluster and have a backup of the cluster. + +Now, it is time to create a restore task by restore resource. With it, you can restore a cluster according to a backup. + +Here is the config for restore resource. Note that you can only restore data from a smaller node size to a larger node size: + +``` + +resource "tidbcloud_restore" "example_restore" { + project_id = tidbcloud_cluster.example_cluster.project_id + backup_id = tidbcloud_backup.example_backup.id + name = "restoreCluster" + config = { + root_password = "Your_root_password1." + port = 4000 + components = { + tidb = { + node_size : "8C16G" + node_quantity : 2 + } + tikv = { + node_size : "8C32G" + storage_size_gib : 500 + node_quantity : 6 + } + tiflash = { + node_size : "8C64G" + storage_size_gib : 500 + node_quantity : 2 + } + } + } +} +``` + +Execute `terraform apply` and type `yes`: + +``` +$ terraform apply +tidbcloud_cluster.example_cluster: Refreshing state... [id=1379661944630234067] +tidbcloud_backup.example_backup: Refreshing state... [id=1350048] + +Terraform used the selected providers to generate the following execution plan. 
Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + # tidbcloud_restore.example_restore will be created + + resource "tidbcloud_restore" "example_restore" { + + backup_id = "1350048" + + cluster = { + + id = (known after apply) + + name = (known after apply) + + status = (known after apply) + } + + cluster_id = (known after apply) + + config = { + + components = { + + tidb = { + + node_quantity = 2 + + node_size = "8C16G" + } + + tiflash = { + + node_quantity = 2 + + node_size = "8C64G" + + storage_size_gib = 500 + } + + tikv = { + + node_quantity = 6 + + node_size = "8C32G" + + storage_size_gib = 500 + } + } + + port = 4000 + + root_password = "Your_root_password1." + } + + create_timestamp = (known after apply) + + error_message = (known after apply) + + id = (known after apply) + + name = "restoreCluster" + + project_id = "1372813089189561287" + + status = (known after apply) + } + +Plan: 1 to add, 0 to change, 0 to destroy. + +Do you want to perform these actions? + Terraform will perform the actions described above. + Only 'yes' will be accepted to approve. + + Enter a value: yes + +tidbcloud_restore.example_restore: Creating... +tidbcloud_restore.example_restore: Creation complete after 1s [id=780114] + +Apply complete! Resources: 1 added, 0 changed, 0 destroyed. 
+``` + +Check the state of the restore task with `terraform state show tidbcloud_restore.example_restore`: + +``` +$ terraform state show tidbcloud_restore.example_restore + +# tidbcloud_restore.example_restore: +resource "tidbcloud_restore" "example_restore" { + backup_id = "1350048" + cluster = { + id = "1379661944630264072" + name = "restoreCluster" + status = "INITIALIZING" + } + cluster_id = "1379661944630234067" + config = { + components = { + tidb = { + node_quantity = 2 + node_size = "8C16G" + } + tiflash = { + node_quantity = 2 + node_size = "8C64G" + storage_size_gib = 500 + } + tikv = { + node_quantity = 6 + node_size = "8C32G" + storage_size_gib = 500 + } + } + port = 4000 + root_password = "Your_root_password1." + } + create_timestamp = "2022-08-26T08:16:33Z" + id = "780114" + name = "restoreCluster" + project_id = "1372813089189561287" + status = "PENDING" +} +``` + +You can see the restore task's status is `PENDING` and the cluster's status is `INITIALIZING`. + +After the cluster is `AVAILABLE`, the restore task will be `RUNNING` and turn to `SUCCESS` at last. + +It is everything ok? No, the bad news is the restored cluster is not managed by terraform. + +Don't worry, we can solve it in the next section. + +### Importing the restore cluster + +We can manage a cluster with terraform by import even if it is not created by terraform. + +Let us import the cluster which is created by the restore task in the last section. + +First add a cluster resource like: +``` +resource "tidbcloud_cluster" "restore_cluster1" {} +``` + +Then import the cluster by `terraform import tidbcloud_cluster.restore_cluster1 projectId,clusterId`, you can get the projectId and clusterId by restore resource: +``` +$ terraform import tidbcloud_cluster.restore_cluster1 1372813089189561287,1379661944630264072 + +tidbcloud_cluster.restore_cluster1: Importing from ID "1372813089189561287,1379661944630264072"... +tidbcloud_cluster.restore_cluster1: Import prepared! 
+ Prepared tidbcloud_cluster for import +tidbcloud_cluster.restore_cluster1: Refreshing state... [id=1379661944630264072] + +Import successful! + +The resources that were imported are shown above. These resources are now in +your Terraform state and will henceforth be managed by Terraform. +``` + +Use `terraform state show tidbcloud_cluster.restore_cluster1` to get the state of the cluster: +``` +$ terraform state show tidbcloud_cluster.restore_cluster1 + +# tidbcloud_cluster.restore_cluster1: +resource "tidbcloud_cluster" "restore_cluster1" { + cloud_provider = "AWS" + cluster_type = "DEDICATED" + config = { + components = { + tidb = { + node_quantity = 2 + node_size = "8C16G" + } + tiflash = { + node_quantity = 2 + node_size = "8C64G" + storage_size_gib = 500 + } + tikv = { + node_quantity = 6 + node_size = "8C32G" + storage_size_gib = 500 + } + } + port = 4000 + } + id = "1379661944630264072" + name = "restoreCluster" + project_id = "1372813089189561287" + region = "eu-central-1" + status = "AVAILABLE" +} +``` + +In order to manage it, you can copy it to your config file. Remember to delete the status and id, for they are computed by terraform and can not be set in the config: +``` +resource "tidbcloud_cluster" "restore_cluster1" { + cloud_provider = "AWS" + cluster_type = "DEDICATED" + config = { + components = { + tidb = { + node_quantity = 2 + node_size = "8C16G" + } + tiflash = { + node_quantity = 2 + node_size = "8C64G" + storage_size_gib = 500 + } + tikv = { + node_quantity = 6 + node_size = "8C32G" + storage_size_gib = 500 + } + } + port = 4000 + } + name = "restoreCluster" + project_id = "1372813089189561287" + region = "eu-central-1" +} +``` + +You can use `terraform fmt` to format your config file: +``` +$ terraform fmt + +main.tf +``` + +To ensure the consistency of the config and state, you can execute `terraform plan` or `terraform apply`. If you see `No changes`, the import is successful. 
+ +``` +$ terraform apply + +tidbcloud_cluster.restore_cluster1: Refreshing state... [id=1379661944630264072] +tidbcloud_cluster.example_cluster: Refreshing state... [id=1379661944630234067] +tidbcloud_backup.example_backup: Refreshing state... [id=1350048] +tidbcloud_restore.example_restore: Refreshing state... [id=780114] + +No changes. Your infrastructure matches the configuration. + +Terraform has compared your real infrastructure against your configuration and found no differences, so no changes are needed. + +Apply complete! Resources: 0 added, 0 changed, 0 destroyed. +``` + +Now you can manage the cluster created by the restore task. + +### Destroy the dedicated cluster + +To destroy the resource, you can simply use `terraform destroy` and type `yes`. Don't worry about the order of deletion, terraform will generate a DAG based on the dependencies automatically. + +``` +$ terraform destroy + +Plan: 0 to add, 0 to change, 4 to destroy. + +Do you really want to destroy all resources? + Terraform will destroy all your managed infrastructure, as shown above. + There is no undo. Only 'yes' will be accepted to confirm. + + Enter a value: yes + +tidbcloud_cluster.restore_cluster1: Destroying... [id=1379661944630264072] +tidbcloud_cluster.restore_cluster1: Destruction complete after 2s +tidbcloud_restore.example_restore: Destroying... [id=780114] +tidbcloud_restore.example_restore: Destruction complete after 0s +tidbcloud_backup.example_backup: Destroying... [id=1350048] +tidbcloud_backup.example_backup: Destruction complete after 2s +tidbcloud_cluster.example_cluster: Destroying... [id=1379661944630234067] +tidbcloud_cluster.example_cluster: Destruction complete after 0s +╷ +│ Warning: Unsupported +│ +│ restore can't be delete +╵ + +Destroy complete! Resources: 4 destroyed. +``` + +Note that a warning is appeared for restore can't be deleted. 
If you execute `terraform show`, you will find nothing because all the states are cleared:
If you omit this, Terraform will _only_ use + # the dev_overrides block, and so no other providers will be available. + direct {} +} +``` + +2. run go install in the terraform-provider-tidbcloud, you will find the `terraform-provider-tidbcloud` will be installed under the `/usr/local/go/bin` ``` +go install +``` + +3. Terraform CLI commands, such as terraform apply, will now use the specified provider binary if you follow the below config: +``` +terraform { + required_providers { + tidbcloud = { + source = "hashicorp/tidbcloud" + } + } +} +``` + +## Follow us + +Twitter [@PingCAP](https://twitter.com/PingCAP) + + +## License + +terraform-provider-tidbcloud is under the Apache 2.0 license. See the [LICENSE](./LICENSE) file for details. \ No newline at end of file diff --git a/docs/data-sources/backup.md b/docs/data-sources/backup.md new file mode 100644 index 0000000..3d2532a --- /dev/null +++ b/docs/data-sources/backup.md @@ -0,0 +1,73 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "tidbcloud_backup Data Source - terraform-provider-tidbcloud" +subcategory: "" +description: |- + backup data source +--- + +# tidbcloud_backup (Data Source) + +backup data source + +## Example Usage + +```terraform +terraform { + required_providers { + tidbcloud = { + source = "tidbcloud/tidbcloud" + } + } +} + +provider "tidbcloud" { + username = "fake_username" + password = "fake_password" +} + +data "tidbcloud_backup" "example" { + page = 1 + page_size = 10 + project_id = "fake_id" + cluster_id = "fake_id" +} + +output "output" { + value = data.tidbcloud_backup.example +} +``` + + +## Schema + +### Required + +- `cluster_id` (String) The ID of your cluster. +- `project_id` (String) The ID of the project. You can get the project ID from [tidbcloud_project datasource](../project). + +### Optional + +- `page` (Number) Default:1 The number of pages. +- `page_size` (Number) Default:10 The size of a pages. 
+ +### Read-Only + +- `id` (String) ignore it, it is just for test. +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) +- `total` (Number) The total number of backups in the project. + + +### Nested Schema for `items` + +Read-Only: + +- `create_timestamp` (String) The creation time of the backup in UTC. The time format follows the ISO8601 standard, which is YYYY-MM-DD (year-month-day) + T +HH:MM:SS (hour-minutes-seconds) + Z. For example, 2020-01-01T00:00:00Z. +- `description` (String) The description of the backup. It is specified by the user when taking a manual type backup. It helps you add additional information to the backup. +- `id` (String) The ID of the backup. It is generated by TiDB Cloud. +- `name` (String) The name of the backup. +- `size` (String) The bytes of the backup. +- `status` (String) Enum: PENDING/RUNNING/FAILED/SUCCESS.The status of backup. +- `type` (String) Enum: MANUAL/AUTO.The type of backup. TiDB Cloud only supports manual and auto backup. For more information, see [TiDB Cloud Documentation](https://docs.pingcap.com/tidbcloud/backup-and-restore#backup). 
+ + diff --git a/docs/data-sources/cluster_spec.md b/docs/data-sources/cluster_spec.md new file mode 100644 index 0000000..bedbe91 --- /dev/null +++ b/docs/data-sources/cluster_spec.md @@ -0,0 +1,130 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "tidbcloud_cluster_spec Data Source - terraform-provider-tidbcloud" +subcategory: "" +description: |- + cluster_spec data source +--- + +# tidbcloud_cluster_spec (Data Source) + +cluster_spec data source + +## Example Usage + +```terraform +terraform { + required_providers { + tidbcloud = { + source = "tidbcloud/tidbcloud" + } + } +} + +provider "tidbcloud" { + username = "fake_username" + password = "fake_password" +} + +data "tidbcloud_cluster_spec" "example" { +} + +output "output" { + value = data.tidbcloud_cluster_spec.example +} +``` + + +## Schema + +### Read-Only + +- `id` (String) ignore it, it is just for test. +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) +- `total` (Number) the total number of the spec. + + +### Nested Schema for `items` + +Read-Only: + +- `cloud_provider` (String) Enum: "AWS" "GCP", The cloud provider on which your TiDB cluster is hosted. +- `cluster_type` (String) Enum: "DEDICATED" "DEVELOPER", The cluster type. +- `region` (String) the region value should match the cloud provider's region code. +- `tidb` (Attributes List) The list of TiDB specifications in the region. (see [below for nested schema](#nestedatt--items--tidb)) +- `tiflash` (Attributes List) The list of TiFlash specifications in the region. (see [below for nested schema](#nestedatt--items--tiflash)) +- `tikv` (Attributes List) The list of TiKV specifications in the region. (see [below for nested schema](#nestedatt--items--tikv)) + + +### Nested Schema for `items.tidb` + +Read-Only: + +- `node_quantity_range` (Attributes) The range and step of node quantity of the TiDB component in the cluster. 
(see [below for nested schema](#nestedatt--items--tidb--node_quantity_range)) +- `node_size` (String) The size of the TiDB component in the cluster. + + +### Nested Schema for `items.tidb.node_quantity_range` + +Read-Only: + +- `min` (Number) The minimum node quantity of the component in the cluster. +- `step` (Number) The step of node quantity of the component in the cluster. + + + + +### Nested Schema for `items.tiflash` + +Read-Only: + +- `node_quantity_range` (Attributes) The range and step of node quantity of the TiFlash component in the cluster. (see [below for nested schema](#nestedatt--items--tiflash--node_quantity_range)) +- `node_size` (String) The size of the TiFlash component in the cluster. +- `storage_size_gib_range` (Attributes) The storage size range for each node of the TiFlash component in the cluster. (see [below for nested schema](#nestedatt--items--tiflash--storage_size_gib_range)) + + +### Nested Schema for `items.tiflash.node_quantity_range` + +Read-Only: + +- `min` (Number) The minimum node quantity of the component in the cluster. +- `step` (Number) The step of node quantity of the component in the cluster. + + + +### Nested Schema for `items.tiflash.storage_size_gib_range` + +Read-Only: + +- `max` (Number) The maximum storage size for each node of the component in the cluster. +- `min` (Number) The minimum storage size for each node of the component in the cluster. + + + + +### Nested Schema for `items.tikv` + +Read-Only: + +- `node_quantity_range` (Attributes) The range and step of node quantity of the TiKV component in the cluster. (see [below for nested schema](#nestedatt--items--tikv--node_quantity_range)) +- `node_size` (String) The size of the TiKV component in the cluster. +- `storage_size_gib_range` (Attributes) The storage size range for each node of the TiKV component in the cluster. 
(see [below for nested schema](#nestedatt--items--tikv--storage_size_gib_range)) + + +### Nested Schema for `items.tikv.node_quantity_range` + +Read-Only: + +- `min` (Number) The minimum node quantity of the component in the cluster. +- `step` (Number) The step of node quantity of the component in the cluster. + + + +### Nested Schema for `items.tikv.storage_size_gib_range` + +Read-Only: + +- `max` (Number) The maximum storage size for each node of the component in the cluster. +- `min` (Number) The minimum storage size for each node of the component in the cluster. + + diff --git a/docs/data-sources/project.md b/docs/data-sources/project.md new file mode 100644 index 0000000..0289701 --- /dev/null +++ b/docs/data-sources/project.md @@ -0,0 +1,65 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "tidbcloud_project Data Source - terraform-provider-tidbcloud" +subcategory: "" +description: |- + project data source +--- + +# tidbcloud_project (Data Source) + +project data source + +## Example Usage + +```terraform +terraform { + required_providers { + tidbcloud = { + source = "tidbcloud/tidbcloud" + } + } +} + +provider "tidbcloud" { + username = "fake_username" + password = "fake_password" +} + +data "tidbcloud_project" "example" { + page = 1 + page_size = 10 +} + +output "output" { + value = data.tidbcloud_project.example +} +``` + + +## Schema + +### Optional + +- `page` (Number) Default:1 The number of pages. +- `page_size` (Number) Default:10 The size of a pages. + +### Read-Only + +- `id` (String) ignore it, it is just for test. +- `items` (Attributes List) The items of accessible projects. (see [below for nested schema](#nestedatt--items)) +- `total` (Number) The total number of accessible projects. + + +### Nested Schema for `items` + +Read-Only: + +- `cluster_count` (Number) The number of TiDB Cloud clusters deployed in the project. 
+- `create_timestamp` (String) The creation time of the cluster in Unix timestamp seconds (epoch time). +- `id` (String) The ID of the project. +- `name` (String) The name of the project. +- `org_id` (String) The ID of the TiDB Cloud organization to which the project belongs. +- `user_count` (Number) The number of users in the project. + + diff --git a/docs/data-sources/restore.md b/docs/data-sources/restore.md new file mode 100644 index 0000000..a296f95 --- /dev/null +++ b/docs/data-sources/restore.md @@ -0,0 +1,55 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "tidbcloud_restore Data Source - terraform-provider-tidbcloud" +subcategory: "" +description: |- + restore data source +--- + +# tidbcloud_restore (Data Source) + +restore data source + + + + +## Schema + +### Required + +- `project_id` (String) The ID of the project. You can get the project ID from [tidbcloud_project datasource](../project). + +### Optional + +- `page` (Number) Default:1 The number of pages. +- `page_size` (Number) Default:10 The size of a pages. + +### Read-Only + +- `id` (String) ignore it, it is just for test. +- `items` (Attributes List) Default:10 The size of a pages. (see [below for nested schema](#nestedatt--items)) +- `total` (Number) The total number of restore tasks in the project. + + +### Nested Schema for `items` + +Read-Only: + +- `backup_id` (String) The ID of the backup. +- `cluster` (Attributes) The information of the restored cluster. The restored cluster is the new cluster your backup data is restored to. (see [below for nested schema](#nestedatt--items--cluster)) +- `cluster_id` (String) The cluster ID of the backup. +- `create_timestamp` (String) The creation time of the backup in UTC.The time format follows the ISO8601 standard, which is YYYY-MM-DD (year-month-day) + T +HH:MM:SS (hour-minutes-seconds) + Z. For example, 2020-01-01T00:00:00Z. +- `error_message` (String) The error message of restore if failed. 
+- `id` (String) The ID of the restore task. +- `status` (String) Enum: "PENDING" "RUNNING" "FAILED" "SUCCESS", The status of the restore task. + + +### Nested Schema for `items.cluster` + +Read-Only: + +- `id` (String) The ID of the restored cluster. The restored cluster is the new cluster your backup data is restored to. +- `name` (String) The name of the restored cluster. The restored cluster is the new cluster your backup data is restored to. +- `status` (String) The status of the restored cluster. Possible values are "AVAILABLE", "CREATING", "MODIFYING", "PAUSED", "RESUMING", and "CLEARED". + + diff --git a/docs/data-sources/scaffolding_example.md b/docs/data-sources/scaffolding_example.md deleted file mode 100644 index 9f8f373..0000000 --- a/docs/data-sources/scaffolding_example.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -# generated by https://github.com/hashicorp/terraform-plugin-docs -page_title: "scaffolding_example Data Source - terraform-provider-scaffolding-framework" -subcategory: "" -description: |- - Example data source ---- - -# scaffolding_example (Data Source) - -Example data source - -## Example Usage - -```terraform -data "scaffolding_example" "example" { - configurable_attribute = "some-value" -} -``` - - -## Schema - -### Optional - -- `configurable_attribute` (String) Example configurable attribute - -### Read-Only - -- `id` (String) Example identifier - - diff --git a/docs/index.md b/docs/index.md index 21e1ab0..5486944 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,20 +1,21 @@ --- # generated by https://github.com/hashicorp/terraform-plugin-docs -page_title: "scaffolding-framework Provider" +page_title: "tidbcloud Provider" subcategory: "" description: |- --- -# scaffolding-framework Provider +# tidbcloud Provider ## Example Usage ```terraform -provider "scaffolding" { - # example configuration here +provider "tidbcloud" { + username = "fake_username" + password = "fake_password" } ``` @@ -23,4 +24,5 @@ provider "scaffolding" { ### 
Optional -- `example` (String) Example provider attribute +- `password` (String, Sensitive) Private Key +- `username` (String, Sensitive) Public Key diff --git a/docs/resources/backup.md b/docs/resources/backup.md new file mode 100644 index 0000000..e5b0a54 --- /dev/null +++ b/docs/resources/backup.md @@ -0,0 +1,58 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "tidbcloud_backup Resource - terraform-provider-tidbcloud" +subcategory: "" +description: |- + backup resource +--- + +# tidbcloud_backup (Resource) + +backup resource + +## Example Usage + +```terraform +terraform { + required_providers { + tidbcloud = { + source = "tidbcloud/tidbcloud" + } + } +} + +provider "tidbcloud" { + username = "fake_username" + password = "fake_password" +} + +resource "tidbcloud_backup" "example" { + project_id = "fake_id" + cluster_id = "fake_id" + name = "example" + description = "create by terraform" +} +``` + + +## Schema + +### Required + +- `cluster_id` (String) The ID of your cluster that you want to take a manual backup. +- `name` (String) Specify the name for a manual backup. It is recommended that you use a unique name, so that it is easy to distinguish the backup when you query the backups. +- `project_id` (String) The ID of the project. You can get the project ID from [tidbcloud_project datasource](../project). + +### Optional + +- `description` (String) The description of the backup. It helps you add additional information to the backup. Allows up to 256 characters. + +### Read-Only + +- `create_timestamp` (String) The creation time of the backup in UTC. The time format follows the ISO8601 standard, which is YYYY-MM-DD (year-month-day) + T +HH:MM:SS (hour-minutes-seconds) + Z. For example, 2020-01-01T00:00:00Z. +- `id` (String) The ID of the backup. It is generated by TiDB Cloud. +- `size` (String) The bytes of the backup. +- `status` (String) Enum: PENDING/RUNNING/FAILED/SUCCESS.The status of backup. 
+- `type` (String) Enum: MANUAL/AUTO.The type of backup. TiDB Cloud only supports manual and auto backup. For more information, see [TiDB Cloud Documentation](https://docs.pingcap.com/tidbcloud/backup-and-restore#backup). + + diff --git a/docs/resources/cluster.md b/docs/resources/cluster.md new file mode 100644 index 0000000..1697d77 --- /dev/null +++ b/docs/resources/cluster.md @@ -0,0 +1,166 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "tidbcloud_cluster Resource - terraform-provider-tidbcloud" +subcategory: "" +description: |- + cluster resource +--- + +# tidbcloud_cluster (Resource) + +cluster resource + +## Example Usage + +```terraform +terraform { + required_providers { + tidbcloud = { + source = "tidbcloud/tidbcloud" + } + } +} + +provider "tidbcloud" { + username = "fake_username" + password = "fake_password" +} + +resource "tidbcloud_cluster" "example" { + project_id = "fake_id" + name = "example1" + cluster_type = "DEDICATED" + cloud_provider = "AWS" + region = "us-east-1" + config = { + root_password = "Fake_root_password1" + components = { + tidb = { + node_size : "8C16G" + node_quantity : 1 + } + tikv = { + node_size : "8C32G" + storage_size_gib : 500, + node_quantity : 3 + } + tiflash = { + node_size : "8C64G" + storage_size_gib : 5000, + node_quantity : 2 + } + } + } +} + +resource "tidbcloud_cluster" "example2" { + project_id = "" // your projectId + name = "example2" + cluster_type = "DEVELOPER" + cloud_provider = "AWS" + region = "us-east-1" + config = { + root_password = "Fake_root_password1" + ip_access_list = [{ + cidr = "0.0.0.0/0" + description = "all" + } + ] + } +} +``` + + +## Schema + +### Required + +- `cloud_provider` (String) Enum: "AWS" "GCP", The cloud provider on which your TiDB cluster is hosted. +- `cluster_type` (String) Enum: "DEDICATED" "DEVELOPER", The cluster type. +- `config` (Attributes) The configuration of the cluster. 
(see [below for nested schema](#nestedatt--config)) +- `name` (String) The name of the cluster. +- `project_id` (String) The ID of the project. You can get the project ID from [tidbcloud_project datasource](../data-sources/cluster_spec.md). +- `region` (String) the region value should match the cloud provider's region code. You can get the complete list of available regions from the [tidbcloud_cluster_spec datasource](../data-sources/cluster_spec.md). + +### Read-Only + +- `id` (String) The ID of the cluster. +- `status` (String) the status of the cluster. + + +### Nested Schema for `config` + +Optional: + +- `components` (Attributes) The components of the cluster. + - For a Developer Tier cluster, the components value can not be set. - For a Dedicated Tier cluster, the components value must be set. (see [below for nested schema](#nestedatt--config--components)) +- `ip_access_list` (Attributes List) A list of IP addresses and Classless Inter-Domain Routing (CIDR) addresses that are allowed to access the TiDB Cloud cluster via [standard connection](https://docs.pingcap.com/tidbcloud/connect-to-tidb-cluster#connect-via-standard-connection). (see [below for nested schema](#nestedatt--config--ip_access_list)) +- `paused` (Boolean) lag that indicates whether the cluster is paused. true means to pause the cluster, and false means to resume the cluster. + - The cluster can be paused only when the cluster_status is "AVAILABLE". - The cluster can be resumed only when the cluster_status is "PAUSED". +- `port` (Number) The TiDB port for connection. The port must be in the range of 1024-65535 except 10080, 4000 in default. + - For a Developer Tier cluster, only port 4000 is available. +- `root_password` (String) The root password to access the cluster. It must be 8-64 characters. 
+ + +### Nested Schema for `config.components` + +Required: + +- `tidb` (Attributes) The TiDB component of the cluster (see [below for nested schema](#nestedatt--config--components--tidb)) +- `tikv` (Attributes) The TiKV component of the cluster (see [below for nested schema](#nestedatt--config--components--tikv)) + +Optional: + +- `tiflash` (Attributes) The TiFlash component of the cluster. (see [below for nested schema](#nestedatt--config--components--tiflash)) + + +### Nested Schema for `config.components.tidb` + +Required: + +- `node_quantity` (Number) The number of nodes in the cluster. You can get the minimum and step of a node quantity from the [tidbcloud_cluster_spec datasource](../data-sources/cluster_spec.md). +- `node_size` (String) The size of the TiDB component in the cluster, You can get the available node size of each region from the [tidbcloud_cluster_spec datasource](../data-sources/cluster_spec.md). + - If the vCPUs of TiDB or TiKV component is 2 or 4, then their vCPUs need to be the same. + - If the vCPUs of TiDB or TiKV component is 2 or 4, then the cluster does not support TiFlash. + - Can not modify node_size of an existing cluster. + + + +### Nested Schema for `config.components.tikv` + +Required: + +- `node_quantity` (Number) The number of nodes in the cluster. You can get the minimum and step of a node quantity from the [tidbcloud_cluster_spec datasource](../data-sources/cluster_spec.md). + - TiKV do not support decreasing node quantity. + - The node_quantity of TiKV must be a multiple of 3. +- `node_size` (String) The size of the TiKV component in the cluster, You can get the available node size of each region from the [tidbcloud_cluster_spec datasource](../data-sources/cluster_spec.md). + - If the vCPUs of TiDB or TiKV component is 2 or 4, then their vCPUs need to be the same. + - If the vCPUs of TiDB or TiKV component is 2 or 4, then the cluster does not support TiFlash. + - Can not modify node_size of an existing cluster. 
+- `storage_size_gib` (Number) The storage size of a node in the cluster. You can get the minimum and maximum of storage size from the [tidbcloud_cluster_spec datasource](../data-sources/cluster_spec.md). + - Can not modify storage_size_gib of an existing cluster. + + + +### Nested Schema for `config.components.tiflash` + +Required: + +- `node_quantity` (Number) The number of nodes in the cluster. You can get the minimum and step of a node quantity from the [tidbcloud_cluster_spec datasource](../data-sources/cluster_spec.md). + - TiFlash do not support decreasing node quantity. +- `node_size` (String) The size of the TiFlash component in the cluster, You can get the available node size of each region from the [tidbcloud_cluster_spec datasource](../data-sources/cluster_spec.md). + - Can not modify node_size of an existing cluster. +- `storage_size_gib` (Number) The storage size of a node in the cluster. You can get the minimum and maximum of storage size from the [tidbcloud_cluster_spec datasource](../data-sources/cluster_spec.md). + - Can not modify storage_size_gib of an existing cluster. + + + + +### Nested Schema for `config.ip_access_list` + +Required: + +- `cidr` (String) The IP address or CIDR range that you want to add to the cluster's IP access list. +- `description` (String) Description that explains the purpose of the entry. 
+ + diff --git a/docs/resources/restore.md b/docs/resources/restore.md new file mode 100644 index 0000000..ae269f5 --- /dev/null +++ b/docs/resources/restore.md @@ -0,0 +1,163 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "tidbcloud_restore Resource - terraform-provider-tidbcloud" +subcategory: "" +description: |- + restore resource +--- + +# tidbcloud_restore (Resource) + +restore resource + +## Example Usage + +```terraform +terraform { + required_providers { + tidbcloud = { + source = "tidbcloud/tidbcloud" + } + } +} + +provider "tidbcloud" { + username = "fake_username" + password = "fake_password" +} + +resource "tidbcloud_restore" "example" { + project_id = "fake_id" + backup_id = "fake_id" + name = "example" + config = { + root_password = "Fake_root_password1" + port = 4000 + components = { + tidb = { + node_size : "8C16G" + node_quantity : 1 + } + tikv = { + node_size : "8C32G" + storage_size_gib : 500 + node_quantity : 3 + } + tiflash = { + node_size : "8C64G" + storage_size_gib : 500 + node_quantity : 1 + } + } + } +} +``` + + +## Schema + +### Required + +- `backup_id` (String) The ID of the backup +- `config` (Attributes) The configuration of the cluster (see [below for nested schema](#nestedatt--config)) +- `name` (String) The name of the restore +- `project_id` (String) The ID of the project. You can get the project ID from [tidbcloud_project datasource](../data-sources/project.md). + +### Read-Only + +- `cluster` (Attributes) The information of the restored cluster. The restored cluster is the new cluster your backup data is restored to. (see [below for nested schema](#nestedatt--cluster)) +- `cluster_id` (String) The ID of the cluster +- `create_timestamp` (String) The creation time of the backup in UTC. The time format follows the ISO8601 standard, which is YYYY-MM-DD (year-month-day) + T +HH:MM:SS (hour-minutes-seconds) + Z. For example, 2020-01-01T00:00:00Z. 
+- `error_message` (String) The error message of restore if failed. +- `id` (String) The ID of the restore +- `status` (String) Enum: "PENDING" "RUNNING" "FAILED" "SUCCESS" +The status of the restore task. + + +### Nested Schema for `config` + +Required: + +- `components` (Attributes) The components of the cluster. + - For a Developer Tier cluster, the components value can not be set. - For a Dedicated Tier cluster, the components value must be set. (see [below for nested schema](#nestedatt--config--components)) +- `root_password` (String) The root password to access the cluster. It must be 8-64 characters. + +Optional: + +- `ip_access_list` (Attributes List) A list of IP addresses and Classless Inter-Domain Routing (CIDR) addresses that are allowed to access the TiDB Cloud cluster via [standard connection](https://docs.pingcap.com/tidbcloud/connect-to-tidb-cluster#connect-via-standard-connection). (see [below for nested schema](#nestedatt--config--ip_access_list)) +- `port` (Number) The TiDB port for connection. The port must be in the range of 1024-65535 except 10080, 4000 in default. + - For a Developer Tier cluster, only port 4000 is available. + + +### Nested Schema for `config.components` + +Required: + +- `tidb` (Attributes) (see [below for nested schema](#nestedatt--config--components--tidb)) +- `tikv` (Attributes) (see [below for nested schema](#nestedatt--config--components--tikv)) + +Optional: + +- `tiflash` (Attributes) (see [below for nested schema](#nestedatt--config--components--tiflash)) + + +### Nested Schema for `config.components.tidb` + +Required: + +- `node_quantity` (Number) The number of nodes in the cluster. You can get the minimum and step of a node quantity from the [tidbcloud_cluster_spec datasource](../data-sources/cluster_spec.md). +- `node_size` (String) The size of the TiDB component in the cluster, You can get the available node size of each region from the [tidbcloud_cluster_spec datasource](../data-sources/cluster_spec.md). 
+ - If the vCPUs of TiDB or TiKV component is 2 or 4, then their vCPUs need to be the same. + - If the vCPUs of TiDB or TiKV component is 2 or 4, then the cluster does not support TiFlash. + - Can not modify node_size of an existing cluster. + + + +### Nested Schema for `config.components.tikv` + +Required: + +- `node_quantity` (Number) The number of nodes in the cluster. You can get the minimum and step of a node quantity from the [tidbcloud_cluster_spec datasource](../data-sources/cluster_spec.md). +- `node_size` (String) The size of the TiKV component in the cluster, You can get the available node size of each region from the [tidbcloud_cluster_spec datasource](../data-sources/cluster_spec.md). + - If the vCPUs of TiDB or TiKV component is 2 or 4, then their vCPUs need to be the same. + - If the vCPUs of TiDB or TiKV component is 2 or 4, then the cluster does not support TiFlash. + - Can not modify node_size of an existing cluster. +- `storage_size_gib` (Number) The storage size of a node in the cluster. You can get the minimum and maximum of storage size from the [tidbcloud_cluster_spec datasource](../data-sources/cluster_spec.md). + - Can not modify storage_size_gib of an existing cluster. + + + +### Nested Schema for `config.components.tiflash` + +Required: + +- `node_quantity` (Number) The number of nodes in the cluster. You can get the minimum and step of a node quantity from the [tidbcloud_cluster_spec datasource](../data-sources/cluster_spec.md). +- `node_size` (String) The size of the TiFlash component in the cluster, You can get the available node size of each region from the [tidbcloud_cluster_spec datasource](../data-sources/cluster_spec.md). + - If the vCPUs of TiDB or TiKV component is 2 or 4, then their vCPUs need to be the same. + - If the vCPUs of TiDB or TiKV component is 2 or 4, then the cluster does not support TiFlash. + - Can not modify node_size of an existing cluster. +- `storage_size_gib` (Number) The storage size of a node in the cluster. 
You can get the minimum and maximum of storage size from the [tidbcloud_cluster_spec datasource](./cluster_spec.md). + - Can not modify storage_size_gib of an existing cluster. + + + + +### Nested Schema for `config.ip_access_list` + +Required: + +- `cidr` (String) The IP address or CIDR range that you want to add to the cluster's IP access list. +- `description` (String) Description that explains the purpose of the entry. + + + + +### Nested Schema for `cluster` + +Read-Only: + +- `id` (String) The ID of the restored cluster. The restored cluster is the new cluster your backup data is restored to. +- `name` (String) The name of the restored cluster. The restored cluster is the new cluster your backup data is restored to. +- `status` (String) The status of the restored cluster. Possible values are "AVAILABLE", "CREATING", "MODIFYING", "PAUSED", "RESUMING","UNAVAILABLE", "IMPORTING" and "CLEARED". + + diff --git a/docs/resources/scaffolding_example.md b/docs/resources/scaffolding_example.md deleted file mode 100644 index 62094d7..0000000 --- a/docs/resources/scaffolding_example.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -# generated by https://github.com/hashicorp/terraform-plugin-docs -page_title: "scaffolding_example Resource - terraform-provider-scaffolding-framework" -subcategory: "" -description: |- - Example resource ---- - -# scaffolding_example (Resource) - -Example resource - -## Example Usage - -```terraform -resource "scaffolding_example" "example" { - configurable_attribute = "some-value" -} -``` - - -## Schema - -### Optional - -- `configurable_attribute` (String) Example configurable attribute - -### Read-Only - -- `id` (String) Example identifier - - diff --git a/examples/data-sources/scaffolding_example/data-source.tf b/examples/data-sources/scaffolding_example/data-source.tf deleted file mode 100644 index a852489..0000000 --- a/examples/data-sources/scaffolding_example/data-source.tf +++ /dev/null @@ -1,3 +0,0 @@ -data "scaffolding_example" "example" { 
- configurable_attribute = "some-value" -} diff --git a/examples/data-sources/tidbcloud_backup/data-source.tf b/examples/data-sources/tidbcloud_backup/data-source.tf new file mode 100644 index 0000000..675e809 --- /dev/null +++ b/examples/data-sources/tidbcloud_backup/data-source.tf @@ -0,0 +1,23 @@ +terraform { + required_providers { + tidbcloud = { + source = "tidbcloud/tidbcloud" + } + } +} + +provider "tidbcloud" { + username = "fake_username" + password = "fake_password" +} + +data "tidbcloud_backup" "example" { + page = 1 + page_size = 10 + project_id = "fake_id" + cluster_id = "fake_id" +} + +output "output" { + value = data.tidbcloud_backup.example +} \ No newline at end of file diff --git a/examples/data-sources/tidbcloud_cluster_spec/data-source.tf b/examples/data-sources/tidbcloud_cluster_spec/data-source.tf new file mode 100644 index 0000000..37c1091 --- /dev/null +++ b/examples/data-sources/tidbcloud_cluster_spec/data-source.tf @@ -0,0 +1,19 @@ +terraform { + required_providers { + tidbcloud = { + source = "tidbcloud/tidbcloud" + } + } +} + +provider "tidbcloud" { + username = "fake_username" + password = "fake_password" +} + +data "tidbcloud_cluster_spec" "example" { +} + +output "output" { + value = data.tidbcloud_cluster_spec.example +} \ No newline at end of file diff --git a/examples/data-sources/tidbcloud_project/data-source.tf b/examples/data-sources/tidbcloud_project/data-source.tf new file mode 100644 index 0000000..133d247 --- /dev/null +++ b/examples/data-sources/tidbcloud_project/data-source.tf @@ -0,0 +1,21 @@ +terraform { + required_providers { + tidbcloud = { + source = "tidbcloud/tidbcloud" + } + } +} + +provider "tidbcloud" { + username = "fake_username" + password = "fake_password" +} + +data "tidbcloud_project" "example" { + page = 1 + page_size = 10 +} + +output "output" { + value = data.tidbcloud_project.example +} \ No newline at end of file diff --git a/examples/data-sources/tidbcloud_restore/data_source.tf 
b/examples/data-sources/tidbcloud_restore/data_source.tf new file mode 100644 index 0000000..f0bbe47 --- /dev/null +++ b/examples/data-sources/tidbcloud_restore/data_source.tf @@ -0,0 +1,20 @@ +terraform { + required_providers { + tidbcloud = { + source = "tidbcloud/tidbcloud" + } + } +} + +provider "tidbcloud" { + username = "fake_username" + password = "fake_password" +} + +data "tidbcloud_restore" "example" { + project_id = "fake_id" +} + +output "output" { + value = data.tidbcloud_restore.example +} \ No newline at end of file diff --git a/examples/provider/provider.tf b/examples/provider/provider.tf index 942db45..761a9ee 100644 --- a/examples/provider/provider.tf +++ b/examples/provider/provider.tf @@ -1,3 +1,4 @@ -provider "scaffolding" { - # example configuration here -} +provider "tidbcloud" { + username = "fake_username" + password = "fake_password" +} \ No newline at end of file diff --git a/examples/resources/scaffolding_example/resource.tf b/examples/resources/scaffolding_example/resource.tf deleted file mode 100644 index 9ae3f57..0000000 --- a/examples/resources/scaffolding_example/resource.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "scaffolding_example" "example" { - configurable_attribute = "some-value" -} diff --git a/examples/resources/tidbcloud_backup/resource.tf b/examples/resources/tidbcloud_backup/resource.tf new file mode 100644 index 0000000..6ea7f3f --- /dev/null +++ b/examples/resources/tidbcloud_backup/resource.tf @@ -0,0 +1,19 @@ +terraform { + required_providers { + tidbcloud = { + source = "tidbcloud/tidbcloud" + } + } +} + +provider "tidbcloud" { + username = "fake_username" + password = "fake_password" +} + +resource "tidbcloud_backup" "example" { + project_id = "fake_id" + cluster_id = "fake_id" + name = "example" + description = "create by terraform" +} \ No newline at end of file diff --git a/examples/resources/tidbcloud_cluster/resource.tf b/examples/resources/tidbcloud_cluster/resource.tf new file mode 100644 index 
0000000..d7c59e0 --- /dev/null +++ b/examples/resources/tidbcloud_cluster/resource.tf @@ -0,0 +1,55 @@ +terraform { + required_providers { + tidbcloud = { + source = "tidbcloud/tidbcloud" + } + } +} + +provider "tidbcloud" { + username = "fake_username" + password = "fake_password" +} + +resource "tidbcloud_cluster" "example" { + project_id = "fake_id" + name = "example1" + cluster_type = "DEDICATED" + cloud_provider = "AWS" + region = "us-east-1" + config = { + root_password = "Fake_root_password1" + components = { + tidb = { + node_size : "8C16G" + node_quantity : 1 + } + tikv = { + node_size : "8C32G" + storage_size_gib : 500, + node_quantity : 3 + } + tiflash = { + node_size : "8C64G" + storage_size_gib : 5000, + node_quantity : 2 + } + } + } +} + +resource "tidbcloud_cluster" "example2" { + project_id = "" // your projectId + name = "example2" + cluster_type = "DEVELOPER" + cloud_provider = "AWS" + region = "us-east-1" + config = { + root_password = "Fake_root_password1" + ip_access_list = [{ + cidr = "0.0.0.0/0" + description = "all" + } + ] + } +} \ No newline at end of file diff --git a/examples/resources/tidbcloud_restore/resource.tf b/examples/resources/tidbcloud_restore/resource.tf new file mode 100644 index 0000000..4cf3815 --- /dev/null +++ b/examples/resources/tidbcloud_restore/resource.tf @@ -0,0 +1,38 @@ +terraform { + required_providers { + tidbcloud = { + source = "tidbcloud/tidbcloud" + } + } +} + +provider "tidbcloud" { + username = "fake_username" + password = "fake_password" +} + +resource "tidbcloud_restore" "example" { + project_id = "fake_id" + backup_id = "fake_id" + name = "example" + config = { + root_password = "Fake_root_password1" + port = 4000 + components = { + tidb = { + node_size : "8C16G" + node_quantity : 1 + } + tikv = { + node_size : "8C32G" + storage_size_gib : 500 + node_quantity : 3 + } + tiflash = { + node_size : "8C64G" + storage_size_gib : 500 + node_quantity : 1 + } + } + } +} \ No newline at end of file diff --git 
a/go.mod b/go.mod index 5134556..003a2dd 100644 --- a/go.mod +++ b/go.mod @@ -1,13 +1,15 @@ -module github.com/hashicorp/terraform-provider-scaffolding-framework +module github.com/tidbcloud/terraform-provider-tidbcloud -go 1.17 +go 1.18 require ( + github.com/go-resty/resty/v2 v2.7.0 github.com/hashicorp/terraform-plugin-docs v0.13.0 - github.com/hashicorp/terraform-plugin-framework v0.11.0 - github.com/hashicorp/terraform-plugin-go v0.13.0 + github.com/hashicorp/terraform-plugin-framework v0.11.1 + github.com/hashicorp/terraform-plugin-go v0.14.0 github.com/hashicorp/terraform-plugin-log v0.7.0 - github.com/hashicorp/terraform-plugin-sdk/v2 v2.20.0 + github.com/hashicorp/terraform-plugin-sdk/v2 v2.21.0 + github.com/icholy/digest v0.1.15 ) require ( @@ -66,5 +68,5 @@ require ( google.golang.org/appengine v1.6.6 // indirect google.golang.org/genproto v0.0.0-20200711021454-869866162049 // indirect google.golang.org/grpc v1.48.0 // indirect - google.golang.org/protobuf v1.28.0 // indirect + google.golang.org/protobuf v1.28.1 // indirect ) diff --git a/go.sum b/go.sum index 7b482c9..18d6a75 100644 --- a/go.sum +++ b/go.sum @@ -16,16 +16,11 @@ github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 h1:YoJbenK9C6 github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk= github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= -github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE= github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/antihax/optional v1.0.0/go.mod 
h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= -github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0 h1:MzVXffFUye+ZcSR6opIgz9Co7WcDx6ZcY+RjfFHoA0I= -github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= -github.com/apparentlymart/go-textseg v1.0.0 h1:rRmlIsPEEhUTIKQb7T++Nz/A5Q6C9IuX2wFoYVvnCs0= github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= @@ -62,7 +57,6 @@ github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= -github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= @@ -73,8 +67,9 @@ github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0= github.com/go-git/go-git/v5 v5.4.2 h1:BXyZu9t0VkbiHtqrsvdq39UDhGJTl1h55VW6CSC4aY4= github.com/go-git/go-git/v5 v5.4.2/go.mod 
h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc= +github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY= +github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I= github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= -github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -101,7 +96,6 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -120,7 +114,6 @@ github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9n github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 h1:1/D3zfFHttUKaCaGKZ/dR2roBXv0vKbSCnssIldfQdI= github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs= -github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v1.2.1 h1:YQsLlGDJgwhXFpucSPyVbCBviQtjlHv3jLTlp8YmtEw= 
github.com/hashicorp/go-hclog v1.2.1/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= @@ -147,26 +140,25 @@ github.com/hashicorp/terraform-json v0.14.0 h1:sh9iZ1Y8IFJLx+xQiKHGud6/TSUCM0N8e github.com/hashicorp/terraform-json v0.14.0/go.mod h1:5A9HIWPkk4e5aeeXIBbkcOvaZbIYnAIkEyqP2pNSckM= github.com/hashicorp/terraform-plugin-docs v0.13.0 h1:6e+VIWsVGb6jYJewfzq2ok2smPzZrt1Wlm9koLeKazY= github.com/hashicorp/terraform-plugin-docs v0.13.0/go.mod h1:W0oCmHAjIlTHBbvtppWHe8fLfZ2BznQbuv8+UD8OucQ= -github.com/hashicorp/terraform-plugin-framework v0.11.0 h1:NLWMUMNp0HjCjJctNzEv3FJsR/ndYTSbO9XfwlSVFeQ= -github.com/hashicorp/terraform-plugin-framework v0.11.0/go.mod h1:gVjHP7o0QzWpHe5dvA+GUu+mH0/vQDZeexkkq2RS/II= -github.com/hashicorp/terraform-plugin-go v0.12.0/go.mod h1:kwhmaWHNDvT1B3QiSJdAtrB/D4RaKSY/v3r2BuoWK4M= -github.com/hashicorp/terraform-plugin-go v0.13.0 h1:Zm+o91HUOcTLotaEu3X2jV/6wNi6f09gkZwGg/MDvCk= -github.com/hashicorp/terraform-plugin-go v0.13.0/go.mod h1:NYGFEM9GeRdSl52txue3RcBDFt2tufaqS22iURP8Bxs= -github.com/hashicorp/terraform-plugin-log v0.6.0/go.mod h1:p4R1jWBXRTvL4odmEkFfDdhUjHf9zcs/BCoNHAc7IK4= +github.com/hashicorp/terraform-plugin-framework v0.11.1 h1:rq8f+TLDO4tJu+n9mMYlDrcRoIdrg0gTUvV2Jr0Ya24= +github.com/hashicorp/terraform-plugin-framework v0.11.1/go.mod h1:GENReHOz6GEt8Jk3UN94vk8BdC6irEHFgN3Z9HPhPUU= +github.com/hashicorp/terraform-plugin-go v0.14.0 h1:ttnSlS8bz3ZPYbMb84DpcPhY4F5DsQtcAS7cHo8uvP4= +github.com/hashicorp/terraform-plugin-go v0.14.0/go.mod h1:2nNCBeRLaenyQEi78xrGrs9hMbulveqG/zDMQSvVJTE= github.com/hashicorp/terraform-plugin-log v0.7.0 h1:SDxJUyT8TwN4l5b5/VkiTIaQgY6R+Y2BQ0sRZftGKQs= github.com/hashicorp/terraform-plugin-log v0.7.0/go.mod h1:p4R1jWBXRTvL4odmEkFfDdhUjHf9zcs/BCoNHAc7IK4= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.20.0 h1:+KxZULPsbjpAVoP0WNj/8aVW6EqpcX5JcUcQ5wl7Da4= -github.com/hashicorp/terraform-plugin-sdk/v2 
v2.20.0/go.mod h1:DwGJG3KNxIPluVk6hexvDfYR/MS/eKGpiztJoT3Bbbw= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.21.0 h1:eIJjFlI4k6BMso6Wq/bq56U0RukXc4JbwJJ8Oze2/tg= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.21.0/go.mod h1:mYPs/uchNcBq7AclQv9QUtSf9iNcfp1Ag21jqTlDf2M= github.com/hashicorp/terraform-registry-address v0.0.0-20220623143253-7d51757b572c h1:D8aRO6+mTqHfLsK/BC3j5OAoogv1WLRWzY1AaTo3rBg= github.com/hashicorp/terraform-registry-address v0.0.0-20220623143253-7d51757b572c/go.mod h1:Wn3Na71knbXc1G8Lh+yu/dQWWJeFQEpDeJMtWMtlmNI= github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 h1:HKLsbzeOsfXmKNpr3GiT18XAblV0BjCbzL8KQAMZGa0= github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734/go.mod h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg= -github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw= github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/icholy/digest v0.1.15 h1:3vCTbaXcUjF84YlICrP/4FvfVX2TKDKgMheLwNZA+GM= +github.com/icholy/digest v0.1.15/go.mod h1:uLAeDdWKIWNFMH0wqbwchbTQOmJWhzSnL7zmqSPqEEc= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= @@ -175,30 +167,24 @@ github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOl github.com/jbenet/go-context 
v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= -github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 h1:DowS9hvgyYSX4TO5NpyC606/Z4SxnNYbT+WX27or6Ck= github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= -github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.9/go.mod 
h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= @@ -209,10 +195,8 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= -github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= @@ -222,10 +206,10 @@ github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zx 
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nsf/jsondiff v0.0.0-20200515183724-f29ed568f4ce h1:RPclfga2SEJmgMmz2k+Mg7cowZ8yv4Trqw9UsJby758= -github.com/nsf/jsondiff v0.0.0-20200515183724-f29ed568f4ce/go.mod h1:uFMI8w+ref4v2r9jz+c9i1IfIttS/OkmLfrk1jne5hs= github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -235,14 +219,11 @@ github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSg github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww= github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= github.com/sebdah/goldie v1.0.0/go.mod h1:jXP4hmWywNEwZzhMuv2ccnqTSFpuq8iyQhtQdkkZBH4= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sergi/go-diff v1.2.0 
h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= -github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= @@ -250,7 +231,7 @@ github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMB github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= -github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= @@ -270,32 +251,26 @@ github.com/vmihailenco/tagparser v0.1.1 h1:quXMXlA39OCbd2wAdTsGDlK9RkOk6Wuw+x37w github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= github.com/xanzy/ssh-agent v0.3.0 h1:wUMzuKtKilRgBAD1sUb8gOwwRr2FGoBVumcjoOACClI= github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= -github.com/zclconf/go-cty v1.8.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= github.com/zclconf/go-cty v1.10.0 h1:mp9ZXQeIcN8kAwuqorjH+Q+njbJKjLrvB2yIh4q7U+0= 
github.com/zclconf/go-cty v1.10.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220517005047-85d78b3ac167/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint 
v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -304,15 +279,14 @@ golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191009170851-d66e71096ffb/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k= 
+golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -322,15 +296,12 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -361,10 +332,7 @@ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200713011307-fd294ab11aed/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= @@ -372,24 +340,20 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto 
v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200711021454-869866162049 h1:YFTFpQhgvrLrmxtiIncJxFXeCyq84ixuKWVCaCAi9Oc= google.golang.org/genproto v0.0.0-20200711021454-869866162049/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.48.0 h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w= google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0/go.mod h1:DNq5QpG7LJqD2AamLZ7zvKE0DEpVl2BSEVjFycAAjRY= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -403,15 
+367,14 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -422,5 +385,7 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.0.2 h1:kG1BFyqVHuQoVQiR1bWGnfz/fmHvvuiSPIV7rvl360E= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/internal/README.md b/internal/README.md new file mode 100644 index 0000000..c639a0a --- /dev/null +++ b/internal/README.md @@ -0,0 +1,57 @@ +# Running the acceptance test + +*Note:* Acceptance tests may create real resources, and often cost money to run. + +## Requirements +- Go: The most recent stable version. +- Terraform CLI: Version 0.12.26 or later. + +## Auth +You need to set API Key with environment before all the tests +``` +export TIDBCLOUD_USERNAME=${public_key} +export TIDBCLOUD_PASSWORD=${private_key} +``` + +## Test With Project +The tests need project are put into the /internal/provide/testwithproject path. + + +> some tests like cluster_resource_test may cause cost, make sure you have enough balance +> +Here are the steps to test them: + +1. create a new project for test in tidb cloud (You can also use the default project, but it is not recommended) + +2. set projectId with environment +``` +export TIDBCLOUD_PROJECTID=${your_project_id} +``` +3. test +``` +TF_ACC=1 go test -v ./internal/provider/testwithproject +``` + +## Test With Cluster +The tests need pre-created TiDB cluster are put into the /internal/provider/testwithcluster path + +Here are the steps to test them: + +1. Create a dedicated cluster and wait for it is ready. You can create it with tidb cloud or terraform + +2. set projectId and clusterId with environment +``` +export TIDBCLOUD_PROJECTID=${your_project_id} +export TIDBCLOUD_CLUSTERID=${your_cluster_id} +``` + +3. 
test +``` +TF_ACC=1 go test -v ./internal/provider/testwithcluster +``` + + +## Test Manually +The tests can't be tested directly are put into the /internal/provider/testmanually path + +You need to test them manually following the code annotation in every test. \ No newline at end of file diff --git a/internal/provider/backup_data_source.go b/internal/provider/backup_data_source.go new file mode 100644 index 0000000..9303d29 --- /dev/null +++ b/internal/provider/backup_data_source.go @@ -0,0 +1,173 @@ +package provider + +import ( + "context" + "fmt" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/provider" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" +) + +type backupDataSourceData struct { + Id types.String `tfsdk:"id"` + ProjectId string `tfsdk:"project_id"` + ClusterId string `tfsdk:"cluster_id"` + Page types.Int64 `tfsdk:"page"` + PageSize types.Int64 `tfsdk:"page_size"` + Items []backup `tfsdk:"items"` + Total types.Int64 `tfsdk:"total"` +} + +type backup struct { + Id string `tfsdk:"id"` + Name string `tfsdk:"name"` + Description string `tfsdk:"description"` + Type string `tfsdk:"type"` + Size string `tfsdk:"size"` + CreateTimestamp string `tfsdk:"create_timestamp"` + Status string `tfsdk:"status"` +} + +// Ensure provider defined types fully satisfy framework interfaces +var _ provider.DataSourceType = backupDataSourceType{} +var _ datasource.DataSource = backupDataSource{} + +type backupDataSourceType struct{} + +func (t backupDataSourceType) GetSchema(ctx context.Context) (tfsdk.Schema, diag.Diagnostics) { + return tfsdk.Schema{ + MarkdownDescription: "backup data source", + Attributes: map[string]tfsdk.Attribute{ + "id": { + MarkdownDescription: "ignore it, it is just for test.", + Computed: true, + Type: 
types.StringType, + }, + "project_id": { + MarkdownDescription: "The ID of the project. You can get the project ID from [tidbcloud_project datasource](../project).", + Required: true, + Type: types.StringType, + }, + "cluster_id": { + MarkdownDescription: "The ID of your cluster.", + Required: true, + Type: types.StringType, + }, + "page": { + MarkdownDescription: "Default:1 The number of pages.", + Optional: true, + Computed: true, + Type: types.Int64Type, + }, + "page_size": { + MarkdownDescription: "Default:10 The size of a pages.", + Optional: true, + Computed: true, + Type: types.Int64Type, + }, + "items": { + Computed: true, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "id": { + MarkdownDescription: "The ID of the backup. It is generated by TiDB Cloud.", + Computed: true, + Type: types.StringType, + }, + "description": { + MarkdownDescription: "The description of the backup. It is specified by the user when taking a manual type backup. It helps you add additional information to the backup.", + Computed: true, + Type: types.StringType, + }, + "name": { + MarkdownDescription: "The name of the backup.", + Computed: true, + Type: types.StringType, + }, + "type": { + MarkdownDescription: "Enum: MANUAL/AUTO.The type of backup. TiDB Cloud only supports manual and auto backup. For more information, see [TiDB Cloud Documentation](https://docs.pingcap.com/tidbcloud/backup-and-restore#backup).", + Computed: true, + Type: types.StringType, + }, + "size": { + MarkdownDescription: "The bytes of the backup.", + Computed: true, + Type: types.StringType, + }, + "create_timestamp": { + MarkdownDescription: "The creation time of the backup in UTC. The time format follows the ISO8601 standard, which is YYYY-MM-DD (year-month-day) + T +HH:MM:SS (hour-minutes-seconds) + Z. 
For example, 2020-01-01T00:00:00Z.", + Computed: true, + Type: types.StringType, + }, + "status": { + MarkdownDescription: "Enum: PENDING/RUNNING/FAILED/SUCCESS.The status of backup.", + Computed: true, + Type: types.StringType, + }, + }), + }, + "total": { + MarkdownDescription: "The total number of backups in the project.", + Computed: true, + Type: types.Int64Type, + }, + }, + }, nil +} + +func (t backupDataSourceType) NewDataSource(ctx context.Context, in provider.Provider) (datasource.DataSource, diag.Diagnostics) { + provider, diags := convertProviderType(in) + + return backupDataSource{ + provider: provider, + }, diags +} + +type backupDataSource struct { + provider tidbcloudProvider +} + +func (d backupDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data backupDataSourceData + diags := req.Config.Get(ctx, &data) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + // set default value + if data.Page.IsNull() || data.Page.IsUnknown() { + data.Page = types.Int64{Value: 1} + } + if data.PageSize.IsNull() || data.PageSize.IsUnknown() { + data.PageSize = types.Int64{Value: 10} + } + + tflog.Trace(ctx, "read backup data source") + backups, err := d.provider.client.GetBackups(data.ProjectId, data.ClusterId, data.Page.Value, data.PageSize.Value) + if err != nil { + resp.Diagnostics.AddError("Read Error", fmt.Sprintf("Unable to call GetBackups, got error: %s", err)) + return + } + + data.Id = types.String{Value: "just for test"} + data.Total = types.Int64{Value: backups.Total} + var items []backup + for _, key := range backups.Items { + items = append(items, backup{ + Id: key.Id, + Description: key.Description, + Name: key.Name, + Type: key.Type, + Size: key.Size, + CreateTimestamp: key.CreateTimestamp, + Status: key.Status, + }) + } + data.Items = items + + diags = resp.State.Set(ctx, &data) + resp.Diagnostics.Append(diags...) 
+} diff --git a/internal/provider/backup_resource.go b/internal/provider/backup_resource.go new file mode 100644 index 0000000..859420c --- /dev/null +++ b/internal/provider/backup_resource.go @@ -0,0 +1,225 @@ +package provider + +import ( + "context" + "fmt" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/provider" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/tidbcloud/terraform-provider-tidbcloud/tidbcloud" + "strings" +) + +type backupResourceData struct { + ClusterId string `tfsdk:"cluster_id"` + ProjectId string `tfsdk:"project_id"` + Name string `tfsdk:"name"` + Description types.String `tfsdk:"description"` + BackupId types.String `tfsdk:"id"` + Type types.String `tfsdk:"type"` + Size types.String `tfsdk:"size"` + Status types.String `tfsdk:"status"` + CreateTimestamp types.String `tfsdk:"create_timestamp"` +} + +// Ensure provider defined types fully satisfy framework interfaces +var _ provider.ResourceType = backupResourceType{} +var _ resource.Resource = backupResource{} +var _ resource.ResourceWithImportState = backupResource{} + +type backupResourceType struct{} + +func (t backupResourceType) GetSchema(ctx context.Context) (tfsdk.Schema, diag.Diagnostics) { + return tfsdk.Schema{ + MarkdownDescription: "backup resource", + Attributes: map[string]tfsdk.Attribute{ + "project_id": { + MarkdownDescription: "The ID of the project. 
You can get the project ID from [tidbcloud_project datasource](../project).", + Required: true, + Type: types.StringType, + }, + "cluster_id": { + Required: true, + MarkdownDescription: "The ID of your cluster that you want to take a manual backup.", + Type: types.StringType, + }, + "name": { + MarkdownDescription: "Specify the name for a manual backup. It is recommended that you use a unique name, so that it is easy to distinguish the backup when you query the backups.", + Required: true, + Type: types.StringType, + }, + "description": { + MarkdownDescription: "The description of the backup. It helps you add additional information to the backup. Allows up to 256 characters.", + Optional: true, + Computed: true, + Type: types.StringType, + }, + "id": { + Computed: true, + MarkdownDescription: "The ID of the backup. It is generated by TiDB Cloud.", + Type: types.StringType, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "type": { + Computed: true, + MarkdownDescription: "Enum: MANUAL/AUTO.The type of backup. TiDB Cloud only supports manual and auto backup. For more information, see [TiDB Cloud Documentation](https://docs.pingcap.com/tidbcloud/backup-and-restore#backup).", + Type: types.StringType, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "size": { + Computed: true, + MarkdownDescription: "The bytes of the backup.", + Type: types.StringType, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "status": { + Computed: true, + MarkdownDescription: "Enum: PENDING/RUNNING/FAILED/SUCCESS.The status of backup.", + Type: types.StringType, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "create_timestamp": { + Computed: true, + MarkdownDescription: "The creation time of the backup in UTC. 
The time format follows the ISO8601 standard, which is YYYY-MM-DD (year-month-day) + T +HH:MM:SS (hour-minutes-seconds) + Z. For example, 2020-01-01T00:00:00Z.", + Type: types.StringType, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + }, + }, nil +} + +func (t backupResourceType) NewResource(ctx context.Context, in provider.Provider) (resource.Resource, diag.Diagnostics) { + provider, diags := convertProviderType(in) + + return backupResource{ + provider: provider, + }, diags +} + +type backupResource struct { + provider tidbcloudProvider +} + +func (r backupResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + var data backupResourceData + diags := req.Config.Get(ctx, &data) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + createBackupReq := tidbcloud.CreateBackupReq{ + Name: data.Name, + } + if !data.Description.IsUnknown() && !data.Description.IsNull() { + createBackupReq.Description = data.Description.Value + } + + tflog.Trace(ctx, "created backup resource") + createBackupResp, err := r.provider.client.CreateBackup(data.ProjectId, data.ClusterId, createBackupReq) + if err != nil { + resp.Diagnostics.AddError("Create Error", fmt.Sprintf("Unable to call create backup, got error: %s", err)) + return + } + + tflog.Trace(ctx, "get backup resource") + getBackupResp, err := r.provider.client.GetBackupById(data.ProjectId, data.ClusterId, createBackupResp.BackupId) + if err != nil { + resp.Diagnostics.AddError("Create Error", fmt.Sprintf("Unable to call GetBackupById, got error: %s", err)) + return + } + + // save into the Terraform state. + refreshBackupResourceData(getBackupResp, &data) + + diags = resp.State.Set(ctx, &data) + resp.Diagnostics.Append(diags...) 
+} + +func (r backupResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + var projectId, clusterId, backupId string + resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("project_id"), &projectId)...) + resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("cluster_id"), &clusterId)...) + resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("id"), &backupId)...) + if resp.Diagnostics.HasError() { + return + } + + tflog.Trace(ctx, "get backup resource") + getBackupResp, err := r.provider.client.GetBackupById(projectId, clusterId, backupId) + if err != nil { + resp.Diagnostics.AddError("Read Error", fmt.Sprintf("Unable to call GetBackupById, got error: %s", err)) + return + } + + var data backupResourceData + data.ClusterId = clusterId + data.ProjectId = projectId + data.BackupId = types.String{Value: backupId} + refreshBackupResourceData(getBackupResp, &data) + + diags := resp.State.Set(ctx, &data) + resp.Diagnostics.Append(diags...) +} + +func refreshBackupResourceData(resp *tidbcloud.GetBackupResp, data *backupResourceData) { + data.Name = resp.Name + data.BackupId = types.String{Value: resp.Id} + data.Type = types.String{Value: resp.Type} + data.Size = types.String{Value: resp.Size} + data.Status = types.String{Value: resp.Status} + data.CreateTimestamp = types.String{Value: resp.CreateTimestamp} + data.Description = types.String{Value: resp.Description} +} + +func (r backupResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + resp.Diagnostics.AddError("Unsupported", fmt.Sprintf("backup can't be updated")) +} + +func (r backupResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + var data backupResourceData + + diags := req.State.Get(ctx, &data) + resp.Diagnostics.Append(diags...) 
+ + if resp.Diagnostics.HasError() { + return + } + + tflog.Trace(ctx, "delete backup resource") + err := r.provider.client.DeleteBackupById(data.ProjectId, data.ClusterId, data.BackupId.Value) + if err != nil { + resp.Diagnostics.AddError("Delete Error", fmt.Sprintf("Unable to call DeleteBackupById, got error: %s", err)) + return + } +} + +func (r backupResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + idParts := strings.Split(req.ID, ",") + + if len(idParts) != 3 || idParts[0] == "" || idParts[1] == "" || idParts[2] == "" { + resp.Diagnostics.AddError( + "Unexpected Import Identifier", + fmt.Sprintf("Expected import identifier with format: project_id,cluster_id,backup_id. Got: %q", req.ID), + ) + return + } + + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("cluster_id"), idParts[1])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("id"), idParts[2])...) 
+} diff --git a/internal/provider/cluster_resource.go b/internal/provider/cluster_resource.go new file mode 100644 index 0000000..a79a7f7 --- /dev/null +++ b/internal/provider/cluster_resource.go @@ -0,0 +1,626 @@ +package provider + +import ( + "context" + "fmt" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/provider" + "strings" + + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/tidbcloud/terraform-provider-tidbcloud/tidbcloud" + "strconv" +) + +const dev = "DEVELOPER" +const ded = "DEDICATED" + +type clusterResourceData struct { + ClusterId types.String `tfsdk:"id"` + ProjectId string `tfsdk:"project_id"` + Name string `tfsdk:"name"` + ClusterType string `tfsdk:"cluster_type"` + CloudProvider string `tfsdk:"cloud_provider"` + Region string `tfsdk:"region"` + Status types.String `tfsdk:"status"` + Config clusterConfig `tfsdk:"config"` +} + +type clusterConfig struct { + Paused *bool `tfsdk:"paused"` + RootPassword types.String `tfsdk:"root_password"` + Port types.Int64 `tfsdk:"port"` + Components *components `tfsdk:"components"` + IPAccessList []ipAccess `tfsdk:"ip_access_list"` +} + +type components struct { + TiDB *componentTiDB `tfsdk:"tidb"` + TiKV *componentTiKV `tfsdk:"tikv"` + TiFlash *componentTiFlash `tfsdk:"tiflash"` +} + +type componentTiDB struct { + NodeSize string `tfsdk:"node_size"` + NodeQuantity int `tfsdk:"node_quantity"` +} + +type componentTiKV struct { + NodeSize string `tfsdk:"node_size"` + StorageSizeGib int `tfsdk:"storage_size_gib"` + NodeQuantity int `tfsdk:"node_quantity"` +} + +type componentTiFlash struct { + NodeSize string `tfsdk:"node_size"` + StorageSizeGib int `tfsdk:"storage_size_gib"` + NodeQuantity int 
`tfsdk:"node_quantity"` +} + +type ipAccess struct { + CIDR string `tfsdk:"cidr"` + Description string `tfsdk:"description"` +} + +type clusterResourceType struct{} + +func (t clusterResourceType) GetSchema(ctx context.Context) (tfsdk.Schema, diag.Diagnostics) { + return tfsdk.Schema{ + MarkdownDescription: "cluster resource", + Attributes: map[string]tfsdk.Attribute{ + "project_id": { + MarkdownDescription: "The ID of the project. You can get the project ID from [tidbcloud_project datasource](../data-sources/cluster_spec.md).", + Required: true, + Type: types.StringType, + }, + "name": { + MarkdownDescription: "The name of the cluster.", + Required: true, + Type: types.StringType, + }, + "id": { + Computed: true, + MarkdownDescription: "The ID of the cluster.", + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + Type: types.StringType, + }, + "cluster_type": { + MarkdownDescription: "Enum: \"DEDICATED\" \"DEVELOPER\", The cluster type.", + Required: true, + Type: types.StringType, + }, + "cloud_provider": { + MarkdownDescription: "Enum: \"AWS\" \"GCP\", The cloud provider on which your TiDB cluster is hosted.", + Required: true, + Type: types.StringType, + }, + "region": { + MarkdownDescription: "the region value should match the cloud provider's region code. You can get the complete list of available regions from the [tidbcloud_cluster_spec datasource](../data-sources/cluster_spec.md).", + Required: true, + Type: types.StringType, + }, + "status": { + MarkdownDescription: "the status of the cluster.", + Computed: true, + Type: types.StringType, + }, + "config": { + MarkdownDescription: "The configuration of the cluster.", + Required: true, + Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + "root_password": { + MarkdownDescription: "The root password to access the cluster. 
It must be 8-64 characters.", + Optional: true, + Type: types.StringType, + }, + "port": { + MarkdownDescription: "The TiDB port for connection. The port must be in the range of 1024-65535 except 10080, 4000 in default.\n" + + " - For a Developer Tier cluster, only port 4000 is available.", + Optional: true, + Computed: true, + Type: types.Int64Type, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "paused": { + MarkdownDescription: "lag that indicates whether the cluster is paused. true means to pause the cluster, and false means to resume the cluster.\n" + + " - The cluster can be paused only when the cluster_status is \"AVAILABLE\"." + + " - The cluster can be resumed only when the cluster_status is \"PAUSED\".", + Optional: true, + Type: types.BoolType, + }, + "components": { + MarkdownDescription: "The components of the cluster.\n" + + " - For a Developer Tier cluster, the components value can not be set." + + " - For a Dedicated Tier cluster, the components value must be set.", + Optional: true, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown()}, + Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + "tidb": { + MarkdownDescription: "The TiDB component of the cluster", + Required: true, + Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + "node_size": { + Required: true, + MarkdownDescription: "The size of the TiDB component in the cluster, You can get the available node size of each region from the [tidbcloud_cluster_spec datasource](../data-sources/cluster_spec.md).\n" + + " - If the vCPUs of TiDB or TiKV component is 2 or 4, then their vCPUs need to be the same.\n" + + " - If the vCPUs of TiDB or TiKV component is 2 or 4, then the cluster does not support TiFlash.\n" + + " - Can not modify node_size of an existing cluster.", + Type: types.StringType, + }, + "node_quantity": { + MarkdownDescription: "The number of nodes in the 
cluster. You can get the minimum and step of a node quantity from the [tidbcloud_cluster_spec datasource](../data-sources/cluster_spec.md).", + Required: true, + Type: types.Int64Type, + }, + }), + }, + "tikv": { + MarkdownDescription: "The TiKV component of the cluster", + Required: true, + Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + "node_size": { + MarkdownDescription: "The size of the TiKV component in the cluster, You can get the available node size of each region from the [tidbcloud_cluster_spec datasource](../data-sources/cluster_spec.md).\n" + + " - If the vCPUs of TiDB or TiKV component is 2 or 4, then their vCPUs need to be the same.\n" + + " - If the vCPUs of TiDB or TiKV component is 2 or 4, then the cluster does not support TiFlash.\n" + + " - Can not modify node_size of an existing cluster.", + Required: true, + Type: types.StringType, + }, + "storage_size_gib": { + MarkdownDescription: "The storage size of a node in the cluster. You can get the minimum and maximum of storage size from the [tidbcloud_cluster_spec datasource](../data-sources/cluster_spec.md).\n" + + " - Can not modify storage_size_gib of an existing cluster.", + Required: true, + Type: types.Int64Type, + }, + "node_quantity": { + MarkdownDescription: "The number of nodes in the cluster. 
You can get the minimum and step of a node quantity from the [tidbcloud_cluster_spec datasource](../data-sources/cluster_spec.md).\n" + + " - TiKV do not support decreasing node quantity.\n" + + " - The node_quantity of TiKV must be a multiple of 3.", + Required: true, + Type: types.Int64Type, + }, + }), + }, + "tiflash": { + MarkdownDescription: "The TiFlash component of the cluster.", + Optional: true, + Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + "node_size": { + MarkdownDescription: "The size of the TiFlash component in the cluster, You can get the available node size of each region from the [tidbcloud_cluster_spec datasource](../data-sources/cluster_spec.md).\n" + + " - Can not modify node_size of an existing cluster.", + Required: true, + Type: types.StringType, + }, + "storage_size_gib": { + MarkdownDescription: "The storage size of a node in the cluster. You can get the minimum and maximum of storage size from the [tidbcloud_cluster_spec datasource](../data-sources/cluster_spec.md).\n" + + " - Can not modify storage_size_gib of an existing cluster.", + Required: true, + Type: types.Int64Type, + }, + "node_quantity": { + MarkdownDescription: "The number of nodes in the cluster. 
You can get the minimum and step of a node quantity from the [tidbcloud_cluster_spec datasource](../data-sources/cluster_spec.md).\n" + + " - TiFlash do not support decreasing node quantity.", + Required: true, + Type: types.Int64Type, + }, + }), + }, + }), + }, + "ip_access_list": { + MarkdownDescription: "A list of IP addresses and Classless Inter-Domain Routing (CIDR) addresses that are allowed to access the TiDB Cloud cluster via [standard connection](https://docs.pingcap.com/tidbcloud/connect-to-tidb-cluster#connect-via-standard-connection).", + Optional: true, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "cidr": { + MarkdownDescription: "The IP address or CIDR range that you want to add to the cluster's IP access list.", + Required: true, + Type: types.StringType, + }, + "description": { + MarkdownDescription: "Description that explains the purpose of the entry.", + Required: true, + Type: types.StringType, + }, + }), + }, + }), + }, + }, + }, nil +} + +func (t clusterResourceType) NewResource(ctx context.Context, in provider.Provider) (resource.Resource, diag.Diagnostics) { + provider, diags := convertProviderType(in) + + return clusterResource{ + provider: provider, + }, diags +} + +type clusterResource struct { + provider tidbcloudProvider +} + +func (r clusterResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + if !r.provider.configured { + resp.Diagnostics.AddError( + "Provider not configured", + "The provider hasn't been configured before apply, likely because it depends on an unknown value from another resource. This leads to weird stuff happening, so we'd prefer if you didn't do that. Thanks!", + ) + return + } + + // get data from config + var data clusterResourceData + diags := req.Config.Get(ctx, &data) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + // for DEVELOPER cluster, components is not allowed. 
or plan and state may be inconsistent + if data.ClusterType == dev { + if data.Config.Components != nil { + resp.Diagnostics.AddError("Create Error", fmt.Sprintf("components is not allowed in %s cluster_type", dev)) + return + } + } + + // for DEDICATED cluster, components is required. + if data.ClusterType == ded { + if data.Config.Components == nil { + resp.Diagnostics.AddError("Create Error", fmt.Sprintf("components is required in %s cluster_type", ded)) + return + } + } + + // write logs using the tflog package + // see https://pkg.go.dev/github.com/hashicorp/terraform-plugin-log/tflog + tflog.Trace(ctx, "created cluster_resource") + createClusterResp, err := r.provider.client.CreateCluster(data.ProjectId, buildCreateClusterReq(data)) + if err != nil { + resp.Diagnostics.AddError("Create Error", fmt.Sprintf("Unable to call CreateCluster, got error: %s", err)) + return + } + // set clusterId. other computed attributes are not returned by create, they will be set when refresh + data.ClusterId = types.String{Value: strconv.FormatUint(createClusterResp.ClusterId, 10)} + + // we refresh in create for any unknown value. if someone has other opinions which is better, he can delete the refresh logic + tflog.Trace(ctx, "read cluster_resource") + cluster, err := r.provider.client.GetClusterById(data.ProjectId, data.ClusterId.Value) + if err != nil { + resp.Diagnostics.AddError("Create Error", fmt.Sprintf("Unable to call GetClusterById, got error: %s", err)) + return + } + refreshClusterResourceData(cluster, &data) + + // save into the Terraform state. + diags = resp.State.Set(ctx, &data) + resp.Diagnostics.Append(diags...) 
+} + +func buildCreateClusterReq(data clusterResourceData) *tidbcloud.CreateClusterReq { + // required + payload := tidbcloud.CreateClusterReq{ + Name: data.Name, + ClusterType: data.ClusterType, + CloudProvider: data.CloudProvider, + Region: data.Region, + Config: tidbcloud.ClusterConfig{ + RootPassword: data.Config.RootPassword.Value, + }, + } + + // optional + if data.Config.Components != nil { + tidb := data.Config.Components.TiDB + tikv := data.Config.Components.TiKV + tiflash := data.Config.Components.TiFlash + components := tidbcloud.Components{ + TiDB: tidbcloud.ComponentTiDB{ + NodeSize: tidb.NodeSize, + NodeQuantity: tidb.NodeQuantity, + }, + TiKV: tidbcloud.ComponentTiKV{ + NodeSize: tikv.NodeSize, + StorageSizeGib: tikv.StorageSizeGib, + NodeQuantity: tikv.NodeQuantity, + }, + } + // tiflash is optional + if tiflash != nil { + components.TiFlash = &tidbcloud.ComponentTiFlash{ + NodeSize: tiflash.NodeSize, + StorageSizeGib: tiflash.StorageSizeGib, + NodeQuantity: tiflash.NodeQuantity, + } + } + + payload.Config.Components = components + } + if data.Config.IPAccessList != nil { + var IPAccessList []tidbcloud.IPAccess + for _, key := range data.Config.IPAccessList { + IPAccessList = append(IPAccessList, tidbcloud.IPAccess{ + CIDR: key.CIDR, + Description: key.Description, + }) + } + payload.Config.IPAccessList = IPAccessList + } + if !data.Config.Port.IsNull() && !data.Config.Port.IsUnknown() { + payload.Config.Port = int(data.Config.Port.Value) + } + + return &payload +} + +func (r clusterResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + var projectId, clusterId string + + resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("project_id"), &projectId)...) + resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("id"), &clusterId)...) 
+ if resp.Diagnostics.HasError() { + return + } + + // call read api + tflog.Trace(ctx, "read cluster_resource") + cluster, err := r.provider.client.GetClusterById(projectId, clusterId) + if err != nil { + resp.Diagnostics.AddError("Read Error", fmt.Sprintf("Unable to call GetClusterById, got error: %s", err)) + return + } + + // refresh data with read result + var data clusterResourceData + // root_password, ip_access_list and pause will not return by read api, so we just use state's value even it changed on console! + // use types.String in case ImportState method throw unhandled null value + var rootPassword types.String + var iPAccessList []ipAccess + var paused *bool + resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("config").AtName("root_password"), &rootPassword)...) + resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("config").AtName("ip_access_list"), &iPAccessList)...) + resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("config").AtName("paused"), &paused)...) + data.Config.RootPassword = rootPassword + data.Config.IPAccessList = iPAccessList + data.Config.Paused = paused + + refreshClusterResourceData(cluster, &data) + + // save into the Terraform state + diags := resp.State.Set(ctx, &data) + resp.Diagnostics.Append(diags...) 
+} + +func refreshClusterResourceData(resp *tidbcloud.GetClusterResp, data *clusterResourceData) { + // must return + data.Name = resp.Name + data.ClusterId = types.String{Value: strconv.FormatUint(resp.Id, 10)} + data.Region = resp.Region + data.ProjectId = strconv.FormatUint(resp.ProjectId, 10) + data.ClusterType = resp.ClusterType + data.CloudProvider = resp.CloudProvider + data.Config.Port = types.Int64{Value: int64(resp.Config.Port)} + tidb := resp.Config.Components.TiDB + tikv := resp.Config.Components.TiKV + data.Config.Components = &components{ + TiDB: &componentTiDB{ + NodeSize: tidb.NodeSize, + NodeQuantity: tidb.NodeQuantity, + }, + TiKV: &componentTiKV{ + NodeSize: tikv.NodeSize, + NodeQuantity: tikv.NodeQuantity, + StorageSizeGib: tikv.StorageSizeGib, + }, + } + data.Status = types.String{Value: resp.Status.ClusterStatus} + // may return + tiflash := resp.Config.Components.TiFlash + if tiflash != nil { + data.Config.Components.TiFlash = &componentTiFlash{ + NodeSize: tiflash.NodeSize, + NodeQuantity: tiflash.NodeQuantity, + StorageSizeGib: tiflash.StorageSizeGib, + } + } + + // not return + // IPAccessList, password and pause will not update for it will not return by read api(in GetClusterResp) + +} + +// Update since open api is patch without check for the invalid parameter. we do a lot of check here to avoid inconsistency +// check the date can't be updated +// if plan and state is different, we can execute updated +func (r clusterResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + // get plan + var data clusterResourceData + diags := req.Plan.Get(ctx, &data) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + // get state + var state clusterResourceData + diags = req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + // DEVELOPER can not be changed now + if data.ClusterType == dev { + resp.Diagnostics.AddError( + "Update error", + "Unable to update DEVELOPER cluster", + ) + return + } + + // only components and paused can be changed now + if data.Name != state.Name || data.ClusterType != state.ClusterType || data.Region != state.Region || data.CloudProvider != state.CloudProvider || + data.ProjectId != state.ProjectId || data.ClusterId != state.ClusterId { + resp.Diagnostics.AddError( + "Update error", + "You may update the name,cluster_type,region,cloud_provider or projectId. They can not be changed, only components can be changed now", + ) + return + } + if !data.Config.Port.IsNull() && !data.Config.Port.IsNull() && data.Config.Port.Value != state.Config.Port.Value { + resp.Diagnostics.AddError( + "Update error", + "port can not be changed, only components can be changed now", + ) + return + } + if data.Config.IPAccessList != nil { + for index, key := range data.Config.IPAccessList { + if state.Config.IPAccessList[index].CIDR != key.CIDR || state.Config.IPAccessList[index].Description != key.Description { + resp.Diagnostics.AddError( + "Update error", + "ip_access_list can not be changed, only components can be changed now", + ) + return + } + } + } + + // check Components + tidb := data.Config.Components.TiDB + tikv := data.Config.Components.TiKV + tiflash := data.Config.Components.TiFlash + tidbState := state.Config.Components.TiDB + tikvState := state.Config.Components.TiKV + tiflashState := state.Config.Components.TiFlash + if tidb.NodeSize != tidbState.NodeSize { + resp.Diagnostics.AddError( + "Update error", + "tidb node_size can't be changed", + ) + return + } + if tikv.NodeSize != tikvState.NodeSize || tikv.StorageSizeGib != tikvState.StorageSizeGib { + resp.Diagnostics.AddError( + "Update error", + "tikv node_size or storage_size_gib can't be changed", + ) + return + } + if tiflash != nil && tiflashState != nil { + 
// if cluster have tiflash already, then we can't specify NodeSize and StorageSizeGib + if tiflash.NodeSize != tiflashState.NodeSize || tiflash.StorageSizeGib != tiflashState.StorageSizeGib { + resp.Diagnostics.AddError( + "Update error", + "tiflash node_size or storage_size_gib can't be changed", + ) + return + } + } + + // build UpdateClusterReq + var updateClusterReq tidbcloud.UpdateClusterReq + // build paused + if data.Config.Paused != nil { + if state.Config.Paused == nil || *data.Config.Paused != *state.Config.Paused { + updateClusterReq.Config.Paused = data.Config.Paused + } + } + // build components + var isComponentsChanged = false + if tidb.NodeQuantity != tidbState.NodeQuantity || tikv.NodeQuantity != tikvState.NodeQuantity { + isComponentsChanged = true + } + + var componentTiFlash *tidbcloud.ComponentTiFlash + if tiflash != nil { + if tiflashState == nil { + isComponentsChanged = true + componentTiFlash = &tidbcloud.ComponentTiFlash{ + NodeQuantity: tiflash.NodeQuantity, + NodeSize: tiflash.NodeSize, + StorageSizeGib: tiflash.StorageSizeGib, + } + } else if tiflash.NodeQuantity != tiflashState.NodeQuantity { + isComponentsChanged = true + componentTiFlash = &tidbcloud.ComponentTiFlash{ + NodeQuantity: tiflash.NodeQuantity, + } + } + } + if isComponentsChanged { + updateClusterReq.Config.Components = &tidbcloud.Components{ + TiDB: tidbcloud.ComponentTiDB{ + NodeQuantity: tidb.NodeQuantity, + }, + TiKV: tidbcloud.ComponentTiKV{ + NodeQuantity: tikv.NodeQuantity, + }, + TiFlash: componentTiFlash, + } + } + + tflog.Trace(ctx, "update cluster_resource") + err := r.provider.client.UpdateClusterById(data.ProjectId, data.ClusterId.Value, updateClusterReq) + if err != nil { + resp.Diagnostics.AddError("Update Error", fmt.Sprintf("Unable to call UpdateClusterById, got error: %s", err)) + return + } + + // we refresh for any unknown value. 
if someone has other opinions which is better, he can delete the refresh logic + tflog.Trace(ctx, "read cluster_resource") + cluster, err := r.provider.client.GetClusterById(data.ProjectId, data.ClusterId.Value) + if err != nil { + resp.Diagnostics.AddError("Update Error", fmt.Sprintf("Unable to call GetClusterById, got error: %s", err)) + return + } + refreshClusterResourceData(cluster, &data) + + // save into the Terraform state. + diags = resp.State.Set(ctx, &data) + resp.Diagnostics.Append(diags...) +} + +func (r clusterResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + var data clusterResourceData + + diags := req.State.Get(ctx, &data) + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + tflog.Trace(ctx, "delete cluster_resource") + err := r.provider.client.DeleteClusterById(data.ProjectId, data.ClusterId.Value) + if err != nil { + resp.Diagnostics.AddError("Delete Error", fmt.Sprintf("Unable to call DeleteClusterById, got error: %s", err)) + return + } +} + +func (r clusterResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + idParts := strings.Split(req.ID, ",") + + if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" { + resp.Diagnostics.AddError( + "Unexpected Import Identifier", + fmt.Sprintf("Expected import identifier with format: project_id,cluster_id. Got: %q", req.ID), + ) + return + } + + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("id"), idParts[1])...) 
+} diff --git a/internal/provider/cluster_spec_data_source.go b/internal/provider/cluster_spec_data_source.go new file mode 100644 index 0000000..34782d1 --- /dev/null +++ b/internal/provider/cluster_spec_data_source.go @@ -0,0 +1,296 @@ +package provider + +import ( + "context" + "fmt" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/provider" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" +) + +type clusterSpecDataSourceData struct { + Id types.String `tfsdk:"id"` + Items []clusterSpecItem `tfsdk:"items"` + Total types.Int64 `tfsdk:"total"` +} + +type clusterSpecItem struct { + ClusterType string `tfsdk:"cluster_type"` + CloudProvider string `tfsdk:"cloud_provider"` + Region string `tfsdk:"region"` + Tidb []tidbSpec `tfsdk:"tidb"` + Tikv []tikvSpec `tfsdk:"tikv"` + Tifalsh []tiflashSpec `tfsdk:"tiflash"` +} + +type tidbSpec struct { + NodeSize string `tfsdk:"node_size"` + NodeQuantityRange nodeQuantityRange `tfsdk:"node_quantity_range"` +} + +type tikvSpec struct { + NodeSize string `tfsdk:"node_size"` + StorageSizeGiRange storageSizeGiRange `tfsdk:"storage_size_gib_range"` + NodeQuantityRange nodeQuantityRange `tfsdk:"node_quantity_range"` +} + +type tiflashSpec struct { + NodeSize string `tfsdk:"node_size"` + StorageSizeGiRange storageSizeGiRange `tfsdk:"storage_size_gib_range"` + NodeQuantityRange nodeQuantityRange `tfsdk:"node_quantity_range"` +} + +type nodeQuantityRange struct { + Min int `tfsdk:"min"` + Step int `tfsdk:"step"` +} + +type storageSizeGiRange struct { + Min int `tfsdk:"min"` + Max int `tfsdk:"max"` +} + +// Ensure provider defined types fully satisfy framework interfaces +var _ provider.DataSourceType = clusterSpecDataSourceType{} +var _ datasource.DataSource = clusterSpecDataSource{} + +type 
clusterSpecDataSourceType struct{} + +func (t clusterSpecDataSourceType) GetSchema(ctx context.Context) (tfsdk.Schema, diag.Diagnostics) { + return tfsdk.Schema{ + MarkdownDescription: "cluster_spec data source", + Attributes: map[string]tfsdk.Attribute{ + "id": { + MarkdownDescription: "ignore it, it is just for test.", + Computed: true, + Type: types.StringType, + }, + "total": { + MarkdownDescription: "the total number of the spec.", + Computed: true, + Type: types.Int64Type, + }, + "items": { + Computed: true, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "cluster_type": { + MarkdownDescription: "Enum: \"DEDICATED\" \"DEVELOPER\", The cluster type.", + Computed: true, + Type: types.StringType, + }, + "cloud_provider": { + MarkdownDescription: "Enum: \"AWS\" \"GCP\", The cloud provider on which your TiDB cluster is hosted.", + Computed: true, + Type: types.StringType, + }, + "region": { + MarkdownDescription: "the region value should match the cloud provider's region code.", + Computed: true, + Type: types.StringType, + }, + "tidb": { + MarkdownDescription: "The list of TiDB specifications in the region.", + Computed: true, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "node_size": { + MarkdownDescription: "The size of the TiDB component in the cluster.", + Computed: true, + Type: types.StringType, + }, + "node_quantity_range": { + MarkdownDescription: "The range and step of node quantity of the TiDB component in the cluster.", + Computed: true, + Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + "min": { + MarkdownDescription: "The minimum node quantity of the component in the cluster.", + Computed: true, + Type: types.Int64Type, + }, + "step": { + MarkdownDescription: "The step of node quantity of the component in the cluster.", + Computed: true, + Type: types.Int64Type, + }, + }), + }, + }), + }, + "tikv": { + MarkdownDescription: "The list of TiKV specifications in the region.", + 
Computed: true, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "node_size": { + MarkdownDescription: "The size of the TiKV component in the cluster.", + Computed: true, + Type: types.StringType, + }, + "node_quantity_range": { + MarkdownDescription: "The range and step of node quantity of the TiKV component in the cluster.", + Computed: true, + Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + "min": { + MarkdownDescription: "The minimum node quantity of the component in the cluster.", + Computed: true, + Type: types.Int64Type, + }, + "step": { + MarkdownDescription: "The step of node quantity of the component in the cluster.", + Computed: true, + Type: types.Int64Type, + }, + }), + }, + "storage_size_gib_range": { + MarkdownDescription: "The storage size range for each node of the TiKV component in the cluster.", + Computed: true, + Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + "min": { + MarkdownDescription: "The minimum storage size for each node of the component in the cluster.", + Computed: true, + Type: types.Int64Type, + }, + "max": { + MarkdownDescription: "The maximum storage size for each node of the component in the cluster.", + Computed: true, + Type: types.Int64Type, + }, + }), + }, + }), + }, + "tiflash": { + MarkdownDescription: "The list of TiFlash specifications in the region.", + Computed: true, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "node_size": { + MarkdownDescription: "The size of the TiFlash component in the cluster.", + Computed: true, + Type: types.StringType, + }, + "node_quantity_range": { + MarkdownDescription: "The range and step of node quantity of the TiFlash component in the cluster.", + Computed: true, + Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + "min": { + MarkdownDescription: "The minimum node quantity of the component in the cluster.", + Computed: true, + Type: types.Int64Type, + }, + "step": { + 
MarkdownDescription: "The step of node quantity of the component in the cluster.", + Computed: true, + Type: types.Int64Type, + }, + }), + }, + "storage_size_gib_range": { + MarkdownDescription: "The storage size range for each node of the TiFlash component in the cluster.", + Computed: true, + Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + "min": { + MarkdownDescription: "The minimum storage size for each node of the component in the cluster.", + Computed: true, + Type: types.Int64Type, + }, + "max": { + MarkdownDescription: "The maximum storage size for each node of the component in the cluster.", + Computed: true, + Type: types.Int64Type, + }, + }), + }, + }), + }, + }), + }, + }, + }, nil +} + +func (t clusterSpecDataSourceType) NewDataSource(ctx context.Context, in provider.Provider) (datasource.DataSource, diag.Diagnostics) { + provider, diags := convertProviderType(in) + + return clusterSpecDataSource{ + provider: provider, + }, diags +} + +type clusterSpecDataSource struct { + provider tidbcloudProvider +} + +func (d clusterSpecDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data clusterSpecDataSourceData + diags := req.Config.Get(ctx, &data) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + tflog.Trace(ctx, "read cluster_spec data source") + spec, err := d.provider.client.GetSpecifications() + if err != nil { + resp.Diagnostics.AddError("Read Error", fmt.Sprintf("Unable to call read specifications, got error: %s", err)) + return + } + + var items []clusterSpecItem + for _, key := range spec.Items { + var tidbs []tidbSpec + for _, tidb := range key.Tidb { + tidbs = append(tidbs, tidbSpec{ + NodeSize: tidb.NodeSize, + NodeQuantityRange: nodeQuantityRange{ + Min: tidb.NodeQuantityRange.Min, + Step: tidb.NodeQuantityRange.Step, + }, + }) + } + var tikvs []tikvSpec + for _, tikv := range key.Tikv { + tikvs = append(tikvs, tikvSpec{ + NodeSize: tikv.NodeSize, + NodeQuantityRange: nodeQuantityRange{ + Min: tikv.NodeQuantityRange.Min, + Step: tikv.NodeQuantityRange.Step, + }, + StorageSizeGiRange: storageSizeGiRange{ + Min: tikv.StorageSizeGibRange.Min, + Max: tikv.StorageSizeGibRange.Max, + }, + }) + } + var tiflashs []tiflashSpec + for _, tiflash := range key.Tiflash { + tiflashs = append(tiflashs, tiflashSpec{ + NodeSize: tiflash.NodeSize, + NodeQuantityRange: nodeQuantityRange{ + Min: tiflash.NodeQuantityRange.Min, + Step: tiflash.NodeQuantityRange.Step, + }, + StorageSizeGiRange: storageSizeGiRange{ + Min: tiflash.StorageSizeGibRange.Min, + Max: tiflash.StorageSizeGibRange.Max, + }, + }) + } + items = append(items, clusterSpecItem{ + ClusterType: key.ClusterType, + CloudProvider: key.CloudProvider, + Region: key.Region, + Tidb: tidbs, + Tikv: tikvs, + Tifalsh: tiflashs, + }) + } + + data.Items = items + data.Total = types.Int64{Value: int64(len(items))} + data.Id = types.String{Value: "just for test"} + + diags = resp.State.Set(ctx, &data) + resp.Diagnostics.Append(diags...) 
+} diff --git a/internal/provider/example_data_source.go b/internal/provider/example_data_source.go deleted file mode 100644 index daa2080..0000000 --- a/internal/provider/example_data_source.go +++ /dev/null @@ -1,85 +0,0 @@ -package provider - -import ( - "context" - - "github.com/hashicorp/terraform-plugin-framework/datasource" - "github.com/hashicorp/terraform-plugin-framework/diag" - "github.com/hashicorp/terraform-plugin-framework/provider" - "github.com/hashicorp/terraform-plugin-framework/tfsdk" - "github.com/hashicorp/terraform-plugin-framework/types" - "github.com/hashicorp/terraform-plugin-log/tflog" -) - -// Ensure provider defined types fully satisfy framework interfaces -var _ provider.DataSourceType = exampleDataSourceType{} -var _ datasource.DataSource = exampleDataSource{} - -type exampleDataSourceType struct{} - -func (t exampleDataSourceType) GetSchema(ctx context.Context) (tfsdk.Schema, diag.Diagnostics) { - return tfsdk.Schema{ - // This description is used by the documentation generator and the language server. 
- MarkdownDescription: "Example data source", - - Attributes: map[string]tfsdk.Attribute{ - "configurable_attribute": { - MarkdownDescription: "Example configurable attribute", - Optional: true, - Type: types.StringType, - }, - "id": { - MarkdownDescription: "Example identifier", - Type: types.StringType, - Computed: true, - }, - }, - }, nil -} - -func (t exampleDataSourceType) NewDataSource(ctx context.Context, in provider.Provider) (datasource.DataSource, diag.Diagnostics) { - provider, diags := convertProviderType(in) - - return exampleDataSource{ - provider: provider, - }, diags -} - -type exampleDataSourceData struct { - ConfigurableAttribute types.String `tfsdk:"configurable_attribute"` - Id types.String `tfsdk:"id"` -} - -type exampleDataSource struct { - provider scaffoldingProvider -} - -func (d exampleDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { - var data exampleDataSourceData - - diags := req.Config.Get(ctx, &data) - resp.Diagnostics.Append(diags...) - - if resp.Diagnostics.HasError() { - return - } - - // If applicable, this is a great opportunity to initialize any necessary - // provider client data and make a call using it. - // example, err := d.provider.client.ReadExample(...) - // if err != nil { - // resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to read example, got error: %s", err)) - // return - // } - - // For the purposes of this example code, hardcoding a response value to - // save into the Terraform state. - data.Id = types.String{Value: "example-id"} - - // Write logs using the tflog package - // Documentation: https://terraform.io/plugin/log - tflog.Trace(ctx, "read a data source") - - diags = resp.State.Set(ctx, &data) - resp.Diagnostics.Append(diags...) 
-} diff --git a/internal/provider/example_resource.go b/internal/provider/example_resource.go deleted file mode 100644 index cebc6f0..0000000 --- a/internal/provider/example_resource.go +++ /dev/null @@ -1,157 +0,0 @@ -package provider - -import ( - "context" - - "github.com/hashicorp/terraform-plugin-framework/diag" - "github.com/hashicorp/terraform-plugin-framework/path" - "github.com/hashicorp/terraform-plugin-framework/provider" - "github.com/hashicorp/terraform-plugin-framework/resource" - "github.com/hashicorp/terraform-plugin-framework/tfsdk" - "github.com/hashicorp/terraform-plugin-framework/types" - "github.com/hashicorp/terraform-plugin-log/tflog" -) - -// Ensure provider defined types fully satisfy framework interfaces -var _ provider.ResourceType = exampleResourceType{} -var _ resource.Resource = exampleResource{} -var _ resource.ResourceWithImportState = exampleResource{} - -type exampleResourceType struct{} - -func (t exampleResourceType) GetSchema(ctx context.Context) (tfsdk.Schema, diag.Diagnostics) { - return tfsdk.Schema{ - // This description is used by the documentation generator and the language server. 
- MarkdownDescription: "Example resource", - - Attributes: map[string]tfsdk.Attribute{ - "configurable_attribute": { - MarkdownDescription: "Example configurable attribute", - Optional: true, - Type: types.StringType, - }, - "id": { - Computed: true, - MarkdownDescription: "Example identifier", - PlanModifiers: tfsdk.AttributePlanModifiers{ - resource.UseStateForUnknown(), - }, - Type: types.StringType, - }, - }, - }, nil -} - -func (t exampleResourceType) NewResource(ctx context.Context, in provider.Provider) (resource.Resource, diag.Diagnostics) { - provider, diags := convertProviderType(in) - - return exampleResource{ - provider: provider, - }, diags -} - -type exampleResourceData struct { - ConfigurableAttribute types.String `tfsdk:"configurable_attribute"` - Id types.String `tfsdk:"id"` -} - -type exampleResource struct { - provider scaffoldingProvider -} - -func (r exampleResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { - var data exampleResourceData - - diags := req.Config.Get(ctx, &data) - resp.Diagnostics.Append(diags...) - - if resp.Diagnostics.HasError() { - return - } - - // If applicable, this is a great opportunity to initialize any necessary - // provider client data and make a call using it. - // example, err := d.provider.client.CreateExample(...) - // if err != nil { - // resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to create example, got error: %s", err)) - // return - // } - - // For the purposes of this example code, hardcoding a response value to - // save into the Terraform state. - data.Id = types.String{Value: "example-id"} - - // Write logs using the tflog package - // Documentation: https://terraform.io/plugin/log - tflog.Trace(ctx, "created a resource") - - diags = resp.State.Set(ctx, &data) - resp.Diagnostics.Append(diags...) 
-} - -func (r exampleResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { - var data exampleResourceData - - diags := req.State.Get(ctx, &data) - resp.Diagnostics.Append(diags...) - - if resp.Diagnostics.HasError() { - return - } - - // If applicable, this is a great opportunity to initialize any necessary - // provider client data and make a call using it. - // example, err := d.provider.client.ReadExample(...) - // if err != nil { - // resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to read example, got error: %s", err)) - // return - // } - - diags = resp.State.Set(ctx, &data) - resp.Diagnostics.Append(diags...) -} - -func (r exampleResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { - var data exampleResourceData - - diags := req.Plan.Get(ctx, &data) - resp.Diagnostics.Append(diags...) - - if resp.Diagnostics.HasError() { - return - } - - // If applicable, this is a great opportunity to initialize any necessary - // provider client data and make a call using it. - // example, err := d.provider.client.UpdateExample(...) - // if err != nil { - // resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to update example, got error: %s", err)) - // return - // } - - diags = resp.State.Set(ctx, &data) - resp.Diagnostics.Append(diags...) -} - -func (r exampleResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { - var data exampleResourceData - - diags := req.State.Get(ctx, &data) - resp.Diagnostics.Append(diags...) - - if resp.Diagnostics.HasError() { - return - } - - // If applicable, this is a great opportunity to initialize any necessary - // provider client data and make a call using it. - // example, err := d.provider.client.DeleteExample(...) 
- // if err != nil { - // resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to delete example, got error: %s", err)) - // return - // } -} - -func (r exampleResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { - resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) -} diff --git a/internal/provider/example_resource_test.go b/internal/provider/example_resource_test.go deleted file mode 100644 index cd7582c..0000000 --- a/internal/provider/example_resource_test.go +++ /dev/null @@ -1,52 +0,0 @@ -package provider - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" -) - -func TestAccExampleResource(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, - Steps: []resource.TestStep{ - // Create and Read testing - { - Config: testAccExampleResourceConfig("one"), - Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr("scaffolding_example.test", "configurable_attribute", "one"), - resource.TestCheckResourceAttr("scaffolding_example.test", "id", "example-id"), - ), - }, - // ImportState testing - { - ResourceName: "scaffolding_example.test", - ImportState: true, - ImportStateVerify: true, - // This is not normally necessary, but is here because this - // example code does not have an actual upstream service. - // Once the Read method is able to refresh information from - // the upstream service, this can be removed. 
- ImportStateVerifyIgnore: []string{"configurable_attribute"}, - }, - // Update and Read testing - { - Config: testAccExampleResourceConfig("two"), - Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr("scaffolding_example.test", "configurable_attribute", "two"), - ), - }, - // Delete testing automatically occurs in TestCase - }, - }) -} - -func testAccExampleResourceConfig(configurableAttribute string) string { - return fmt.Sprintf(` -resource "scaffolding_example" "test" { - configurable_attribute = %[1]q -} -`, configurableAttribute) -} diff --git a/internal/provider/project_data_source.go b/internal/provider/project_data_source.go new file mode 100644 index 0000000..1f5261e --- /dev/null +++ b/internal/provider/project_data_source.go @@ -0,0 +1,155 @@ +package provider + +import ( + "context" + "fmt" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/provider" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" +) + +type projectDataSourceData struct { + Id types.String `tfsdk:"id"` + Page types.Int64 `tfsdk:"page"` + PageSize types.Int64 `tfsdk:"page_size"` + Projects []project `tfsdk:"items"` + Total types.Int64 `tfsdk:"total"` +} + +type project struct { + Id string `tfsdk:"id"` + OrgId string `tfsdk:"org_id"` + Name string `tfsdk:"name"` + ClusterCount int64 `tfsdk:"cluster_count"` + UserCount int64 `tfsdk:"user_count"` + CreateTimestamp string `tfsdk:"create_timestamp"` +} + +// Ensure provider defined types fully satisfy framework interfaces +var _ provider.DataSourceType = projectDataSourceType{} +var _ datasource.DataSource = projectDataSource{} + +type projectDataSourceType struct{} + +func (t projectDataSourceType) GetSchema(ctx context.Context) (tfsdk.Schema, diag.Diagnostics) { + return 
tfsdk.Schema{ + MarkdownDescription: "project data source", + Attributes: map[string]tfsdk.Attribute{ + "id": { + MarkdownDescription: "ignore it, it is just for test.", + Computed: true, + Type: types.StringType, + }, + "page": { + MarkdownDescription: "Default:1 The number of pages.", + Optional: true, + Computed: true, + Type: types.Int64Type, + }, + "page_size": { + MarkdownDescription: "Default:10 The size of a pages.", + Optional: true, + Computed: true, + Type: types.Int64Type, + }, + "items": { + MarkdownDescription: "The items of accessible projects.", + Computed: true, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "id": { + MarkdownDescription: "The ID of the project.", + Computed: true, + Type: types.StringType, + }, + "org_id": { + MarkdownDescription: "The ID of the TiDB Cloud organization to which the project belongs.", + Computed: true, + Type: types.StringType, + }, + "name": { + MarkdownDescription: "The name of the project.", + Computed: true, + Type: types.StringType, + }, + "cluster_count": { + MarkdownDescription: "The number of TiDB Cloud clusters deployed in the project.", + Computed: true, + Type: types.Int64Type, + }, + "user_count": { + MarkdownDescription: "The number of users in the project.", + Computed: true, + Type: types.Int64Type, + }, + "create_timestamp": { + MarkdownDescription: "The creation time of the cluster in Unix timestamp seconds (epoch time).", + Computed: true, + Type: types.StringType, + }, + }), + }, + "total": { + MarkdownDescription: "The total number of accessible projects.", + Computed: true, + Type: types.Int64Type, + }, + }, + }, nil +} + +func (t projectDataSourceType) NewDataSource(ctx context.Context, in provider.Provider) (datasource.DataSource, diag.Diagnostics) { + provider, diags := convertProviderType(in) + + return projectDataSource{ + provider: provider, + }, diags +} + +type projectDataSource struct { + provider tidbcloudProvider +} + +func (d projectDataSource) Read(ctx 
context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data projectDataSourceData + diags := req.Config.Get(ctx, &data) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + // set default value + if data.Page.IsNull() || data.Page.IsUnknown() { + data.Page = types.Int64{Value: 1} + } + if data.PageSize.IsNull() || data.PageSize.IsUnknown() { + data.PageSize = types.Int64{Value: 10} + } + + tflog.Trace(ctx, "read project data source") + projects, err := d.provider.client.GetAllProjects(data.Page.Value, data.PageSize.Value) + if err != nil { + resp.Diagnostics.AddError("Read Error", fmt.Sprintf("Unable to call read project, got error: %s", err)) + return + } + + data.Total = types.Int64{Value: projects.Total} + var items []project + for _, key := range projects.Items { + items = append(items, project{ + Id: key.Id, + OrgId: key.OrgId, + Name: key.Name, + ClusterCount: key.ClusterCount, + UserCount: key.UserCount, + CreateTimestamp: key.CreateTimestamp, + }) + } + data.Projects = items + data.Id = types.String{Value: "just for test"} + + diags = resp.State.Set(ctx, &data) + resp.Diagnostics.Append(diags...) +} diff --git a/internal/provider/provider.go b/internal/provider/provider.go index 2ad2bf9..7016f30 100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -3,6 +3,8 @@ package provider import ( "context" "fmt" + "github.com/tidbcloud/terraform-provider-tidbcloud/tidbcloud" + "os" "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/provider" @@ -11,17 +13,15 @@ import ( ) // Ensure provider defined types fully satisfy framework interfaces -var _ provider.Provider = &scaffoldingProvider{} +var _ provider.Provider = &tidbcloudProvider{} // provider satisfies the tfsdk.Provider interface and usually is included // with all Resource and DataSource implementations. 
-type scaffoldingProvider struct { +type tidbcloudProvider struct { // client can contain the upstream provider SDK or HTTP client used to // communicate with the upstream service. Resource and DataSource // implementations can then make calls using this client. - // - // TODO: If appropriate, implement upstream provider SDK or HTTP client. - // client vendorsdk.ExampleClient + client *tidbcloud.TiDBCloudClient // configured is set to true at the end of the Configure method. // This can be used in Resource and DataSource implementations to verify @@ -36,10 +36,12 @@ type scaffoldingProvider struct { // providerData can be used to store data from the Terraform configuration. type providerData struct { - Example types.String `tfsdk:"example"` + Username types.String `tfsdk:"username"` + Password types.String `tfsdk:"password"` } -func (p *scaffoldingProvider) Configure(ctx context.Context, req provider.ConfigureRequest, resp *provider.ConfigureResponse) { +func (p *tidbcloudProvider) Configure(ctx context.Context, req provider.ConfigureRequest, resp *provider.ConfigureResponse) { + // get providerData var data providerData diags := req.Config.Get(ctx, &data) resp.Diagnostics.Append(diags...) @@ -48,34 +50,103 @@ func (p *scaffoldingProvider) Configure(ctx context.Context, req provider.Config return } - // Configuration values are now available. - // if data.Example.Null { /* ... */ } + // User must provide a username to the provider + var username string + if data.Username.Unknown { + // Cannot connect to client with an unknown value + resp.Diagnostics.AddWarning( + "Unable to create client", + "Cannot use unknown value as username", + ) + return + } - // If the upstream provider SDK or HTTP client requires configuration, such - // as authentication or logging, this is a great opportunity to do so. 
+ if data.Username.Null { + username = os.Getenv("TIDBCLOUD_USERNAME") + } else { + username = data.Username.Value + } + if username == "" { + // Error vs warning - empty value must stop execution + resp.Diagnostics.AddError( + "Unable to find username", + "Username cannot be an empty string", + ) + return + } + + // User must provide a password to the provider + var password string + if data.Password.Unknown { + // Cannot connect to client with an unknown value + resp.Diagnostics.AddError( + "Unable to create client", + "Cannot use unknown value as password", + ) + return + } + + if data.Password.Null { + password = os.Getenv("TIDBCLOUD_PASSWORD") + } else { + password = data.Password.Value + } + + if password == "" { + // Error vs warning - empty value must stop execution + resp.Diagnostics.AddError( + "Unable to find password", + "password cannot be an empty string", + ) + return + } + + // Create a new tidb client and set it to the provider client + c, err := tidbcloud.NewTiDBCloudClient(username, password) + if err != nil { + resp.Diagnostics.AddError( + "Unable to create client", + "Unable to create tidb client:\n\n"+err.Error(), + ) + return + } + + p.client = c p.configured = true } -func (p *scaffoldingProvider) GetResources(ctx context.Context) (map[string]provider.ResourceType, diag.Diagnostics) { +func (p *tidbcloudProvider) GetResources(ctx context.Context) (map[string]provider.ResourceType, diag.Diagnostics) { return map[string]provider.ResourceType{ - "scaffolding_example": exampleResourceType{}, + "tidbcloud_cluster": clusterResourceType{}, + "tidbcloud_backup": backupResourceType{}, + "tidbcloud_restore": restoreResourceType{}, }, nil } -func (p *scaffoldingProvider) GetDataSources(ctx context.Context) (map[string]provider.DataSourceType, diag.Diagnostics) { +func (p *tidbcloudProvider) GetDataSources(ctx context.Context) (map[string]provider.DataSourceType, diag.Diagnostics) { return map[string]provider.DataSourceType{ - "scaffolding_example": 
exampleDataSourceType{}, + "tidbcloud_project": projectDataSourceType{}, + "tidbcloud_cluster_spec": clusterSpecDataSourceType{}, + "tidbcloud_backup": backupDataSourceType{}, + "tidbcloud_restore": restoreDataSourceType{}, }, nil } -func (p *scaffoldingProvider) GetSchema(ctx context.Context) (tfsdk.Schema, diag.Diagnostics) { +func (p *tidbcloudProvider) GetSchema(ctx context.Context) (tfsdk.Schema, diag.Diagnostics) { return tfsdk.Schema{ Attributes: map[string]tfsdk.Attribute{ - "example": { - MarkdownDescription: "Example provider attribute", + "username": { + MarkdownDescription: "Public Key", + Type: types.StringType, Optional: true, + Sensitive: true, + }, + "password": { + MarkdownDescription: "Private Key", Type: types.StringType, + Optional: true, + Sensitive: true, }, }, }, nil @@ -83,7 +154,7 @@ func (p *scaffoldingProvider) GetSchema(ctx context.Context) (tfsdk.Schema, diag func New(version string) func() provider.Provider { return func() provider.Provider { - return &scaffoldingProvider{ + return &tidbcloudProvider{ version: version, } } @@ -94,17 +165,17 @@ func New(version string) func() provider.Provider { // this helper can be skipped and the provider type can be directly type // asserted (e.g. provider: in.(*scaffoldingProvider)), however using this can prevent // potential panics. -func convertProviderType(in provider.Provider) (scaffoldingProvider, diag.Diagnostics) { +func convertProviderType(in provider.Provider) (tidbcloudProvider, diag.Diagnostics) { var diags diag.Diagnostics - p, ok := in.(*scaffoldingProvider) + p, ok := in.(*tidbcloudProvider) if !ok { diags.AddError( "Unexpected Provider Instance Type", fmt.Sprintf("While creating the data source or resource, an unexpected provider type (%T) was received. 
This is always a bug in the provider code and should be reported to the provider developers.", p), ) - return scaffoldingProvider{}, diags + return tidbcloudProvider{}, diags } if p == nil { @@ -112,7 +183,7 @@ func convertProviderType(in provider.Provider) (scaffoldingProvider, diag.Diagno "Unexpected Provider Instance Type", "While creating the data source or resource, an unexpected empty provider instance was received. This is always a bug in the provider code and should be reported to the provider developers.", ) - return scaffoldingProvider{}, diags + return tidbcloudProvider{}, diags } return *p, diags diff --git a/internal/provider/provider_test.go b/internal/provider/provider_test.go deleted file mode 100644 index 05e143b..0000000 --- a/internal/provider/provider_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package provider - -import ( - "testing" - - "github.com/hashicorp/terraform-plugin-framework/providerserver" - "github.com/hashicorp/terraform-plugin-go/tfprotov6" -) - -// testAccProtoV6ProviderFactories are used to instantiate a provider during -// acceptance testing. The factory function will be invoked for every Terraform -// CLI command executed to create a provider server to which the CLI can -// reattach. -var testAccProtoV6ProviderFactories = map[string]func() (tfprotov6.ProviderServer, error){ - "scaffolding": providerserver.NewProtocol6WithError(New("test")()), -} - -func testAccPreCheck(t *testing.T) { - // You can add code here to run prior to any test case execution, for example assertions - // about the appropriate environment variables being set are common to see in a pre-check - // function. 
-} diff --git a/internal/provider/restore_data_source.go b/internal/provider/restore_data_source.go new file mode 100644 index 0000000..72be492 --- /dev/null +++ b/internal/provider/restore_data_source.go @@ -0,0 +1,201 @@ +package provider + +import ( + "context" + "fmt" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/provider" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" +) + +type restoreDataSourceData struct { + Id types.String `tfsdk:"id"` + ProjectId string `tfsdk:"project_id"` + Page types.Int64 `tfsdk:"page"` + PageSize types.Int64 `tfsdk:"page_size"` + Items []restore `tfsdk:"items"` + Total types.Int64 `tfsdk:"total"` +} + +type restore struct { + Id string `tfsdk:"id"` + CreateTimestamp string `tfsdk:"create_timestamp"` + BackupId string `tfsdk:"backup_id"` + ClusterId string `tfsdk:"cluster_id"` + Status string `tfsdk:"status"` + Cluster cluster `tfsdk:"cluster"` + ErrorMessage string `tfsdk:"error_message"` +} + +// Ensure provider defined types fully satisfy framework interfaces +var _ provider.DataSourceType = restoreDataSourceType{} +var _ datasource.DataSource = restoreDataSource{} + +type restoreDataSourceType struct{} + +func (t restoreDataSourceType) GetSchema(ctx context.Context) (tfsdk.Schema, diag.Diagnostics) { + return tfsdk.Schema{ + MarkdownDescription: "restore data source", + Attributes: map[string]tfsdk.Attribute{ + "id": { + MarkdownDescription: "ignore it, it is just for test.", + Computed: true, + Type: types.StringType, + }, + "project_id": { + MarkdownDescription: "The ID of the project. 
You can get the project ID from [tidbcloud_project datasource](../project).", + Required: true, + Type: types.StringType, + }, + "page": { + MarkdownDescription: "Default:1 The number of pages.", + Optional: true, + Computed: true, + Type: types.Int64Type, + }, + "page_size": { + MarkdownDescription: "Default:10 The size of a pages.", + Optional: true, + Computed: true, + Type: types.Int64Type, + }, + "items": { + MarkdownDescription: "Default:10 The size of a pages.", + Computed: true, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "id": { + MarkdownDescription: "The ID of the restore task.", + Computed: true, + Type: types.StringType, + }, + "create_timestamp": { + MarkdownDescription: "The creation time of the backup in UTC.The time format follows the ISO8601 standard, which is YYYY-MM-DD (year-month-day) + T +HH:MM:SS (hour-minutes-seconds) + Z. For example, 2020-01-01T00:00:00Z.", + Computed: true, + Type: types.StringType, + }, + "backup_id": { + MarkdownDescription: "The ID of the backup.", + Computed: true, + Type: types.StringType, + }, + "cluster_id": { + MarkdownDescription: "The cluster ID of the backup.", + Computed: true, + Type: types.StringType, + }, + "status": { + MarkdownDescription: "Enum: \"PENDING\" \"RUNNING\" \"FAILED\" \"SUCCESS\", The status of the restore task.", + Computed: true, + Type: types.StringType, + }, + "error_message": { + MarkdownDescription: "The error message of restore if failed.", + Computed: true, + Type: types.StringType, + }, + "cluster": { + MarkdownDescription: "The information of the restored cluster. The restored cluster is the new cluster your backup data is restored to.", + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + "id": { + MarkdownDescription: "The ID of the restored cluster. 
The restored cluster is the new cluster your backup data is restored to.", + Computed: true, + Type: types.StringType, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "name": { + MarkdownDescription: "The name of the restored cluster. The restored cluster is the new cluster your backup data is restored to.", + Computed: true, + Type: types.StringType, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "status": { + MarkdownDescription: "The status of the restored cluster. Possible values are \"AVAILABLE\", \"CREATING\", \"MODIFYING\", \"PAUSED\", \"RESUMING\", and \"CLEARED\".", + Computed: true, + Type: types.StringType, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + }), + }, + }), + }, + "total": { + MarkdownDescription: "The total number of restore tasks in the project.", + Computed: true, + Type: types.Int64Type, + }, + }, + }, nil +} + +func (t restoreDataSourceType) NewDataSource(ctx context.Context, in provider.Provider) (datasource.DataSource, diag.Diagnostics) { + provider, diags := convertProviderType(in) + + return restoreDataSource{ + provider: provider, + }, diags +} + +type restoreDataSource struct { + provider tidbcloudProvider +} + +func (d restoreDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data restoreDataSourceData + diags := req.Config.Get(ctx, &data) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + // set default value + if data.Page.IsNull() || data.Page.IsUnknown() { + data.Page = types.Int64{Value: 1} + } + if data.PageSize.IsNull() || data.PageSize.IsUnknown() { + data.PageSize = types.Int64{Value: 10} + } + + tflog.Trace(ctx, "read restore data source") + restores, err := d.provider.client.GetRestoreTasks(data.ProjectId, data.Page.Value, data.PageSize.Value) + if err != nil { + resp.Diagnostics.AddError("Read Error", fmt.Sprintf("Unable to call GetRestoreTasks, got error: %s", err)) + return + } + + data.Total = types.Int64{Value: restores.Total} + var items []restore + for _, key := range restores.Items { + items = append(items, restore{ + Id: key.Id, + CreateTimestamp: key.CreateTimestamp, + BackupId: key.BackupId, + ClusterId: key.ClusterId, + ErrorMessage: key.ErrorMessage, + Status: key.Status, + Cluster: cluster{ + Id: key.Cluster.Id, + Name: key.Cluster.Name, + Status: key.Cluster.Status, + }, + }) + } + data.Items = items + data.Id = types.String{Value: "just for test"} + + diags = resp.State.Set(ctx, &data) + resp.Diagnostics.Append(diags...) 
+} diff --git a/internal/provider/restore_resource.go b/internal/provider/restore_resource.go new file mode 100644 index 0000000..80f57d7 --- /dev/null +++ b/internal/provider/restore_resource.go @@ -0,0 +1,389 @@ +package provider + +import ( + "context" + "fmt" + "github.com/tidbcloud/terraform-provider-tidbcloud/tidbcloud" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/provider" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" +) + +type restoreResourceData struct { + ClusterId types.String `tfsdk:"cluster_id"` + RestoreId types.String `tfsdk:"id"` + ProjectId string `tfsdk:"project_id"` + Name string `tfsdk:"name"` + BackupId string `tfsdk:"backup_id"` + Config restoreConfig `tfsdk:"config"` + CreateTimestamp types.String `tfsdk:"create_timestamp"` + Status types.String `tfsdk:"status"` + Cluster *cluster `tfsdk:"cluster"` + ErrorMessage types.String `tfsdk:"error_message"` +} + +type restoreConfig struct { + RootPassword types.String `tfsdk:"root_password"` + Port types.Int64 `tfsdk:"port"` + Components *components `tfsdk:"components"` + IPAccessList []ipAccess `tfsdk:"ip_access_list"` +} + +type cluster struct { + Id string `tfsdk:"id"` + Name string `tfsdk:"name"` + Status string `tfsdk:"status"` +} + +// Ensure provider defined types fully satisfy framework interfaces +var _ provider.ResourceType = restoreResourceType{} +var _ resource.Resource = restoreResource{} +var _ resource.ResourceWithImportState = restoreResource{} + +type restoreResourceType struct{} + +func (t restoreResourceType) GetSchema(ctx context.Context) (tfsdk.Schema, diag.Diagnostics) { + return tfsdk.Schema{ + MarkdownDescription: "restore resource", + Attributes: map[string]tfsdk.Attribute{ + "cluster_id": { + MarkdownDescription: "The ID of the 
cluster", + Computed: true, + Type: types.StringType, + }, + "id": { + MarkdownDescription: "The ID of the restore", + Computed: true, + Type: types.StringType, + }, + "project_id": { + MarkdownDescription: "The ID of the project. You can get the project ID from [tidbcloud_project datasource](../project).", + Required: true, + Type: types.StringType, + }, + "name": { + MarkdownDescription: "The name of the restore", + Required: true, + Type: types.StringType, + }, + "backup_id": { + Required: true, + MarkdownDescription: "The ID of the backup", + Type: types.StringType, + }, + "config": { + MarkdownDescription: "The configuration of the cluster", + Required: true, + Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + "root_password": { + MarkdownDescription: "The root password to access the cluster. It must be 8-64 characters.", + Required: true, + Type: types.StringType, + }, + "port": { + MarkdownDescription: "The TiDB port for connection. The port must be in the range of 1024-65535 except 10080, 4000 in default.\n" + + " - For a Developer Tier cluster, only port 4000 is available.", + Optional: true, + Computed: true, + Type: types.Int64Type, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "components": { + MarkdownDescription: "The components of the cluster.\n" + + " - For a Developer Tier cluster, the components value can not be set." 
+ + " - For a Dedicated Tier cluster, the components value must be set.", + Required: true, + Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + "tidb": { + Required: true, + Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + "node_size": { + MarkdownDescription: "The size of the TiDB component in the cluster, You can get the available node size of each region from the [tidbcloud_cluster_spec datasource](./cluster_spec.md).\n" + + " - If the vCPUs of TiDB or TiKV component is 2 or 4, then their vCPUs need to be the same.\n" + + " - If the vCPUs of TiDB or TiKV component is 2 or 4, then the cluster does not support TiFlash.\n" + + " - Can not modify node_size of an existing cluster.", + Required: true, + Type: types.StringType, + }, + "node_quantity": { + MarkdownDescription: "The number of nodes in the cluster. You can get the minimum and step of a node quantity from the [tidbcloud_cluster_spec datasource](./cluster_spec.md).", + Required: true, + Type: types.Int64Type, + }, + }), + }, + "tikv": { + Required: true, + Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + "node_size": { + MarkdownDescription: "The size of the TiKV component in the cluster, You can get the available node size of each region from the [tidbcloud_cluster_spec datasource](./cluster_spec.md).\n" + + " - If the vCPUs of TiDB or TiKV component is 2 or 4, then their vCPUs need to be the same.\n" + + " - If the vCPUs of TiDB or TiKV component is 2 or 4, then the cluster does not support TiFlash.\n" + + " - Can not modify node_size of an existing cluster.", + Required: true, + Type: types.StringType, + }, + "storage_size_gib": { + MarkdownDescription: "The storage size of a node in the cluster. 
You can get the minimum and maximum of storage size from the [tidbcloud_cluster_spec datasource](./cluster_spec.md).\n" + + " - Can not modify storage_size_gib of an existing cluster.", + Required: true, + Type: types.Int64Type, + }, + "node_quantity": { + MarkdownDescription: "The number of nodes in the cluster. You can get the minimum and step of a node quantity from the [tidbcloud_cluster_spec datasource](./cluster_spec.md).", + Required: true, + Type: types.Int64Type, + }, + }), + }, + "tiflash": { + Optional: true, + Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + "node_size": { + MarkdownDescription: "The size of the TiFlash component in the cluster, You can get the available node size of each region from the [tidbcloud_cluster_spec datasource](./cluster_spec.md).\n" + + " - If the vCPUs of TiDB or TiKV component is 2 or 4, then their vCPUs need to be the same.\n" + + " - If the vCPUs of TiDB or TiKV component is 2 or 4, then the cluster does not support TiFlash.\n" + + " - Can not modify node_size of an existing cluster.", + Required: true, + Type: types.StringType, + }, + "storage_size_gib": { + MarkdownDescription: "The storage size of a node in the cluster. You can get the minimum and maximum of storage size from the [tidbcloud_cluster_spec datasource](./cluster_spec.md).\n" + + " - Can not modify storage_size_gib of an existing cluster.", + Required: true, + Type: types.Int64Type, + }, + "node_quantity": { + MarkdownDescription: "The number of nodes in the cluster. 
You can get the minimum and step of a node quantity from the [tidbcloud_cluster_spec datasource](./cluster_spec.md).", + Required: true, + Type: types.Int64Type, + }, + }), + }, + }), + }, + "ip_access_list": { + MarkdownDescription: "A list of IP addresses and Classless Inter-Domain Routing (CIDR) addresses that are allowed to access the TiDB Cloud cluster via [standard connection](https://docs.pingcap.com/tidbcloud/connect-to-tidb-cluster#connect-via-standard-connection).", + Optional: true, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "cidr": { + MarkdownDescription: "The IP address or CIDR range that you want to add to the cluster's IP access list.", + Required: true, + Type: types.StringType, + }, + "description": { + MarkdownDescription: "Description that explains the purpose of the entry.", + Required: true, + Type: types.StringType, + }, + }), + }, + }), + }, + "status": { + Computed: true, + MarkdownDescription: "Enum: \"PENDING\" \"RUNNING\" \"FAILED\" \"SUCCESS\"\nThe status of the restore task.", + Type: types.StringType, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "create_timestamp": { + Computed: true, + MarkdownDescription: "The creation time of the backup in UTC.The time format follows the ISO8601 standard, which is YYYY-MM-DD (year-month-day) + T +HH:MM:SS (hour-minutes-seconds) + Z. For example, 2020-01-01T00:00:00Z.", + Type: types.StringType, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "cluster": { + MarkdownDescription: "The information of the restored cluster. The restored cluster is the new cluster your backup data is restored to.", + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + "id": { + MarkdownDescription: "The ID of the restored cluster. 
The restored cluster is the new cluster your backup data is restored to.", + Computed: true, + Type: types.StringType, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "name": { + MarkdownDescription: "The name of the restored cluster. The restored cluster is the new cluster your backup data is restored to.", + Computed: true, + Type: types.StringType, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "status": { + MarkdownDescription: "The status of the restored cluster. Possible values are \"AVAILABLE\", \"CREATING\", \"MODIFYING\", \"PAUSED\", \"RESUMING\",\"UNAVAILABLE\", \"IMPORTING\" and \"CLEARED\".", + Computed: true, + Type: types.StringType, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + }), + }, + "error_message": { + MarkdownDescription: "The error message of restore if failed.", + Computed: true, + Type: types.StringType, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + }, + }, nil +} + +func (t restoreResourceType) NewResource(ctx context.Context, in provider.Provider) (resource.Resource, diag.Diagnostics) { + provider, diags := convertProviderType(in) + + return restoreResource{ + provider: provider, + }, diags +} + +type restoreResource struct { + provider tidbcloudProvider +} + +func (r restoreResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + var data restoreResourceData + + diags := req.Config.Get(ctx, &data) + resp.Diagnostics.Append(diags...) 
+ + if resp.Diagnostics.HasError() { + return + } + + tflog.Trace(ctx, "create restore resource") + + createRestoreTaskResp, err := r.provider.client.CreateRestoreTask(data.ProjectId, buildCreateRestoreTaskReq(data)) + if err != nil { + resp.Diagnostics.AddError("Create Error", fmt.Sprintf("Unable to call CreateRestoreTask, got error: %s", err)) + return + } + + tflog.Trace(ctx, "read restore resource") + getRestoreTaskResp, err := r.provider.client.GetRestoreTask(data.ProjectId, createRestoreTaskResp.Id) + if err != nil { + resp.Diagnostics.AddError("Create Error", fmt.Sprintf("Unable to call GetRestoreTask, got error: %s", err)) + return + } + refreshRestoreResourceData(getRestoreTaskResp, &data) + + // save into the Terraform state. + diags = resp.State.Set(ctx, &data) + resp.Diagnostics.Append(diags...) +} + +func buildCreateRestoreTaskReq(data restoreResourceData) tidbcloud.CreateRestoreTaskReq { + tidb := data.Config.Components.TiDB + tikv := data.Config.Components.TiKV + tiflash := data.Config.Components.TiFlash + // required + payload := tidbcloud.CreateRestoreTaskReq{ + BackupId: data.BackupId, + Name: data.Name, + Config: tidbcloud.ClusterConfig{ + RootPassword: data.Config.RootPassword.Value, + Components: tidbcloud.Components{ + TiDB: tidbcloud.ComponentTiDB{ + NodeSize: tidb.NodeSize, + NodeQuantity: tidb.NodeQuantity, + }, + TiKV: tidbcloud.ComponentTiKV{ + NodeSize: tikv.NodeSize, + StorageSizeGib: tikv.StorageSizeGib, + NodeQuantity: tikv.NodeQuantity, + }, + }, + }, + } + + // port is optional + if !data.Config.Port.IsNull() && !data.Config.Port.IsUnknown() { + payload.Config.Port = int(data.Config.Port.Value) + } + // tiflash is optional + if tiflash != nil { + payload.Config.Components.TiFlash = &tidbcloud.ComponentTiFlash{ + NodeSize: tiflash.NodeSize, + StorageSizeGib: tiflash.StorageSizeGib, + NodeQuantity: tiflash.NodeQuantity, + } + } + // ip_access_list is optional + if data.Config.IPAccessList != nil { + var IPAccessList 
[]tidbcloud.IPAccess + for _, key := range data.Config.IPAccessList { + IPAccessList = append(IPAccessList, tidbcloud.IPAccess{ + CIDR: key.CIDR, + Description: key.Description, + }) + } + payload.Config.IPAccessList = IPAccessList + } + + return payload +} + +func (r restoreResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + var data restoreResourceData + + diags := req.State.Get(ctx, &data) + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + tflog.Trace(ctx, "read restore resource") + getRestoreTaskResp, err := r.provider.client.GetRestoreTask(data.ProjectId, data.RestoreId.Value) + if err != nil { + resp.Diagnostics.AddError("Read Error", fmt.Sprintf("Unable to call GetRestoreTask, got error: %s", err)) + return + } + + refreshRestoreResourceData(getRestoreTaskResp, &data) + + diags = resp.State.Set(ctx, &data) + resp.Diagnostics.Append(diags...) +} + +func refreshRestoreResourceData(resp *tidbcloud.GetRestoreTaskResp, data *restoreResourceData) { + data.ClusterId = types.String{Value: resp.ClusterId} + data.RestoreId = types.String{Value: resp.Id} + data.CreateTimestamp = types.String{Value: resp.CreateTimestamp} + data.Status = types.String{Value: resp.Status} + data.Cluster = &cluster{ + Id: resp.Cluster.Id, + Name: resp.Cluster.Name, + Status: resp.Cluster.Status, + } + data.ErrorMessage = types.String{Value: resp.ErrorMessage} +} + +func (r restoreResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + resp.Diagnostics.AddError("Unsupported", fmt.Sprintf("restore can't be updated")) +} + +func (r restoreResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + resp.Diagnostics.AddWarning("Unsupported", fmt.Sprintf("restore can't be deleted")) +} + +func (r restoreResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { +} diff --git 
a/internal/provider/testmanually/cluster_update_test.md b/internal/provider/testmanually/cluster_update_test.md new file mode 100644 index 0000000..fe8ebe4 --- /dev/null +++ b/internal/provider/testmanually/cluster_update_test.md @@ -0,0 +1,138 @@ +# Test cluster update + +It is hard for us to test cluster update with acceptance tests because it takes too long for the dedicated cluster to become ready. + +Here are the steps to test cluster update manually: + +1. write the config file +``` +terraform { + required_providers { + tidbcloud = { + source = "" // update it + } + } +} + +provider "tidbcloud" { + username = "" // update it + password = "" // update it +} + +resource "tidbcloud_cluster" "cluster1" { + project_id = "" // update it + name = "cluster1" + cluster_type = "DEDICATED" + cloud_provider = "AWS" + region = "us-east-1" + config = { + root_password = "" // update it + components = { + tidb = { + node_size : "8C16G" + node_quantity : 1 + } + tikv = { + node_size : "8C32G" + storage_size_gib : 500, + node_quantity : 3 + } + } + } +} +``` + +2. execute `terraform apply --auto-approve`, it should succeed +3. wait until the cluster status turns to available +4. change the config file: add tiflash +``` +resource "tidbcloud_cluster" "cluster1" { + project_id = "" // update it + name = "cluster1" + cluster_type = "DEDICATED" + cloud_provider = "AWS" + region = "us-east-1" + config = { + root_password = "" // update it + components = { + tidb = { + node_size : "8C16G" + node_quantity : 1 + } + tikv = { + node_size : "8C32G" + storage_size_gib : 500, + node_quantity : 3 + } + tiflash = { + node_size : "8C64G" + storage_size_gib : 500, + node_quantity : 1 + } + } + } +} +``` +5. execute `terraform apply --auto-approve` +6. wait until the cluster status turns to available +7.
change the config file: increase tiflash's storage_size_gib +``` +resource "tidbcloud_cluster" "cluster1" { + project_id = "" // update it + name = "cluster1" + cluster_type = "DEDICATED" + cloud_provider = "AWS" + region = "us-east-1" + config = { + root_password = "" // update it + components = { + tidb = { + node_size : "8C16G" + node_quantity : 1 + } + tikv = { + node_size : "8C32G" + storage_size_gib : 500, + node_quantity : 3 + } + tiflash = { + node_size : "8C64G" + storage_size_gib : 1000, + node_quantity : 1 + } + } + } +} +``` +8. execute `terraform apply --auto-approve`, and it should return an error like `tiflash node_size or storage_size_gib can't be changed` +9. change the config file: scale out tiflash by increasing node_quantity +``` +resource "tidbcloud_cluster" "cluster1" { + project_id = "" // update it + name = "cluster1" + cluster_type = "DEDICATED" + cloud_provider = "AWS" + region = "us-east-1" + config = { + root_password = "" // update it + components = { + tidb = { + node_size : "8C16G" + node_quantity : 1 + } + tikv = { + node_size : "8C32G" + storage_size_gib : 500, + node_quantity : 3 + } + tiflash = { + node_size : "8C64G" + storage_size_gib : 500, + node_quantity : 2 + } + } + } +} +``` +10. execute `terraform apply --auto-approve`, it should succeed +11.
delete the cluster with `terraform destroy --auto-approve` diff --git a/internal/provider/testmanually/restore_resource_test.go b/internal/provider/testmanually/restore_resource_test.go new file mode 100644 index 0000000..5842eaf --- /dev/null +++ b/internal/provider/testmanually/restore_resource_test.go @@ -0,0 +1,77 @@ +package manually + +import ( + "fmt" + "github.com/hashicorp/terraform-plugin-framework/providerserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/tidbcloud/terraform-provider-tidbcloud/internal/provider" + "regexp" + "testing" +) + +// Test restore resource, if you want to test it: +// 1. delete the t.Skip +// 2. make sure a backup is set up already, set project_id and backup_id in testAccRestoreResourceConfig +// 3. test with `TF_ACC=1 go test -v ./internal/provider/testmanually/restore_resource_test.go` +// 4. ignore the delete error, because restore task can not be deleted. Check the console. if the cluster is in creating, the test is regarded as success +// 5. 
delete the restored cluster manually +func TestAccRestoreResource(t *testing.T) { + t.Skip("skip for restored can't be delete") + reg, _ := regexp.Compile(".*restore can't be updated.*") + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: map[string]func() (tfprotov6.ProviderServer, error){ + "tidbcloud": providerserver.NewProtocol6WithError(provider.New("test")()), + }, + Steps: []resource.TestStep{ + // Create and Read restore + { + Config: testAccRestoreResourceConfig("restore-test"), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrSet("tidbcloud_restore.test", "id"), + resource.TestCheckResourceAttr("tidbcloud_restore.test", "error_message", ""), + ), + }, + // Update is not supported + { + Config: testAccRestoreResourceConfig("restore-test2"), + ExpectError: reg, + }, + }, + }) +} + +// fill in the project_id and backup_id +func testAccRestoreResourceConfig(name string) string { + return fmt.Sprintf(` +resource "tidbcloud_restore" "test" { + project_id = "1372813089189561287" + backup_id = "1320143" + name = "%s" + config = { + root_password = "Shiyuhang1." 
+ port = 4002 + components = { + tidb = { + node_size : "8C16G" + node_quantity : 1 + } + tikv = { + node_size : "8C32G" + storage_size_gib : 500 + node_quantity : 3 + } + tiflash = { + node_size : "8C64G" + storage_size_gib : 500 + node_quantity : 1 + } + } + ip_access_list = [{ + cidr = "0.0.0.0/0" + description = "all" + } + ] + } +}`, name) +} diff --git a/internal/provider/testwithcluster/backup_data_source_test.go b/internal/provider/testwithcluster/backup_data_source_test.go new file mode 100644 index 0000000..38e9b3e --- /dev/null +++ b/internal/provider/testwithcluster/backup_data_source_test.go @@ -0,0 +1,31 @@ +package testwithcluster + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccBackupDataSource(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + Steps: []resource.TestStep{ + // Read testing + { + Config: testAccBackupDataSourceConfig, + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrSet("data.tidbcloud_backup.test", "total"), + ), + }, + }, + }) +} + +var testAccBackupDataSourceConfig = fmt.Sprintf(` +data "tidbcloud_backup" "test" { + project_id = %s + cluster_id = %s +} +`, projectId, clusterId) diff --git a/internal/provider/testwithcluster/backup_resource_test.go b/internal/provider/testwithcluster/backup_resource_test.go new file mode 100644 index 0000000..f0da7db --- /dev/null +++ b/internal/provider/testwithcluster/backup_resource_test.go @@ -0,0 +1,68 @@ +package testwithcluster + +import ( + "fmt" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "regexp" + "testing" + "time" +) + +// make sure a dedicated cluster is set up already +func TestAccBackupResource(t *testing.T) { + reg, _ := regexp.Compile(".*backup can't be updated.*") + resource.Test(t, 
resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + Steps: []resource.TestStep{ + // Create and Read backup task + { + Config: testAccBackupResourceConfig("backup-test"), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrSet("tidbcloud_backup.test", "id"), + resource.TestCheckResourceAttrSet("tidbcloud_backup.test", "create_timestamp"), + resource.TestCheckResourceAttrSet("tidbcloud_backup.test", "type"), + resource.TestCheckResourceAttrSet("tidbcloud_backup.test", "size"), + resource.TestCheckResourceAttrSet("tidbcloud_backup.test", "status"), + ), + }, + // Test import + { + ResourceName: "tidbcloud_backup.test", + ImportState: true, + ImportStateIdPrefix: fmt.Sprintf("%s,%s,", projectId, clusterId), + }, + // Update is not supported + { + Config: testAccBackupResourceConfig("backup-test2"), + ExpectError: reg, + PreventPostDestroyRefresh: true, + }, + // just sleep 100s to wait backup ready, so that we can delete it + { + Config: testAccBackupResourceConfig("backup-test"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccBackupSleep(), + ), + }, + // Delete testing automatically occurs in TestCase + }, + }) +} + +func testAccBackupResourceConfig(name string) string { + return fmt.Sprintf(` +resource "tidbcloud_backup" "test" { + project_id = %s + cluster_id = %s + name = "%s" +}`, projectId, clusterId, name) +} + +func testAccBackupSleep() resource.TestCheckFunc { + return func(s *terraform.State) error { + time.Sleep(time.Duration(100) * time.Second) + return nil + } +} diff --git a/internal/provider/testwithcluster/basic_test.go b/internal/provider/testwithcluster/basic_test.go new file mode 100644 index 0000000..632c72d --- /dev/null +++ b/internal/provider/testwithcluster/basic_test.go @@ -0,0 +1,38 @@ +package testwithcluster + +import ( + "github.com/hashicorp/terraform-plugin-framework/providerserver" + 
"github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/tidbcloud/terraform-provider-tidbcloud/internal/provider" + "os" + "testing" +) + +var testAccProtoV6ProviderFactories = map[string]func() (tfprotov6.ProviderServer, error){ + "tidbcloud": providerserver.NewProtocol6WithError(provider.New("test")()), +} + +var ( + projectId = os.Getenv("TIDBCLOUD_PROJECTID") + clusterId = os.Getenv("TIDBCLOUD_CLUSTERID") +) + +func testAccPreCheck(t *testing.T) { + var username, password, projectId, clusterId string + username = os.Getenv("TIDBCLOUD_USERNAME") + password = os.Getenv("TIDBCLOUD_PASSWORD") + projectId = os.Getenv("TIDBCLOUD_PROJECTID") + clusterId = os.Getenv("TIDBCLOUD_CLUSTERID") + if username == "" { + t.Fatal("TIDBCLOUD_USERNAME must be set for acceptance tests") + } + if password == "" { + t.Fatal("TIDBCLOUD_PASSWORD must be set for acceptance tests") + } + if projectId == "" { + t.Fatal("TIDBCLOUD_PROJECTID must be set for acceptance tests") + } + if clusterId == "" { + t.Fatal("TIDBCLOUD_CLUSTERID must be set for acceptance tests") + } +} diff --git a/internal/provider/testwithproject/cluster_resource_test.go b/internal/provider/testwithproject/cluster_resource_test.go new file mode 100644 index 0000000..ac71738 --- /dev/null +++ b/internal/provider/testwithproject/cluster_resource_test.go @@ -0,0 +1,114 @@ +package testwithproject + +import ( + "fmt" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "regexp" + "testing" +) + +// create dedicated cluster may cause cost, make sure you have enough balance +// update node_quantity is not tested for create dedicated tier needs too much time! 
+func TestAccClusterResource(t *testing.T) { + reg, _ := regexp.Compile(".*Unable to update DEVELOPER cluster.*") + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + Steps: []resource.TestStep{ + // Create and Read dev-tier + { + Config: testAccDevClusterResourceConfig("developer-test"), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrSet("tidbcloud_cluster.test", "id"), + resource.TestCheckResourceAttrSet("tidbcloud_cluster.test", "config.components.tidb.node_quantity"), + resource.TestCheckResourceAttrSet("tidbcloud_cluster.test", "config.port"), + ), + }, + // Test import + { + ResourceName: "tidbcloud_cluster.test", + ImportState: true, + ImportStateIdPrefix: fmt.Sprintf("%s,", projectId), + }, + // Update is not supported + { + Config: testAccDevClusterResourceConfig("developer-test2"), + ExpectError: reg, + }, + // Delete testing automatically occurs in TestCase + }, + }) + reg2, _ := regexp.Compile(".*only components can be changed now.*") + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + Steps: []resource.TestStep{ + // Create and Read dedicated tier + { + Config: testAccDedicatedClusterResourceConfig("dedicated-test"), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrSet("tidbcloud_cluster.test", "id"), + resource.TestCheckResourceAttrSet("tidbcloud_cluster.test", "config.port"), + ), + }, + // Test import + { + ResourceName: "tidbcloud_cluster.test", + ImportState: true, + ImportStateIdPrefix: fmt.Sprintf("%s,", projectId), + }, + // only node_quantity can be updated now + { + Config: testAccDedicatedClusterResourceConfig("dedicated-test2"), + ExpectError: reg2, + }, + // Delete testing automatically occurs in TestCase + }, + }) +} + +func testAccDevClusterResourceConfig(name string) string { + return 
fmt.Sprintf(` +resource "tidbcloud_cluster" "test" { + project_id = %s + name = "%s" + cluster_type = "DEVELOPER" + cloud_provider = "AWS" + region = "us-east-1" + config = { + root_password = "Shiyuhang1." + ip_access_list = [{ + cidr = "0.0.0.0/0" + description = "all" + } + ] + } +} +`, projectId, name) +} + +func testAccDedicatedClusterResourceConfig(name string) string { + return fmt.Sprintf(` +resource "tidbcloud_cluster" "test" { + project_id = %s + name = "%s" + cluster_type = "DEDICATED" + cloud_provider = "AWS" + region = "us-east-1" + config = { + root_password = "Shiyuhang1." + components = { + tidb = { + node_size : "2C8G" + node_quantity : 1 + } + tikv = { + node_size : "2C8G" + storage_size_gib : 500, + node_quantity : 3 + } + } + } +} +`, projectId, name) +} diff --git a/internal/provider/example_data_source_test.go b/internal/provider/testwithproject/cluster_spec_data_source_test.go similarity index 54% rename from internal/provider/example_data_source_test.go rename to internal/provider/testwithproject/cluster_spec_data_source_test.go index 78e4375..061812b 100644 --- a/internal/provider/example_data_source_test.go +++ b/internal/provider/testwithproject/cluster_spec_data_source_test.go @@ -1,4 +1,4 @@ -package provider +package testwithproject import ( "testing" @@ -6,24 +6,23 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) -func TestAccExampleDataSource(t *testing.T) { +func TestAccClusterSpecDataSource(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, Steps: []resource.TestStep{ // Read testing { - Config: testAccExampleDataSourceConfig, + Config: testAccClusterSpecDataSourceConfig, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr("data.scaffolding_example.test", "id", "example-id"), + resource.TestCheckResourceAttrSet("data.tidbcloud_cluster_spec.test", "total"), ), }, }, }) } -const 
testAccExampleDataSourceConfig = ` -data "scaffolding_example" "test" { - configurable_attribute = "example" +const testAccClusterSpecDataSourceConfig = ` +data "tidbcloud_cluster_spec" "test" { } ` diff --git a/internal/provider/testwithproject/project_data_source_test.go b/internal/provider/testwithproject/project_data_source_test.go new file mode 100644 index 0000000..39b27d3 --- /dev/null +++ b/internal/provider/testwithproject/project_data_source_test.go @@ -0,0 +1,28 @@ +package testwithproject + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccProjectDataSource(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + Steps: []resource.TestStep{ + // Read testing + { + Config: testAccProjectDataSourceConfig, + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrSet("data.tidbcloud_project.test", "total"), + ), + }, + }, + }) +} + +const testAccProjectDataSourceConfig = ` +data "tidbcloud_project" "test" { +} +` diff --git a/internal/provider/testwithproject/provider_test.go b/internal/provider/testwithproject/provider_test.go new file mode 100644 index 0000000..5b82385 --- /dev/null +++ b/internal/provider/testwithproject/provider_test.go @@ -0,0 +1,39 @@ +package testwithproject + +import ( + "github.com/tidbcloud/terraform-provider-tidbcloud/internal/provider" + "os" + "testing" + + "github.com/hashicorp/terraform-plugin-framework/providerserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// testAccProtoV6ProviderFactories are used to instantiate a provider during +// acceptance testing. The factory function will be invoked for every Terraform +// CLI command executed to create a provider server to which the CLI can +// reattach. 
+var testAccProtoV6ProviderFactories = map[string]func() (tfprotov6.ProviderServer, error){ + "tidbcloud": providerserver.NewProtocol6WithError(provider.New("test")()), +} + +var ( + projectId = os.Getenv("TIDBCLOUD_PROJECTID") + clusterId = os.Getenv("TIDBCLOUD_CLUSTERID") +) + +func testAccPreCheck(t *testing.T) { + var username, password, projectId string + username = os.Getenv("TIDBCLOUD_USERNAME") + password = os.Getenv("TIDBCLOUD_PASSWORD") + projectId = os.Getenv("TIDBCLOUD_PROJECTID") + if username == "" { + t.Fatal("TIDBCLOUD_USERNAME must be set for acceptance tests") + } + if password == "" { + t.Fatal("TIDBCLOUD_PASSWORD must be set for acceptance tests") + } + if projectId == "" { + t.Fatal("TIDBCLOUD_PROJECTID must be set for acceptance tests") + } +} diff --git a/internal/provider/testwithproject/restore_data_source_test.go b/internal/provider/testwithproject/restore_data_source_test.go new file mode 100644 index 0000000..d1c89b3 --- /dev/null +++ b/internal/provider/testwithproject/restore_data_source_test.go @@ -0,0 +1,30 @@ +package testwithproject + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccRestoreDataSource(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + Steps: []resource.TestStep{ + // Read testing + { + Config: testAccRestoreDataSourceConfig, + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrSet("data.tidbcloud_restore.test", "total"), + ), + }, + }, + }) +} + +var testAccRestoreDataSourceConfig = fmt.Sprintf(` +data "tidbcloud_restore" "test" { + project_id = %s +} +`, projectId) diff --git a/main.go b/main.go index bf7caca..8a6a293 100644 --- a/main.go +++ b/main.go @@ -3,10 +3,9 @@ package main import ( "context" "flag" - "log" - "github.com/hashicorp/terraform-plugin-framework/providerserver" - 
"github.com/hashicorp/terraform-provider-scaffolding-framework/internal/provider" + "github.com/tidbcloud/terraform-provider-tidbcloud/internal/provider" + "log" ) // Run "go generate" to format example terraform files and generate the docs for the registry/website @@ -22,7 +21,7 @@ import ( var ( // these will be set by the goreleaser configuration // to appropriate values for the compiled binary - version string = "dev" + version string = "0.0.1" // goreleaser can also pass the specific commit if you want // commit string = "" @@ -36,7 +35,7 @@ func main() { opts := providerserver.ServeOpts{ // TODO: Update this string with the published name of your provider. - Address: "registry.terraform.io/hashicorp/scaffolding", + Address: "registry.terraform.io/tidbcloud/terraform-provider-tidbcloud", Debug: debug, } @@ -45,4 +44,5 @@ func main() { if err != nil { log.Fatal(err.Error()) } + } diff --git a/tidbcloud/api.go b/tidbcloud/api.go new file mode 100644 index 0000000..e2a9a43 --- /dev/null +++ b/tidbcloud/api.go @@ -0,0 +1,196 @@ +package tidbcloud + +import ( + "fmt" +) + +type TiDBCloudClient struct { +} + +func NewTiDBCloudClient(publicKey, privateKey string) (*TiDBCloudClient, error) { + initClient(publicKey, privateKey) + c := TiDBCloudClient{} + return &c, nil +} + +// GetSpecifications returns all the available specifications +func (c *TiDBCloudClient) GetSpecifications() (*GetSpecificationsResp, error) { + var ( + url = fmt.Sprintf("%s/api/v1beta/clusters/provider/regions", host) + result GetSpecificationsResp + ) + + _, err := doGET(url, nil, &result) + if err != nil { + return nil, err + } + + return &result, nil +} + +// GetAllProjects returns all the projects +func (c *TiDBCloudClient) GetAllProjects(page, pageSize int64) (*GetAllProjectsResp, error) { + var ( + url = fmt.Sprintf("%s/api/v1beta/projects?page=%d&page_size=%d", host, page, pageSize) + result GetAllProjectsResp + ) + + _, err := doGET(url, nil, &result) + if err != nil { + return nil, err + 
} + + return &result, nil +} + +// CreateCluster create a cluster in the given project +func (c *TiDBCloudClient) CreateCluster(projectID string, clusterReq *CreateClusterReq) (*CreateClusterResp, error) { + var ( + url = fmt.Sprintf("%s/api/v1beta/projects/%s/clusters", host, projectID) + result CreateClusterResp + ) + + _, err := doPOST(url, clusterReq, &result) + if err != nil { + return nil, err + } + + return &result, nil +} + +// GetClusterById return detail status of given cluster +func (c *TiDBCloudClient) GetClusterById(projectID string, clusterID string) (*GetClusterResp, error) { + var ( + url = fmt.Sprintf("%s/api/v1beta/projects/%s/clusters/%s", host, projectID, clusterID) + result GetClusterResp + ) + + _, err := doGET(url, nil, &result) + if err != nil { + return nil, err + } + + return &result, nil +} + +// DeleteClusterById delete a cluster by the given ID +func (c *TiDBCloudClient) DeleteClusterById(projectID, clusterID string) error { + url := fmt.Sprintf("%s/api/v1beta/projects/%s/clusters/%s", host, projectID, clusterID) + _, err := doDELETE(url, nil, nil) + if err != nil { + return err + } + + return nil +} + +// UpdateClusterById can only scale out and set pause now +func (c *TiDBCloudClient) UpdateClusterById(projectID, clusterID string, updateClusterReq UpdateClusterReq) error { + url := fmt.Sprintf("%s/api/v1beta/projects/%s/clusters/%s", host, projectID, clusterID) + _, err := doPATCH(url, updateClusterReq, nil) + if err != nil { + return err + } + return nil +} + +// CreateBackup can create a backup for the cluster +func (c *TiDBCloudClient) CreateBackup(projectID, clusterID string, req CreateBackupReq) (*CreateBackupResp, error) { + var ( + url = fmt.Sprintf("%s/api/v1beta/projects/%s/clusters/%s/backups", host, projectID, clusterID) + result CreateBackupResp + ) + + _, err := doPOST(url, req, &result) + if err != nil { + return nil, err + } + + return &result, nil +} + +// GetBackupById show the detail of the bakcup +func (c 
*TiDBCloudClient) GetBackupById(projectID, clusterID, backupID string) (*GetBackupResp, error) { + var ( + url = fmt.Sprintf("%s/api/v1beta/projects/%s/clusters/%s/backups/%s", host, projectID, clusterID, backupID) + result GetBackupResp + ) + + _, err := doGET(url, nil, &result) + if err != nil { + return nil, err + } + + return &result, nil +} + +// DeleteBackupById delete a backup +func (c *TiDBCloudClient) DeleteBackupById(projectID, clusterID, backupID string) error { + url := fmt.Sprintf("%s/api/v1beta/projects/%s/clusters/%s/backups/%s", host, projectID, clusterID, backupID) + _, err := doDELETE(url, nil, nil) + if err != nil { + return err + } + + return nil +} + +// GetBackups get all the backups +func (c *TiDBCloudClient) GetBackups(projectID, clusterID string, page, pageSize int64) (*GetBackupsResp, error) { + var ( + url = fmt.Sprintf("%s/api/v1beta/projects/%s/clusters/%s/backups?page=%d&page_size=%d", host, projectID, clusterID, page, pageSize) + result GetBackupsResp + ) + + _, err := doGET(url, nil, &result) + if err != nil { + return nil, err + } + + return &result, nil +} + +// CreateRestoreTask create a restore task from a backup +func (c *TiDBCloudClient) CreateRestoreTask(projectID string, req CreateRestoreTaskReq) (*CreateRestoreTaskResp, error) { + var ( + url = fmt.Sprintf("%s/api/v1beta/projects/%s/restores", host, projectID) + result CreateRestoreTaskResp + ) + + _, err := doPOST(url, req, &result) + if err != nil { + return nil, err + } + + return &result, nil +} + +// GetRestoreTask show the details of the restore task +func (c *TiDBCloudClient) GetRestoreTask(projectID, restoreId string) (*GetRestoreTaskResp, error) { + var ( + url = fmt.Sprintf("%s/api/v1beta/projects/%s/restores/%s", host, projectID, restoreId) + result GetRestoreTaskResp + ) + + _, err := doGET(url, nil, &result) + if err != nil { + return nil, err + } + + return &result, nil +} + +// GetRestoreTasks get All the restore tasks +func (c *TiDBCloudClient) 
GetRestoreTasks(projectID string, page, pageSize int64) (*GetRestoreTasksResp, error) { + var ( + url = fmt.Sprintf("%s/api/v1beta/projects/%s/restores?page=%d&page_size=%d", host, projectID, page, pageSize) + result GetRestoreTasksResp + ) + + _, err := doGET(url, nil, &result) + if err != nil { + return nil, err + } + + return &result, nil +} diff --git a/tidbcloud/client.go b/tidbcloud/client.go new file mode 100644 index 0000000..2f75ab5 --- /dev/null +++ b/tidbcloud/client.go @@ -0,0 +1,81 @@ +package tidbcloud + +import ( + "encoding/json" + "fmt" + "github.com/go-resty/resty/v2" + "github.com/icholy/digest" + "log" + "net/http" + "os" + "sync" +) + +var ( + clientInitOnce sync.Once + + restClient *resty.Client +) + +var host = "https://api.tidbcloud.com" + +func initClient(publicKey, privateKey string) { + clientInitOnce.Do(func() { + restClient = resty.New() + restClient.SetTransport(&digest.Transport{ + Username: publicKey, + Password: privateKey, + }) + }) + // only for test + if os.Getenv("TIDBCLOUD_HOST") != "" { + host = os.Getenv("TIDBCLOUD_HOST") + } +} + +// doRequest wraps resty request, it's a generic method to spawn a HTTP request +func doRequest(method, url string, payload, output interface{}) (*resty.Response, error) { + request := restClient.R() + + // if payload is not nil, we'll put it on body + if payload != nil { + request.SetBody(payload) + } + + // execute the request + resp, err := request.Execute(method, url) + b, _ := json.Marshal(payload) + log.Printf("\npayload: %s\n", b) + log.Printf("\nRequest: method %s, url %s, response %s\n\n", method, url, resp) + if err != nil { + return nil, err + } + + // if the request return a non-200 response, wrap it with error + if resp.StatusCode() != http.StatusOK { + return resp, fmt.Errorf("failed with status %d and resp %s", resp.StatusCode(), resp) + } + + // if we need to unmarshal the response into a struct, we pass it here, otherwise pass nil in the argument + if output != nil { + return resp, 
json.Unmarshal(resp.Body(), output) + } + + return resp, nil +} + +func doGET(url string, payload, output interface{}) (*resty.Response, error) { + return doRequest(resty.MethodGet, url, payload, output) +} + +func doPOST(url string, payload, output interface{}) (*resty.Response, error) { + return doRequest(resty.MethodPost, url, payload, output) +} + +func doDELETE(url string, payload, output interface{}) (*resty.Response, error) { + return doRequest(resty.MethodDelete, url, payload, output) +} + +func doPATCH(url string, payload, output interface{}) (*resty.Response, error) { + return doRequest(resty.MethodPatch, url, payload, output) +} diff --git a/tidbcloud/type.go b/tidbcloud/type.go new file mode 100644 index 0000000..127e2e6 --- /dev/null +++ b/tidbcloud/type.go @@ -0,0 +1,192 @@ +package tidbcloud + +type Project struct { + Id string `json:"id"` + OrgId string `json:"org_id"` + Name string `json:"name"` + ClusterCount int64 `json:"cluster_count"` + UserCount int64 `json:"user_count"` + CreateTimestamp string `json:"create_timestamp"` +} + +type ConnectionString struct { + Standard string `json:"standard"` + VpcPeering string `json:"vpc_peering"` +} + +type IPAccess struct { + CIDR string `json:"cidr"` + Description string `json:"description"` +} + +type ComponentTiDB struct { + NodeSize string `json:"node_size"` + NodeQuantity int `json:"node_quantity,omitempty"` +} + +type ComponentTiKV struct { + NodeSize string `json:"node_size"` + StorageSizeGib int `json:"storage_size_gib"` + NodeQuantity int `json:"node_quantity,omitempty"` +} + +type ComponentTiFlash struct { + NodeSize string `json:"node_size"` + StorageSizeGib int `json:"storage_size_gib"` + NodeQuantity int `json:"node_quantity,omitempty"` +} + +type Components struct { + TiDB ComponentTiDB `json:"tidb"` + TiKV ComponentTiKV `json:"tikv"` + TiFlash *ComponentTiFlash `json:"tiflash,omitempty"` +} + +type ClusterConfig struct { + RootPassword string `json:"root_password"` + Port int `json:"port"` + 
Components Components `json:"components"` + IPAccessList []IPAccess `json:"ip_access_list"` +} + +type ClusterStatus struct { + TidbVersion string `json:"tidb_version"` + ClusterStatus string `json:"cluster_status"` +} + +type CreateClusterReq struct { + Name string `json:"name"` + ClusterType string `json:"cluster_type"` + CloudProvider string `json:"cloud_provider"` + Region string `json:"region"` + Config ClusterConfig `json:"config"` +} + +type CreateClusterResp struct { + ClusterId uint64 `json:"id,string"` + Message string `json:"message"` +} + +type UpdateClusterReq struct { + Config UpdateClusterConfig `json:"config"` +} + +type UpdateClusterConfig struct { + Paused *bool `json:"paused,omitempty"` + Components *Components `json:"components,omitempty"` +} + +type GetAllProjectsResp struct { + Items []Project `json:"items"` + Total int64 `json:"total"` +} + +type GetClusterResp struct { + Id uint64 `json:"id,string"` + ProjectId uint64 `json:"project_id,string"` + Name string `json:"name"` + Port int32 `json:"port"` + TiDBVersion string `json:"tidb_version"` + ClusterType string `json:"cluster_type"` + CloudProvider string `json:"cloud_provider"` + Region string `json:"region"` + Status ClusterStatus `json:"status"` + CreateTimestamp string `json:"create_timestamp"` + Config ClusterConfig `json:"config"` + ConnectionStrings ConnectionString `json:"connection_strings"` +} + +type Specification struct { + ClusterType string `json:"cluster_type"` + CloudProvider string `json:"cloud_provider"` + Region string `json:"region"` + Tidb []struct { + NodeSize string `json:"node_size"` + NodeQuantityRange struct { + Min int `json:"min"` + Step int `json:"step"` + } `json:"node_quantity_range"` + } `json:"tidb"` + Tikv []struct { + NodeSize string `json:"node_size"` + NodeQuantityRange struct { + Min int `json:"min"` + Step int `json:"step"` + } `json:"node_quantity_range"` + StorageSizeGibRange struct { + Min int `json:"min"` + Max int `json:"max"` + } 
`json:"storage_size_gib_range"` + } `json:"tikv"` + Tiflash []struct { + NodeSize string `json:"node_size"` + NodeQuantityRange struct { + Min int `json:"min"` + Step int `json:"step"` + } `json:"node_quantity_range"` + StorageSizeGibRange struct { + Min int `json:"min"` + Max int `json:"max"` + } `json:"storage_size_gib_range"` + } `json:"tiflash"` +} + +type GetSpecificationsResp struct { + Items []Specification `json:"items"` +} + +type CreateBackupResp struct { + BackupId string `json:"id"` +} + +type CreateBackupReq struct { + Name string `json:"name"` + Description string `json:"description"` +} + +type GetBackupResp struct { + Id string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + Type string `json:"type"` + Size string `json:"size"` + Status string `json:"status"` + CreateTimestamp string `json:"create_timestamp"` +} + +type GetBackupsResp struct { + Items []GetBackupResp `json:"items"` + Total int64 `json:"total"` +} + +type CreateRestoreTaskReq struct { + BackupId string `json:"backup_id"` + Name string `json:"name"` + Config ClusterConfig `json:"config"` +} + +type CreateRestoreTaskResp struct { + Id string `json:"id"` + ClusterId string `json:"cluster_id"` +} + +type GetRestoreTaskResp struct { + Id string `json:"id"` + CreateTimestamp string `json:"create_timestamp"` + BackupId string `json:"backup_id"` + ClusterId string `json:"cluster_id"` + Status string `json:"status"` + Cluster ClusterInfo `json:"cluster"` + ErrorMessage string `json:"error_message"` +} + +type ClusterInfo struct { + Id string `json:"id"` + Name string `json:"name"` + Status string `json:"status"` +} + +type GetRestoreTasksResp struct { + Items []GetRestoreTaskResp `json:"items"` + Total int64 `json:"total"` +}