diff --git a/.githooks/pre-commit b/.githooks/pre-commit
new file mode 100755
index 00000000..f92e8bf2
--- /dev/null
+++ b/.githooks/pre-commit
@@ -0,0 +1,10 @@
+#!/bin/sh
+set -e
+echo "Running pre-commit hook"
+make fmt
+make build
+go generate ./...
+# Add examples and docs files to git after documentation is generated
+git add examples/
+git add docs/
+echo "Pre-commit hook finished"
diff --git a/Makefile b/Makefile
index 86523808..512855d1 100644
--- a/Makefile
+++ b/Makefile
@@ -37,6 +37,7 @@ validate-fmt:
.PHONY: dep
dep:
+ git config core.hooksPath .githooks
go mod download
go install golang.org/x/tools/cmd/goimports
go mod tidy
diff --git a/README.md b/README.md
index d5104ad1..389a6d80 100644
--- a/README.md
+++ b/README.md
@@ -63,7 +63,9 @@ If you wish to work on the provider, you'll first need [Go](http://www.golang.or
To compile the provider, see [Building The Provider](## Building The Provider).
-To run terraform with the provider, create a `.terraformrc` file in your home directory with the following content to override the provider installation with the local build:
+To add example docs, add the corresponding `.tf` files to the `examples` directory.
+
+To run terraform with the provider, create a `.terraformrc` file in your home directory (`~`) with the following content to override the provider installation with the local build:
```hcl
provider_installation {
diff --git a/docs/data-sources/deployment.md b/docs/data-sources/deployment.md
new file mode 100644
index 00000000..f9f8ed29
--- /dev/null
+++ b/docs/data-sources/deployment.md
@@ -0,0 +1,189 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "astronomer_deployment Data Source - astronomer"
+subcategory: ""
+description: |-
+ Deployment data source
+---
+
+# astronomer_deployment (Data Source)
+
+Deployment data source
+
+## Example Usage
+
+```terraform
+data "astronomer_deployment" "example" {
+ id = "clozc036j01to01jrlgvueo8t"
+}
+```
+
+
+## Schema
+
+### Required
+
+- `id` (String) Deployment identifier
+
+### Read-Only
+
+- `airflow_version` (String) Deployment Airflow version
+- `astro_runtime_version` (String) Deployment Astro Runtime version
+- `cloud_provider` (String) Deployment cloud provider
+- `cluster_id` (String) Deployment cluster identifier
+- `contact_emails` (List of String) Deployment contact emails
+- `created_at` (String) Deployment creation timestamp
+- `created_by` (Attributes) Deployment creator (see [below for nested schema](#nestedatt--created_by))
+- `dag_tarball_version` (String) Deployment DAG tarball version
+- `default_task_pod_cpu` (String) Deployment default task pod CPU
+- `default_task_pod_memory` (String) Deployment default task pod memory
+- `description` (String) Deployment description
+- `desired_dag_tarball_version` (String) Deployment desired DAG tarball version
+- `environment_variables` (Attributes List) Deployment environment variables (see [below for nested schema](#nestedatt--environment_variables))
+- `executor` (String) Deployment executor
+- `external_ips` (List of String) Deployment external IPs
+- `image_repository` (String) Deployment image repository
+- `image_tag` (String) Deployment image tag
+- `image_version` (String) Deployment image version
+- `is_cicd_enforced` (Boolean) Whether the Deployment enforces CI/CD deploys
+- `is_dag_deploy_enabled` (Boolean) Whether DAG deploy is enabled
+- `is_development_mode` (Boolean) Whether Deployment is in development mode
+- `is_high_availability` (Boolean) Whether Deployment has high availability
+- `name` (String) Deployment name
+- `namespace` (String) Deployment namespace
+- `oidc_issuer_url` (String) Deployment OIDC issuer URL
+- `region` (String) Deployment region
+- `resource_quota_cpu` (String) Deployment resource quota CPU
+- `resource_quota_memory` (String) Deployment resource quota memory
+- `scaling_spec` (Attributes) Deployment scaling spec (see [below for nested schema](#nestedatt--scaling_spec))
+- `scaling_status` (Attributes) Deployment scaling status (see [below for nested schema](#nestedatt--scaling_status))
+- `scheduler_au` (Number) Deployment scheduler AU
+- `scheduler_cpu` (String) Deployment scheduler CPU
+- `scheduler_memory` (String) Deployment scheduler memory
+- `scheduler_replicas` (Number) Deployment scheduler replicas
+- `scheduler_size` (String) Deployment scheduler size
+- `status` (String) Deployment status
+- `status_reason` (String) Deployment status reason
+- `task_pod_node_pool_id` (String) Deployment task pod node pool identifier
+- `type` (String) Deployment type
+- `updated_at` (String) Deployment last updated timestamp
+- `updated_by` (Attributes) Deployment updater (see [below for nested schema](#nestedatt--updated_by))
+- `webserver_airflow_api_url` (String) Deployment webserver Airflow API URL
+- `webserver_cpu` (String) Deployment webserver CPU
+- `webserver_ingress_hostname` (String) Deployment webserver ingress hostname
+- `webserver_memory` (String) Deployment webserver memory
+- `webserver_replicas` (Number) Deployment webserver replicas
+- `webserver_url` (String) Deployment webserver URL
+- `worker_queues` (Attributes List) Deployment worker queues (see [below for nested schema](#nestedatt--worker_queues))
+- `workload_identity` (String) Deployment workload identity
+- `workspace_id` (String) Deployment workspace identifier
+
+
+### Nested Schema for `created_by`
+
+Read-Only:
+
+- `api_token_name` (String)
+- `avatar_url` (String)
+- `full_name` (String)
+- `id` (String)
+- `subject_type` (String)
+- `username` (String)
+
+
+
+### Nested Schema for `environment_variables`
+
+Read-Only:
+
+- `is_secret` (Boolean) Whether Environment variable is a secret
+- `key` (String) Environment variable key
+- `updated_at` (String) Environment variable last updated timestamp
+- `value` (String) Environment variable value
+
+
+
+### Nested Schema for `scaling_spec`
+
+Read-Only:
+
+- `hibernation_spec` (Attributes) (see [below for nested schema](#nestedatt--scaling_spec--hibernation_spec))
+
+
+### Nested Schema for `scaling_spec.hibernation_spec`
+
+Read-Only:
+
+- `override` (Attributes) (see [below for nested schema](#nestedatt--scaling_spec--hibernation_spec--override))
+- `schedules` (Attributes List) (see [below for nested schema](#nestedatt--scaling_spec--hibernation_spec--schedules))
+
+
+### Nested Schema for `scaling_spec.hibernation_spec.override`
+
+Read-Only:
+
+- `is_active` (Boolean) Whether the override is active
+- `is_hibernating` (Boolean) Whether the override is hibernating
+- `override_until` (String) Time until the override is active
+
+
+
+### Nested Schema for `scaling_spec.hibernation_spec.schedules`
+
+Read-Only:
+
+- `description` (String) Description of the schedule
+- `hibernate_at_cron` (String) Cron expression for hibernation
+- `is_enabled` (Boolean) Whether the schedule is enabled
+- `wake_at_cron` (String) Cron expression for waking
+
+
+
+
+
+### Nested Schema for `scaling_status`
+
+Read-Only:
+
+- `hibernation_status` (Attributes) (see [below for nested schema](#nestedatt--scaling_status--hibernation_status))
+
+
+### Nested Schema for `scaling_status.hibernation_status`
+
+Read-Only:
+
+- `is_hibernating` (Boolean) Whether the deployment is hibernating
+- `next_event_at` (String) Time of the next event
+- `next_event_type` (String) Type of the next event
+- `reason` (String) Reason for the current state
+
+
+
+
+### Nested Schema for `updated_by`
+
+Read-Only:
+
+- `api_token_name` (String)
+- `avatar_url` (String)
+- `full_name` (String)
+- `id` (String)
+- `subject_type` (String)
+- `username` (String)
+
+
+
+### Nested Schema for `worker_queues`
+
+Read-Only:
+
+- `astro_machine` (String) Worker queue Astro machine value
+- `id` (String) Worker queue identifier
+- `is_default` (Boolean) Whether Worker queue is default
+- `max_worker_count` (Number) Worker queue max worker count
+- `min_worker_count` (Number) Worker queue min worker count
+- `name` (String) Worker queue name
+- `node_pool_id` (String) Worker queue node pool identifier
+- `pod_cpu` (String) Worker queue pod CPU
+- `pod_memory` (String) Worker queue pod memory
+- `worker_concurrency` (Number) Worker queue worker concurrency
diff --git a/docs/data-sources/deployments.md b/docs/data-sources/deployments.md
new file mode 100644
index 00000000..ae618a7e
--- /dev/null
+++ b/docs/data-sources/deployments.md
@@ -0,0 +1,212 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "astronomer_deployments Data Source - astronomer"
+subcategory: ""
+description: |-
+ Deployments data source
+---
+
+# astronomer_deployments (Data Source)
+
+Deployments data source
+
+## Example Usage
+
+```terraform
+data "astronomer_deployments" "example_deployments" {}
+
+data "astronomer_deployments" "example_deployments_filter_by_deployment_ids" {
+ deployment_ids = ["clozc036j01to01jrlgvueo8t", "clozc036j01to01jrlgvueo81"]
+}
+
+data "astronomer_deployments" "example_deployments_filter_by_workspace_ids" {
+ workspace_ids = ["clozc036j01to01jrlgvueo8t", "clozc036j01to01jrlgvueo81"]
+}
+
+data "astronomer_deployments" "example_deployments_filter_by_names" {
+ names = ["my first deployment", "my second deployment"]
+}
+```
+
+
+## Schema
+
+### Optional
+
+- `deployment_ids` (List of String)
+- `names` (List of String)
+- `workspace_ids` (List of String)
+
+### Read-Only
+
+- `deployments` (Attributes List) (see [below for nested schema](#nestedatt--deployments))
+
+
+### Nested Schema for `deployments`
+
+Required:
+
+- `id` (String) Deployment identifier
+
+Read-Only:
+
+- `airflow_version` (String) Deployment Airflow version
+- `astro_runtime_version` (String) Deployment Astro Runtime version
+- `cloud_provider` (String) Deployment cloud provider
+- `cluster_id` (String) Deployment cluster identifier
+- `contact_emails` (List of String) Deployment contact emails
+- `created_at` (String) Deployment creation timestamp
+- `created_by` (Attributes) Deployment creator (see [below for nested schema](#nestedatt--deployments--created_by))
+- `dag_tarball_version` (String) Deployment DAG tarball version
+- `default_task_pod_cpu` (String) Deployment default task pod CPU
+- `default_task_pod_memory` (String) Deployment default task pod memory
+- `description` (String) Deployment description
+- `desired_dag_tarball_version` (String) Deployment desired DAG tarball version
+- `environment_variables` (Attributes List) Deployment environment variables (see [below for nested schema](#nestedatt--deployments--environment_variables))
+- `executor` (String) Deployment executor
+- `external_ips` (List of String) Deployment external IPs
+- `image_repository` (String) Deployment image repository
+- `image_tag` (String) Deployment image tag
+- `image_version` (String) Deployment image version
+- `is_cicd_enforced` (Boolean) Whether the Deployment enforces CI/CD deploys
+- `is_dag_deploy_enabled` (Boolean) Whether DAG deploy is enabled
+- `is_development_mode` (Boolean) Whether Deployment is in development mode
+- `is_high_availability` (Boolean) Whether Deployment has high availability
+- `name` (String) Deployment name
+- `namespace` (String) Deployment namespace
+- `oidc_issuer_url` (String) Deployment OIDC issuer URL
+- `region` (String) Deployment region
+- `resource_quota_cpu` (String) Deployment resource quota CPU
+- `resource_quota_memory` (String) Deployment resource quota memory
+- `scaling_spec` (Attributes) Deployment scaling spec (see [below for nested schema](#nestedatt--deployments--scaling_spec))
+- `scaling_status` (Attributes) Deployment scaling status (see [below for nested schema](#nestedatt--deployments--scaling_status))
+- `scheduler_au` (Number) Deployment scheduler AU
+- `scheduler_cpu` (String) Deployment scheduler CPU
+- `scheduler_memory` (String) Deployment scheduler memory
+- `scheduler_replicas` (Number) Deployment scheduler replicas
+- `scheduler_size` (String) Deployment scheduler size
+- `status` (String) Deployment status
+- `status_reason` (String) Deployment status reason
+- `task_pod_node_pool_id` (String) Deployment task pod node pool identifier
+- `type` (String) Deployment type
+- `updated_at` (String) Deployment last updated timestamp
+- `updated_by` (Attributes) Deployment updater (see [below for nested schema](#nestedatt--deployments--updated_by))
+- `webserver_airflow_api_url` (String) Deployment webserver Airflow API URL
+- `webserver_cpu` (String) Deployment webserver CPU
+- `webserver_ingress_hostname` (String) Deployment webserver ingress hostname
+- `webserver_memory` (String) Deployment webserver memory
+- `webserver_replicas` (Number) Deployment webserver replicas
+- `webserver_url` (String) Deployment webserver URL
+- `worker_queues` (Attributes List) Deployment worker queues (see [below for nested schema](#nestedatt--deployments--worker_queues))
+- `workload_identity` (String) Deployment workload identity
+- `workspace_id` (String) Deployment workspace identifier
+
+
+### Nested Schema for `deployments.created_by`
+
+Read-Only:
+
+- `api_token_name` (String)
+- `avatar_url` (String)
+- `full_name` (String)
+- `id` (String)
+- `subject_type` (String)
+- `username` (String)
+
+
+
+### Nested Schema for `deployments.environment_variables`
+
+Read-Only:
+
+- `is_secret` (Boolean) Whether Environment variable is a secret
+- `key` (String) Environment variable key
+- `updated_at` (String) Environment variable last updated timestamp
+- `value` (String) Environment variable value
+
+
+
+### Nested Schema for `deployments.scaling_spec`
+
+Read-Only:
+
+- `hibernation_spec` (Attributes) (see [below for nested schema](#nestedatt--deployments--scaling_spec--hibernation_spec))
+
+
+### Nested Schema for `deployments.scaling_spec.hibernation_spec`
+
+Read-Only:
+
+- `override` (Attributes) (see [below for nested schema](#nestedatt--deployments--scaling_spec--hibernation_spec--override))
+- `schedules` (Attributes List) (see [below for nested schema](#nestedatt--deployments--scaling_spec--hibernation_spec--schedules))
+
+
+### Nested Schema for `deployments.scaling_spec.hibernation_spec.override`
+
+Read-Only:
+
+- `is_active` (Boolean) Whether the override is active
+- `is_hibernating` (Boolean) Whether the override is hibernating
+- `override_until` (String) Time until the override is active
+
+
+
+### Nested Schema for `deployments.scaling_spec.hibernation_spec.schedules`
+
+Read-Only:
+
+- `description` (String) Description of the schedule
+- `hibernate_at_cron` (String) Cron expression for hibernation
+- `is_enabled` (Boolean) Whether the schedule is enabled
+- `wake_at_cron` (String) Cron expression for waking
+
+
+
+
+
+### Nested Schema for `deployments.scaling_status`
+
+Read-Only:
+
+- `hibernation_status` (Attributes) (see [below for nested schema](#nestedatt--deployments--scaling_status--hibernation_status))
+
+
+### Nested Schema for `deployments.scaling_status.hibernation_status`
+
+Read-Only:
+
+- `is_hibernating` (Boolean) Whether the deployment is hibernating
+- `next_event_at` (String) Time of the next event
+- `next_event_type` (String) Type of the next event
+- `reason` (String) Reason for the current state
+
+
+
+
+### Nested Schema for `deployments.updated_by`
+
+Read-Only:
+
+- `api_token_name` (String)
+- `avatar_url` (String)
+- `full_name` (String)
+- `id` (String)
+- `subject_type` (String)
+- `username` (String)
+
+
+
+### Nested Schema for `deployments.worker_queues`
+
+Read-Only:
+
+- `astro_machine` (String) Worker queue Astro machine value
+- `id` (String) Worker queue identifier
+- `is_default` (Boolean) Whether Worker queue is default
+- `max_worker_count` (Number) Worker queue max worker count
+- `min_worker_count` (Number) Worker queue min worker count
+- `name` (String) Worker queue name
+- `node_pool_id` (String) Worker queue node pool identifier
+- `pod_cpu` (String) Worker queue pod CPU
+- `pod_memory` (String) Worker queue pod memory
+- `worker_concurrency` (Number) Worker queue worker concurrency
diff --git a/docs/data-sources/workspace.md b/docs/data-sources/workspace.md
index d85e8edd..25256922 100644
--- a/docs/data-sources/workspace.md
+++ b/docs/data-sources/workspace.md
@@ -32,7 +32,6 @@ data "astronomer_workspace" "example" {
- `created_by` (Attributes) Workspace creator (see [below for nested schema](#nestedatt--created_by))
- `description` (String) Workspace description
- `name` (String) Workspace name
-- `organization_name` (String) Workspace organization name
- `updated_at` (String) Workspace last updated timestamp
- `updated_by` (Attributes) Workspace updater (see [below for nested schema](#nestedatt--updated_by))
diff --git a/docs/data-sources/workspaces.md b/docs/data-sources/workspaces.md
index 72aa08dd..baccb85d 100644
--- a/docs/data-sources/workspaces.md
+++ b/docs/data-sources/workspaces.md
@@ -10,7 +10,19 @@ description: |-
Workspaces data source
+## Example Usage
+```terraform
+data "astronomer_workspaces" "example_workspaces" {}
+
+data "astronomer_workspaces" "example_workspaces_filter_by_workspace_ids" {
+ workspace_ids = ["clozc036j01to01jrlgvueo8t", "clozc036j01to01jrlgvueo81"]
+}
+
+data "astronomer_workspaces" "example_workspaces_filter_by_names" {
+ names = ["my first workspace", "my second workspace"]
+}
+```
## Schema
@@ -38,7 +50,6 @@ Read-Only:
- `created_by` (Attributes) Workspace creator (see [below for nested schema](#nestedatt--workspaces--created_by))
- `description` (String) Workspace description
- `name` (String) Workspace name
-- `organization_name` (String) Workspace organization name
- `updated_at` (String) Workspace last updated timestamp
- `updated_by` (Attributes) Workspace updater (see [below for nested schema](#nestedatt--workspaces--updated_by))
diff --git a/docs/resources/workspace.md b/docs/resources/workspace.md
index eee67de9..8eb2dc62 100644
--- a/docs/resources/workspace.md
+++ b/docs/resources/workspace.md
@@ -34,7 +34,6 @@ resource "workspace_resource" "example" {
- `created_at` (String) Workspace creation timestamp
- `created_by` (Attributes) Workspace creator (see [below for nested schema](#nestedatt--created_by))
- `id` (String) Workspace identifier
-- `organization_name` (String) Workspace organization name
- `updated_at` (String) Workspace last updated timestamp
- `updated_by` (Attributes) Workspace updater (see [below for nested schema](#nestedatt--updated_by))
diff --git a/examples/data-sources/astronomer_deployment/data-source.tf b/examples/data-sources/astronomer_deployment/data-source.tf
new file mode 100644
index 00000000..fa9333ea
--- /dev/null
+++ b/examples/data-sources/astronomer_deployment/data-source.tf
@@ -0,0 +1,3 @@
+data "astronomer_deployment" "example" {
+ id = "clozc036j01to01jrlgvueo8t"
+}
diff --git a/examples/data-sources/astronomer_deployments/data-source.tf b/examples/data-sources/astronomer_deployments/data-source.tf
new file mode 100644
index 00000000..cba8eb08
--- /dev/null
+++ b/examples/data-sources/astronomer_deployments/data-source.tf
@@ -0,0 +1,13 @@
+data "astronomer_deployments" "example_deployments" {}
+
+data "astronomer_deployments" "example_deployments_filter_by_deployment_ids" {
+ deployment_ids = ["clozc036j01to01jrlgvueo8t", "clozc036j01to01jrlgvueo81"]
+}
+
+data "astronomer_deployments" "example_deployments_filter_by_workspace_ids" {
+ workspace_ids = ["clozc036j01to01jrlgvueo8t", "clozc036j01to01jrlgvueo81"]
+}
+
+data "astronomer_deployments" "example_deployments_filter_by_names" {
+ names = ["my first deployment", "my second deployment"]
+}
\ No newline at end of file
diff --git a/examples/data-sources/astronomer_workspaces/data-source.tf b/examples/data-sources/astronomer_workspaces/data-source.tf
new file mode 100644
index 00000000..e56ad1af
--- /dev/null
+++ b/examples/data-sources/astronomer_workspaces/data-source.tf
@@ -0,0 +1,9 @@
+data "astronomer_workspaces" "example_workspaces" {}
+
+data "astronomer_workspaces" "example_workspaces_filter_by_workspace_ids" {
+ workspace_ids = ["clozc036j01to01jrlgvueo8t", "clozc036j01to01jrlgvueo81"]
+}
+
+data "astronomer_workspaces" "example_workspaces_filter_by_names" {
+ names = ["my first workspace", "my second workspace"]
+}
diff --git a/internal/provider/datasources/data_source_deployment.go b/internal/provider/datasources/data_source_deployment.go
new file mode 100644
index 00000000..2f2d8ed9
--- /dev/null
+++ b/internal/provider/datasources/data_source_deployment.go
@@ -0,0 +1,118 @@
+package datasources
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/astronomer/astronomer-terraform-provider/internal/clients"
+ "github.com/astronomer/astronomer-terraform-provider/internal/clients/platform"
+ "github.com/astronomer/astronomer-terraform-provider/internal/provider/models"
+ "github.com/astronomer/astronomer-terraform-provider/internal/provider/schemas"
+ "github.com/astronomer/astronomer-terraform-provider/internal/utils"
+ "github.com/hashicorp/terraform-plugin-framework/datasource"
+ "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+)
+
+// Ensure provider defined types fully satisfy framework interfaces.
+var _ datasource.DataSource = &deploymentDataSource{}
+var _ datasource.DataSourceWithConfigure = &deploymentDataSource{}
+
+func NewDeploymentDataSource() datasource.DataSource {
+ return &deploymentDataSource{}
+}
+
+// deploymentDataSource defines the data source implementation.
+type deploymentDataSource struct {
+ PlatformClient platform.ClientWithResponsesInterface
+ OrganizationId string
+}
+
+func (d *deploymentDataSource) Metadata(
+ ctx context.Context,
+ req datasource.MetadataRequest,
+ resp *datasource.MetadataResponse,
+) {
+ resp.TypeName = req.ProviderTypeName + "_deployment"
+}
+
+func (d *deploymentDataSource) Schema(
+ ctx context.Context,
+ req datasource.SchemaRequest,
+ resp *datasource.SchemaResponse,
+) {
+ resp.Schema = schema.Schema{
+ // This description is used by the documentation generator and the language server.
+ MarkdownDescription: "Deployment data source",
+ Attributes: schemas.DeploymentDataSourceSchemaAttributes(),
+ }
+}
+
+func (d *deploymentDataSource) Configure(
+ ctx context.Context,
+ req datasource.ConfigureRequest,
+ resp *datasource.ConfigureResponse,
+) {
+ // Prevent panic if the provider has not been configured.
+ if req.ProviderData == nil {
+ return
+ }
+
+ apiClients, ok := req.ProviderData.(models.ApiClientsModel)
+ if !ok {
+ utils.DataSourceApiClientConfigureError(ctx, req, resp)
+ return
+ }
+
+ d.PlatformClient = apiClients.PlatformClient
+ d.OrganizationId = apiClients.OrganizationId
+}
+
+func (d *deploymentDataSource) Read(
+ ctx context.Context,
+ req datasource.ReadRequest,
+ resp *datasource.ReadResponse,
+) {
+ var data models.DeploymentDataSource
+
+ // Read Terraform configuration data into the model
+ resp.Diagnostics.Append(req.Config.Get(ctx, &data)...)
+
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ deployment, err := d.PlatformClient.GetDeploymentWithResponse(
+ ctx,
+ d.OrganizationId,
+ data.Id.ValueString(),
+ )
+ if err != nil {
+ tflog.Error(ctx, "failed to get deployment", map[string]interface{}{"error": err})
+ resp.Diagnostics.AddError(
+ "Client Error",
+ fmt.Sprintf("Unable to read deployment, got error: %s", err),
+ )
+ return
+ }
+ _, diagnostic := clients.NormalizeAPIError(ctx, deployment.HTTPResponse, deployment.Body)
+ if diagnostic != nil {
+ resp.Diagnostics.Append(diagnostic)
+ return
+ }
+ if deployment.JSON200 == nil {
+ tflog.Error(ctx, "failed to get deployment", map[string]interface{}{"error": "nil response"})
+ resp.Diagnostics.AddError("Client Error", "Unable to read deployment, got nil response")
+ return
+ }
+
+ // Populate the model with the response data
+ diags := data.ReadFromResponse(ctx, deployment.JSON200)
+ if diags.HasError() {
+ resp.Diagnostics.Append(diags...)
+ return
+ }
+
+ // Save data into Terraform state
+ resp.Diagnostics.Append(resp.State.Set(ctx, &data)...)
+}
diff --git a/internal/provider/datasources/data_source_deployments.go b/internal/provider/datasources/data_source_deployments.go
new file mode 100644
index 00000000..f9f413a5
--- /dev/null
+++ b/internal/provider/datasources/data_source_deployments.go
@@ -0,0 +1,140 @@
+package datasources
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/samber/lo"
+
+ "github.com/astronomer/astronomer-terraform-provider/internal/clients"
+ "github.com/astronomer/astronomer-terraform-provider/internal/clients/platform"
+ "github.com/astronomer/astronomer-terraform-provider/internal/provider/models"
+ "github.com/astronomer/astronomer-terraform-provider/internal/provider/schemas"
+ "github.com/astronomer/astronomer-terraform-provider/internal/utils"
+ "github.com/hashicorp/terraform-plugin-framework/datasource"
+ "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+)
+
+// Ensure provider defined types fully satisfy framework interfaces.
+var _ datasource.DataSource = &deploymentsDataSource{}
+var _ datasource.DataSourceWithConfigure = &deploymentsDataSource{}
+
+func NewDeploymentsDataSource() datasource.DataSource {
+ return &deploymentsDataSource{}
+}
+
+// deploymentsDataSource defines the data source implementation.
+type deploymentsDataSource struct {
+ PlatformClient platform.ClientWithResponsesInterface
+ OrganizationId string
+}
+
+func (d *deploymentsDataSource) Metadata(
+ ctx context.Context,
+ req datasource.MetadataRequest,
+ resp *datasource.MetadataResponse,
+) {
+ resp.TypeName = req.ProviderTypeName + "_deployments"
+}
+
+func (d *deploymentsDataSource) Schema(
+ ctx context.Context,
+ req datasource.SchemaRequest,
+ resp *datasource.SchemaResponse,
+) {
+ resp.Schema = schema.Schema{
+ // This description is used by the documentation generator and the language server.
+ MarkdownDescription: "Deployments data source",
+ Attributes: schemas.DeploymentsDataSourceSchemaAttributes(),
+ }
+}
+
+func (d *deploymentsDataSource) Configure(
+ ctx context.Context,
+ req datasource.ConfigureRequest,
+ resp *datasource.ConfigureResponse,
+) {
+ // Prevent panic if the provider has not been configured.
+ if req.ProviderData == nil {
+ return
+ }
+
+ apiClients, ok := req.ProviderData.(models.ApiClientsModel)
+ if !ok {
+ utils.DataSourceApiClientConfigureError(ctx, req, resp)
+ return
+ }
+
+ d.PlatformClient = apiClients.PlatformClient
+ d.OrganizationId = apiClients.OrganizationId
+}
+
+func (d *deploymentsDataSource) Read(
+ ctx context.Context,
+ req datasource.ReadRequest,
+ resp *datasource.ReadResponse,
+) {
+ var data models.DeploymentsDataSource
+
+ // Read Terraform configuration data into the model
+ resp.Diagnostics.Append(req.Config.Get(ctx, &data)...)
+
+ params := &platform.ListDeploymentsParams{
+ Limit: lo.ToPtr(1000),
+ }
+ params.DeploymentIds = utils.TypesListToStringSlicePtr(data.DeploymentIds)
+ params.WorkspaceIds = utils.TypesListToStringSlicePtr(data.WorkspaceIds)
+ params.Names = utils.TypesListToStringSlicePtr(data.Names)
+
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ var deployments []platform.Deployment
+ offset := 0
+ for {
+ params.Offset = &offset
+ deploymentsResp, err := d.PlatformClient.ListDeploymentsWithResponse(
+ ctx,
+ d.OrganizationId,
+ params,
+ )
+ if err != nil {
+ tflog.Error(ctx, "failed to list deployments", map[string]interface{}{"error": err})
+ resp.Diagnostics.AddError(
+ "Client Error",
+ fmt.Sprintf("Unable to read deployments, got error: %s", err),
+ )
+ return
+ }
+ _, diagnostic := clients.NormalizeAPIError(ctx, deploymentsResp.HTTPResponse, deploymentsResp.Body)
+ if diagnostic != nil {
+ resp.Diagnostics.Append(diagnostic)
+ return
+ }
+ if deploymentsResp.JSON200 == nil {
+ tflog.Error(ctx, "failed to list deployments", map[string]interface{}{"error": "nil response"})
+ resp.Diagnostics.AddError("Client Error", "Unable to read deployments, got nil response")
+ return
+ }
+
+ deployments = append(deployments, deploymentsResp.JSON200.Deployments...)
+
+		if len(deployments) >= deploymentsResp.JSON200.TotalCount {
+ break
+ }
+
+ offset += 1000
+ }
+
+ // Populate the model with the response data
+ diags := data.ReadFromResponse(ctx, deployments)
+ if diags.HasError() {
+ resp.Diagnostics.Append(diags...)
+ return
+ }
+
+ // Save data into Terraform state
+ resp.Diagnostics.Append(resp.State.Set(ctx, &data)...)
+}
diff --git a/internal/provider/datasources/data_source_workspaces.go b/internal/provider/datasources/data_source_workspaces.go
index e6f294ec..f36ad8d0 100644
--- a/internal/provider/datasources/data_source_workspaces.go
+++ b/internal/provider/datasources/data_source_workspaces.go
@@ -3,9 +3,7 @@ package datasources
import (
"context"
"fmt"
- "strings"
- "github.com/hashicorp/terraform-plugin-framework/attr"
"github.com/samber/lo"
"github.com/astronomer/astronomer-terraform-provider/internal/clients"
@@ -85,22 +83,8 @@ func (d *workspacesDataSource) Read(
params := &platform.ListWorkspacesParams{
Limit: lo.ToPtr(1000),
}
- workspaceIds := data.WorkspaceIds.Elements()
- if len(workspaceIds) > 0 {
- workspaceIdsParam := lo.Map(workspaceIds, func(id attr.Value, _ int) string {
- // Terraform includes quotes around the string, so we need to remove them
- return strings.ReplaceAll(id.String(), `"`, "")
- })
- params.WorkspaceIds = &workspaceIdsParam
- }
- names := data.Names.Elements()
- if len(names) > 0 {
- namesParam := lo.Map(names, func(name attr.Value, _ int) string {
- // Terraform includes quotes around the string, so we need to remove them
- return strings.ReplaceAll(name.String(), `"`, "")
- })
- params.Names = &namesParam
- }
+ params.WorkspaceIds = utils.TypesListToStringSlicePtr(data.WorkspaceIds)
+ params.Names = utils.TypesListToStringSlicePtr(data.Names)
if resp.Diagnostics.HasError() {
return
@@ -116,10 +100,10 @@ func (d *workspacesDataSource) Read(
params,
)
if err != nil {
- tflog.Error(ctx, "failed to get workspace", map[string]interface{}{"error": err})
+ tflog.Error(ctx, "failed to list workspaces", map[string]interface{}{"error": err})
resp.Diagnostics.AddError(
"Client Error",
- fmt.Sprintf("Unable to read workspace, got error: %s", err),
+ fmt.Sprintf("Unable to read workspaces, got error: %s", err),
)
return
}
@@ -129,8 +113,8 @@ func (d *workspacesDataSource) Read(
return
}
if workspacesResp.JSON200 == nil {
- tflog.Error(ctx, "failed to get workspace", map[string]interface{}{"error": "nil response"})
- resp.Diagnostics.AddError("Client Error", "Unable to read workspace, got nil response")
+ tflog.Error(ctx, "failed to list workspaces", map[string]interface{}{"error": "nil response"})
+ resp.Diagnostics.AddError("Client Error", "Unable to read workspaces, got nil response")
return
}
diff --git a/internal/provider/models/deployment.go b/internal/provider/models/deployment.go
new file mode 100644
index 00000000..9a11d731
--- /dev/null
+++ b/internal/provider/models/deployment.go
@@ -0,0 +1,302 @@
+package models
+
+import (
+ "context"
+
+ "github.com/astronomer/astronomer-terraform-provider/internal/clients/platform"
+ "github.com/astronomer/astronomer-terraform-provider/internal/provider/schemas"
+ "github.com/astronomer/astronomer-terraform-provider/internal/utils"
+ "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+// DeploymentDataSource describes the data source data model.
// DeploymentDataSource describes the data source data model.
//
// Field names map 1:1 onto the astronomer_deployment data source schema via
// the tfsdk tags. Only Id is user-supplied (Required in the schema); every
// other field is Computed from the platform API response — see
// ReadFromResponse for the mapping.
type DeploymentDataSource struct {
	Id                       types.String `tfsdk:"id"`
	Name                     types.String `tfsdk:"name"`
	Description              types.String `tfsdk:"description"`
	CreatedAt                types.String `tfsdk:"created_at"`
	UpdatedAt                types.String `tfsdk:"updated_at"`
	CreatedBy                types.Object `tfsdk:"created_by"`
	UpdatedBy                types.Object `tfsdk:"updated_by"`
	WorkspaceId              types.String `tfsdk:"workspace_id"`
	ClusterId                types.String `tfsdk:"cluster_id"`
	Region                   types.String `tfsdk:"region"`
	CloudProvider            types.String `tfsdk:"cloud_provider"`
	AstroRuntimeVersion      types.String `tfsdk:"astro_runtime_version"`
	AirflowVersion           types.String `tfsdk:"airflow_version"`
	Namespace                types.String `tfsdk:"namespace"`
	ContactEmails            types.List   `tfsdk:"contact_emails"`
	Executor                 types.String `tfsdk:"executor"`
	SchedulerAu              types.Int64  `tfsdk:"scheduler_au"`
	SchedulerCpu             types.String `tfsdk:"scheduler_cpu"`
	SchedulerMemory          types.String `tfsdk:"scheduler_memory"`
	SchedulerReplicas        types.Int64  `tfsdk:"scheduler_replicas"`
	ImageTag                 types.String `tfsdk:"image_tag"`
	ImageRepository          types.String `tfsdk:"image_repository"`
	ImageVersion             types.String `tfsdk:"image_version"`
	EnvironmentVariables     types.List   `tfsdk:"environment_variables"`
	WebserverIngressHostname types.String `tfsdk:"webserver_ingress_hostname"`
	WebserverUrl             types.String `tfsdk:"webserver_url"`
	WebserverAirflowApiUrl   types.String `tfsdk:"webserver_airflow_api_url"`
	WebserverCpu             types.String `tfsdk:"webserver_cpu"`
	WebserverMemory          types.String `tfsdk:"webserver_memory"`
	WebserverReplicas        types.Int64  `tfsdk:"webserver_replicas"`
	Status                   types.String `tfsdk:"status"`
	StatusReason             types.String `tfsdk:"status_reason"`
	DagTarballVersion        types.String `tfsdk:"dag_tarball_version"`
	DesiredDagTarballVersion types.String `tfsdk:"desired_dag_tarball_version"`
	WorkerQueues             types.List   `tfsdk:"worker_queues"`
	TaskPodNodePoolId        types.String `tfsdk:"task_pod_node_pool_id"`
	IsCicdEnforced           types.Bool   `tfsdk:"is_cicd_enforced"`
	Type                     types.String `tfsdk:"type"`
	IsDagDeployEnabled       types.Bool   `tfsdk:"is_dag_deploy_enabled"`
	SchedulerSize            types.String `tfsdk:"scheduler_size"`
	IsHighAvailability       types.Bool   `tfsdk:"is_high_availability"`
	IsDevelopmentMode        types.Bool   `tfsdk:"is_development_mode"`
	WorkloadIdentity         types.String `tfsdk:"workload_identity"`
	ExternalIps              types.List   `tfsdk:"external_ips"`
	OidcIssuerUrl            types.String `tfsdk:"oidc_issuer_url"`
	ResourceQuotaCpu         types.String `tfsdk:"resource_quota_cpu"`
	ResourceQuotaMemory      types.String `tfsdk:"resource_quota_memory"`
	DefaultTaskPodCpu        types.String `tfsdk:"default_task_pod_cpu"`
	DefaultTaskPodMemory     types.String `tfsdk:"default_task_pod_memory"`
	ScalingStatus            types.Object `tfsdk:"scaling_status"` // see ScalingStatusTypesObject
	ScalingSpec              types.Object `tfsdk:"scaling_spec"`   // see ScalingSpecTypesObject
}
+
// DeploymentEnvironmentVariable is the Terraform model for one Deployment
// environment variable. Value is backed by a pointer in the API model, so it
// may be null in state.
type DeploymentEnvironmentVariable struct {
	Key       types.String `tfsdk:"key"`
	Value     types.String `tfsdk:"value"`
	UpdatedAt types.String `tfsdk:"updated_at"`
	IsSecret  types.Bool   `tfsdk:"is_secret"`
}
+
// WorkerQueue is the Terraform model for one Deployment worker queue.
// AstroMachine and NodePoolId come from pointer fields in the API model and
// may be null in state.
type WorkerQueue struct {
	Id                types.String `tfsdk:"id"`
	Name              types.String `tfsdk:"name"`
	AstroMachine      types.String `tfsdk:"astro_machine"`
	IsDefault         types.Bool   `tfsdk:"is_default"`
	MaxWorkerCount    types.Int64  `tfsdk:"max_worker_count"`
	MinWorkerCount    types.Int64  `tfsdk:"min_worker_count"`
	NodePoolId        types.String `tfsdk:"node_pool_id"`
	PodCpu            types.String `tfsdk:"pod_cpu"`
	PodMemory         types.String `tfsdk:"pod_memory"`
	WorkerConcurrency types.Int64  `tfsdk:"worker_concurrency"`
}
+
// ReadFromResponse copies a platform API Deployment into the Terraform data
// source model, converting nested objects (subject profiles, environment
// variables, worker queues, scaling status/spec) along the way.
// It returns the first non-empty diagnostics produced by a nested conversion;
// on success it returns nil. Pointer-typed API fields are mapped with
// *PointerValue helpers so a nil pointer becomes a Terraform null.
func (data *DeploymentDataSource) ReadFromResponse(
	ctx context.Context,
	deployment *platform.Deployment,
) diag.Diagnostics {
	data.Id = types.StringValue(deployment.Id)
	data.Name = types.StringValue(deployment.Name)
	data.Description = types.StringPointerValue(deployment.Description)
	data.CreatedAt = types.StringValue(deployment.CreatedAt.String())
	data.UpdatedAt = types.StringValue(deployment.UpdatedAt.String())
	var diags diag.Diagnostics
	data.CreatedBy, diags = SubjectProfileTypesObject(ctx, deployment.CreatedBy)
	if diags.HasError() {
		return diags
	}
	data.UpdatedBy, diags = SubjectProfileTypesObject(ctx, deployment.UpdatedBy)
	if diags.HasError() {
		return diags
	}
	data.WorkspaceId = types.StringValue(deployment.WorkspaceId)
	data.ClusterId = types.StringPointerValue(deployment.ClusterId)
	data.Region = types.StringPointerValue(deployment.Region)
	// CloudProvider/Executor/Type/SchedulerSize are API enum types; cast the
	// pointer to *string before wrapping.
	data.CloudProvider = types.StringPointerValue((*string)(deployment.CloudProvider))
	data.AstroRuntimeVersion = types.StringValue(deployment.AstroRuntimeVersion)
	data.AirflowVersion = types.StringValue(deployment.AirflowVersion)
	data.Namespace = types.StringValue(deployment.Namespace)
	// Nil list pointers are left as the model's zero value rather than an
	// explicit empty list.
	if deployment.ContactEmails != nil {
		data.ContactEmails, diags = utils.StringList(*deployment.ContactEmails)
		if diags.HasError() {
			return diags
		}
	}
	data.Executor = types.StringPointerValue((*string)(deployment.Executor))
	if deployment.SchedulerAu != nil {
		// SchedulerAu is a smaller integer type in the API; widen to int64
		// for the framework type.
		deploymentSchedulerAu := int64(*deployment.SchedulerAu)
		data.SchedulerAu = types.Int64Value(deploymentSchedulerAu)
	}
	data.SchedulerCpu = types.StringValue(deployment.SchedulerCpu)
	data.SchedulerMemory = types.StringValue(deployment.SchedulerMemory)
	data.SchedulerReplicas = types.Int64Value(int64(deployment.SchedulerReplicas))
	data.ImageTag = types.StringValue(deployment.ImageTag)
	data.ImageRepository = types.StringValue(deployment.ImageRepository)
	data.ImageVersion = types.StringPointerValue(deployment.ImageVersion)
	if deployment.EnvironmentVariables != nil {
		data.EnvironmentVariables, diags = utils.ObjectList(ctx, *deployment.EnvironmentVariables, schemas.DeploymentEnvironmentVariableAttributeTypes(), DeploymentEnvironmentVariableTypesObject)
		if diags.HasError() {
			return diags
		}
	}
	// Note the casing difference: the API model uses "WebServer", the
	// Terraform model uses "Webserver".
	data.WebserverIngressHostname = types.StringValue(deployment.WebServerIngressHostname)
	data.WebserverUrl = types.StringValue(deployment.WebServerUrl)
	data.WebserverAirflowApiUrl = types.StringValue(deployment.WebServerAirflowApiUrl)
	data.WebserverCpu = types.StringValue(deployment.WebServerCpu)
	data.WebserverMemory = types.StringValue(deployment.WebServerMemory)
	if deployment.WebServerReplicas != nil {
		data.WebserverReplicas = types.Int64Value(int64(*deployment.WebServerReplicas))
	}
	data.Status = types.StringValue(string(deployment.Status))
	data.StatusReason = types.StringPointerValue(deployment.StatusReason)
	data.DagTarballVersion = types.StringPointerValue(deployment.DagTarballVersion)
	data.DesiredDagTarballVersion = types.StringPointerValue(deployment.DesiredDagTarballVersion)
	if deployment.WorkerQueues != nil {
		data.WorkerQueues, diags = utils.ObjectList(ctx, *deployment.WorkerQueues, schemas.WorkerQueueAttributeTypes(), WorkerQueueTypesObject)
		if diags.HasError() {
			return diags
		}
	}
	data.TaskPodNodePoolId = types.StringPointerValue(deployment.TaskPodNodePoolId)
	data.IsCicdEnforced = types.BoolValue(deployment.IsCicdEnforced)
	data.Type = types.StringPointerValue((*string)(deployment.Type))
	data.IsDagDeployEnabled = types.BoolValue(deployment.IsDagDeployEnabled)
	data.SchedulerSize = types.StringPointerValue((*string)(deployment.SchedulerSize))
	data.IsHighAvailability = types.BoolPointerValue(deployment.IsHighAvailability)
	data.IsDevelopmentMode = types.BoolPointerValue(deployment.IsDevelopmentMode)
	data.WorkloadIdentity = types.StringPointerValue(deployment.WorkloadIdentity)
	if deployment.ExternalIPs != nil {
		data.ExternalIps, diags = utils.StringList(*deployment.ExternalIPs)
		if diags.HasError() {
			return diags
		}
	}
	data.OidcIssuerUrl = types.StringPointerValue(deployment.OidcIssuerUrl)
	data.ResourceQuotaCpu = types.StringPointerValue(deployment.ResourceQuotaCpu)
	data.ResourceQuotaMemory = types.StringPointerValue(deployment.ResourceQuotaMemory)
	data.DefaultTaskPodCpu = types.StringPointerValue(deployment.DefaultTaskPodCpu)
	data.DefaultTaskPodMemory = types.StringPointerValue(deployment.DefaultTaskPodMemory)
	// Scaling status/spec converters return a null object when the API omits
	// the corresponding section.
	data.ScalingStatus, diags = ScalingStatusTypesObject(ctx, deployment.ScalingStatus)
	if diags.HasError() {
		return diags
	}
	data.ScalingSpec, diags = ScalingSpecTypesObject(ctx, deployment.ScalingSpec)
	if diags.HasError() {
		return diags
	}

	return nil
}
+
+func DeploymentEnvironmentVariableTypesObject(
+ ctx context.Context,
+ envVar platform.DeploymentEnvironmentVariable,
+) (types.Object, diag.Diagnostics) {
+ obj := DeploymentEnvironmentVariable{
+ Key: types.StringValue(envVar.Key),
+ Value: types.StringPointerValue(envVar.Value),
+ UpdatedAt: types.StringValue(envVar.UpdatedAt),
+ IsSecret: types.BoolValue(envVar.IsSecret),
+ }
+
+ return types.ObjectValueFrom(ctx, schemas.DeploymentEnvironmentVariableAttributeTypes(), obj)
+}
+
+func WorkerQueueTypesObject(
+ ctx context.Context,
+ workerQueue platform.WorkerQueue,
+) (types.Object, diag.Diagnostics) {
+ obj := WorkerQueue{
+ Id: types.StringValue(workerQueue.Id),
+ Name: types.StringValue(workerQueue.Name),
+ AstroMachine: types.StringPointerValue(workerQueue.AstroMachine),
+ IsDefault: types.BoolValue(workerQueue.IsDefault),
+ MaxWorkerCount: types.Int64Value(int64(workerQueue.MaxWorkerCount)),
+ MinWorkerCount: types.Int64Value(int64(workerQueue.MinWorkerCount)),
+ NodePoolId: types.StringPointerValue(workerQueue.NodePoolId),
+ PodCpu: types.StringValue(workerQueue.PodCpu),
+ PodMemory: types.StringValue(workerQueue.PodMemory),
+ WorkerConcurrency: types.Int64Value(int64(workerQueue.WorkerConcurrency)),
+ }
+
+ return types.ObjectValueFrom(ctx, schemas.WorkerQueueAttributeTypes(), obj)
+}
+
// DeploymentScalingSpec wraps the hibernation spec for the scaling_spec
// attribute.
type DeploymentScalingSpec struct {
	HibernationSpec HibernationSpec `tfsdk:"hibernation_spec"`
}

// DeploymentStatus wraps the hibernation status for the scaling_status
// attribute.
type DeploymentStatus struct {
	HibernationStatus HibernationStatus `tfsdk:"hibernation_status"`
}

// HibernationStatus is the Terraform model for a Deployment's current
// hibernation state as reported by the platform API.
type HibernationStatus struct {
	IsHibernating types.Bool   `tfsdk:"is_hibernating"`
	NextEventType types.String `tfsdk:"next_event_type"`
	NextEventAt   types.String `tfsdk:"next_event_at"`
	Reason        types.String `tfsdk:"reason"`
}

// HibernationSpec is the Terraform model for a Deployment's hibernation
// configuration: an optional manual override plus zero or more schedules.
type HibernationSpec struct {
	Override  HibernationSpecOverride `tfsdk:"override"`
	Schedules []HibernationSchedule   `tfsdk:"schedules"`
}

// HibernationSpecOverride is the Terraform model for a manual hibernation
// override.
type HibernationSpecOverride struct {
	IsHibernating types.Bool   `tfsdk:"is_hibernating"`
	OverrideUntil types.String `tfsdk:"override_until"`
	IsActive      types.Bool   `tfsdk:"is_active"`
}

// HibernationSchedule is the Terraform model for one cron-based hibernation
// schedule.
type HibernationSchedule struct {
	Description     types.String `tfsdk:"description"`
	HibernateAtCron types.String `tfsdk:"hibernate_at_cron"`
	IsEnabled       types.Bool   `tfsdk:"is_enabled"`
	WakeAtCron      types.String `tfsdk:"wake_at_cron"`
}
+
+func ScalingStatusTypesObject(
+ ctx context.Context,
+ scalingStatus *platform.DeploymentScalingStatus,
+) (types.Object, diag.Diagnostics) {
+ if scalingStatus != nil && scalingStatus.HibernationStatus != nil {
+ obj := DeploymentStatus{
+ HibernationStatus: HibernationStatus{
+ IsHibernating: types.BoolValue(scalingStatus.HibernationStatus.IsHibernating),
+ NextEventType: types.StringPointerValue((*string)(scalingStatus.HibernationStatus.NextEventType)),
+ NextEventAt: types.StringPointerValue(scalingStatus.HibernationStatus.NextEventAt),
+ Reason: types.StringPointerValue(scalingStatus.HibernationStatus.Reason),
+ },
+ }
+ return types.ObjectValueFrom(ctx, schemas.ScalingStatusAttributeTypes(), obj)
+ }
+ return types.ObjectNull(schemas.ScalingStatusAttributeTypes()), nil
+}
+
+func ScalingSpecTypesObject(
+ ctx context.Context,
+ scalingSpec *platform.DeploymentScalingSpec,
+) (types.Object, diag.Diagnostics) {
+ if scalingSpec != nil && scalingSpec.HibernationSpec != nil && (scalingSpec.HibernationSpec.Override != nil || scalingSpec.HibernationSpec.Schedules != nil) {
+ obj := DeploymentScalingSpec{
+ HibernationSpec: HibernationSpec{},
+ }
+ if scalingSpec.HibernationSpec.Override != nil {
+ obj.HibernationSpec.Override = HibernationSpecOverride{
+ IsHibernating: types.BoolPointerValue(scalingSpec.HibernationSpec.Override.IsHibernating),
+ IsActive: types.BoolPointerValue(scalingSpec.HibernationSpec.Override.IsActive),
+ }
+ if scalingSpec.HibernationSpec.Override.OverrideUntil != nil {
+ obj.HibernationSpec.Override.OverrideUntil = types.StringValue(scalingSpec.HibernationSpec.Override.OverrideUntil.String())
+ }
+ }
+ if scalingSpec.HibernationSpec.Schedules != nil {
+ schedules := make([]HibernationSchedule, 0, len(*scalingSpec.HibernationSpec.Schedules))
+ for _, schedule := range *scalingSpec.HibernationSpec.Schedules {
+ schedules = append(schedules, HibernationSchedule{
+ Description: types.StringPointerValue(schedule.Description),
+ HibernateAtCron: types.StringValue(schedule.HibernateAtCron),
+ IsEnabled: types.BoolValue(schedule.IsEnabled),
+ WakeAtCron: types.StringValue(schedule.WakeAtCron),
+ })
+ }
+ obj.HibernationSpec.Schedules = schedules
+ }
+ return types.ObjectValueFrom(ctx, schemas.ScalingSpecAttributeTypes(), obj)
+ }
+ return types.ObjectNull(schemas.ScalingSpecAttributeTypes()), nil
+}
diff --git a/internal/provider/models/deployments.go b/internal/provider/models/deployments.go
new file mode 100644
index 00000000..cc2ca25e
--- /dev/null
+++ b/internal/provider/models/deployments.go
@@ -0,0 +1,50 @@
+package models
+
+import (
+ "context"
+
+ "github.com/astronomer/astronomer-terraform-provider/internal/clients/platform"
+ "github.com/astronomer/astronomer-terraform-provider/internal/provider/schemas"
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+// DeploymentsDataSource describes the data source data model.
// DeploymentsDataSource describes the data source data model.
//
// Deployments holds the resulting list of deployment objects; the remaining
// list fields are optional query parameters used to filter the API request.
type DeploymentsDataSource struct {
	Deployments   types.List `tfsdk:"deployments"`
	WorkspaceIds  types.List `tfsdk:"workspace_ids"`  // query parameter
	DeploymentIds types.List `tfsdk:"deployment_ids"` // query parameter
	Names         types.List `tfsdk:"names"`          // query parameter
}
+
+func (data *DeploymentsDataSource) ReadFromResponse(
+ ctx context.Context,
+ deployments []platform.Deployment,
+) diag.Diagnostics {
+ if len(deployments) == 0 {
+ types.ListNull(types.ObjectType{AttrTypes: schemas.DeploymentsElementAttributeTypes()})
+ }
+
+ values := make([]attr.Value, len(deployments))
+ for i, deployment := range deployments {
+ var data DeploymentDataSource
+ diags := data.ReadFromResponse(ctx, &deployment)
+ if diags.HasError() {
+ return diags
+ }
+
+ objectValue, diags := types.ObjectValueFrom(ctx, schemas.DeploymentsElementAttributeTypes(), data)
+ if diags.HasError() {
+ return diags
+ }
+ values[i] = objectValue
+ }
+ var diags diag.Diagnostics
+ data.Deployments, diags = types.ListValue(types.ObjectType{AttrTypes: schemas.DeploymentsElementAttributeTypes()}, values)
+ if diags.HasError() {
+ return diags
+ }
+
+ return nil
+}
diff --git a/internal/provider/models/subject_profile.go b/internal/provider/models/subject_profile.go
index f119b357..823a7ab0 100644
--- a/internal/provider/models/subject_profile.go
+++ b/internal/provider/models/subject_profile.go
@@ -3,13 +3,13 @@ package models
import (
"context"
+ "github.com/astronomer/astronomer-terraform-provider/internal/clients/iam"
"github.com/astronomer/astronomer-terraform-provider/internal/provider/schemas"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
- "github.com/astronomer/astronomer-terraform-provider/internal/clients/iam"
"github.com/astronomer/astronomer-terraform-provider/internal/clients/platform"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
- "github.com/hashicorp/terraform-plugin-log/tflog"
)
type SubjectProfile struct {
@@ -25,25 +25,24 @@ func SubjectProfileTypesObject(
ctx context.Context,
basicSubjectProfile any,
) (types.Object, diag.Diagnostics) {
- // Check that the type passed in is a platform.BasicSubjectProfile or iam.BasicSubjectProfile
- bsp, ok := basicSubjectProfile.(*platform.BasicSubjectProfile)
- if !ok {
- iamBsp, ok := basicSubjectProfile.(*iam.BasicSubjectProfile)
- if !ok {
- tflog.Error(
- ctx,
- "Unexpected type passed into subject profile",
- map[string]interface{}{"value": basicSubjectProfile},
- )
- return types.Object{}, diag.Diagnostics{
- diag.NewErrorDiagnostic(
- "Internal Error",
- "SubjectProfileTypesObject expects a BasicSubjectProfile type but did not receive one",
- ),
- }
+ // Attempt to convert basicSubjectProfile to *platform.BasicSubjectProfile
+ // Our API client returns a BasicSubjectProfile, but we are unsure if it is a pointer and which package it is from
+ var bspPtr *platform.BasicSubjectProfile
+
+ switch v := basicSubjectProfile.(type) {
+ case platform.BasicSubjectProfile:
+ bspPtr = &v
+ case *platform.BasicSubjectProfile:
+ bspPtr = v
+ case iam.BasicSubjectProfile, *iam.BasicSubjectProfile:
+ var iamBsp *iam.BasicSubjectProfile
+ if nonPtr, ok := v.(iam.BasicSubjectProfile); ok {
+ iamBsp = &nonPtr
+ } else {
+ iamBsp = v.(*iam.BasicSubjectProfile)
}
- // Convert the iam.BasicSubjectProfile to a platform.BasicSubjectProfile for simplicity
- bsp = &platform.BasicSubjectProfile{
+
+ bspPtr = &platform.BasicSubjectProfile{
ApiTokenName: iamBsp.ApiTokenName,
AvatarUrl: iamBsp.AvatarUrl,
FullName: iamBsp.FullName,
@@ -51,37 +50,29 @@ func SubjectProfileTypesObject(
SubjectType: (*platform.BasicSubjectProfileSubjectType)(iamBsp.SubjectType),
Username: iamBsp.Username,
}
+ default:
+ // Log error and return if none of the types match
+ tflog.Error(
+ ctx,
+ "Unexpected type passed into subject profile",
+ map[string]interface{}{"value": basicSubjectProfile},
+ )
+ return types.Object{}, diag.Diagnostics{
+ diag.NewErrorDiagnostic(
+ "Internal Error",
+ "SubjectProfileTypesObject expects a BasicSubjectProfile type but did not receive one",
+ ),
+ }
}
subjectProfile := SubjectProfile{
- Id: types.StringValue(bsp.Id),
+ Id: types.StringValue(bspPtr.Id),
+ SubjectType: types.StringPointerValue((*string)(bspPtr.SubjectType)),
+ Username: types.StringPointerValue(bspPtr.Username),
+ FullName: types.StringPointerValue(bspPtr.FullName),
+ AvatarUrl: types.StringPointerValue(bspPtr.AvatarUrl),
+ ApiTokenName: types.StringPointerValue(bspPtr.ApiTokenName),
}
- if bsp.SubjectType != nil {
- subjectProfile.SubjectType = types.StringValue(string(*bsp.SubjectType))
- if *bsp.SubjectType == platform.USER {
- if bsp.Username != nil {
- subjectProfile.Username = types.StringValue(*bsp.Username)
- } else {
- subjectProfile.Username = types.StringUnknown()
- }
- if bsp.FullName != nil {
- subjectProfile.FullName = types.StringValue(*bsp.FullName)
- } else {
- subjectProfile.FullName = types.StringUnknown()
- }
- if bsp.AvatarUrl != nil {
- subjectProfile.AvatarUrl = types.StringValue(*bsp.AvatarUrl)
- } else {
- subjectProfile.AvatarUrl = types.StringUnknown()
- }
- } else {
- if bsp.ApiTokenName != nil {
- subjectProfile.ApiTokenName = types.StringValue(*bsp.ApiTokenName)
- } else {
- subjectProfile.ApiTokenName = types.StringUnknown()
- }
- }
- }
return types.ObjectValueFrom(ctx, schemas.SubjectProfileAttributeTypes(), subjectProfile)
}
diff --git a/internal/provider/models/workspace.go b/internal/provider/models/workspace.go
index 6dbdebf3..ce7e2078 100644
--- a/internal/provider/models/workspace.go
+++ b/internal/provider/models/workspace.go
@@ -13,7 +13,6 @@ type WorkspaceDataSource struct {
Id types.String `tfsdk:"id"`
Name types.String `tfsdk:"name"`
Description types.String `tfsdk:"description"`
- OrganizationName types.String `tfsdk:"organization_name"`
CicdEnforcedDefault types.Bool `tfsdk:"cicd_enforced_default"`
CreatedAt types.String `tfsdk:"created_at"`
UpdatedAt types.String `tfsdk:"updated_at"`
@@ -26,7 +25,6 @@ type WorkspaceResource struct {
Id types.String `tfsdk:"id"`
Name types.String `tfsdk:"name"`
Description types.String `tfsdk:"description"`
- OrganizationName types.String `tfsdk:"organization_name"`
CicdEnforcedDefault types.Bool `tfsdk:"cicd_enforced_default"`
CreatedAt types.String `tfsdk:"created_at"`
UpdatedAt types.String `tfsdk:"updated_at"`
@@ -40,12 +38,7 @@ func (data *WorkspaceResource) ReadFromResponse(
) diag.Diagnostics {
data.Id = types.StringValue(workspace.Id)
data.Name = types.StringValue(workspace.Name)
- if workspace.Description != nil {
- data.Description = types.StringValue(*workspace.Description)
- }
- if workspace.OrganizationName != nil {
- data.OrganizationName = types.StringValue(*workspace.OrganizationName)
- }
+ data.Description = types.StringPointerValue(workspace.Description)
data.CicdEnforcedDefault = types.BoolValue(workspace.CicdEnforcedDefault)
data.CreatedAt = types.StringValue(workspace.CreatedAt.String())
data.UpdatedAt = types.StringValue(workspace.UpdatedAt.String())
@@ -68,12 +61,7 @@ func (data *WorkspaceDataSource) ReadFromResponse(
) diag.Diagnostics {
data.Id = types.StringValue(workspace.Id)
data.Name = types.StringValue(workspace.Name)
- if workspace.Description != nil {
- data.Description = types.StringValue(*workspace.Description)
- }
- if workspace.OrganizationName != nil {
- data.OrganizationName = types.StringValue(*workspace.OrganizationName)
- }
+ data.Description = types.StringPointerValue(workspace.Description)
data.CicdEnforcedDefault = types.BoolValue(workspace.CicdEnforcedDefault)
data.CreatedAt = types.StringValue(workspace.CreatedAt.String())
data.UpdatedAt = types.StringValue(workspace.UpdatedAt.String())
diff --git a/internal/provider/models/workspaces.go b/internal/provider/models/workspaces.go
index 7b82f905..d1fa3602 100644
--- a/internal/provider/models/workspaces.go
+++ b/internal/provider/models/workspaces.go
@@ -27,38 +27,13 @@ func (data *WorkspacesDataSource) ReadFromResponse(
values := make([]attr.Value, len(workspaces))
for i, workspace := range workspaces {
- v := map[string]attr.Value{}
- v["id"] = types.StringValue(workspace.Id)
- v["name"] = types.StringValue(workspace.Name)
- if workspace.Description != nil {
- v["description"] = types.StringValue(*workspace.Description)
- } else {
- v["description"] = types.StringNull()
- }
- if workspace.OrganizationName != nil {
- v["organization_name"] = types.StringValue(*workspace.OrganizationName)
- } else {
- v["organization_name"] = types.StringNull()
- }
- v["cicd_enforced_default"] = types.BoolValue(workspace.CicdEnforcedDefault)
- v["created_at"] = types.StringValue(workspace.CreatedAt.String())
- v["updated_at"] = types.StringValue(workspace.UpdatedAt.String())
- if workspace.CreatedBy != nil {
- createdBy, diags := SubjectProfileTypesObject(ctx, workspace.CreatedBy)
- if diags.HasError() {
- return diags
- }
- v["created_by"] = createdBy
- }
- if workspace.UpdatedBy != nil {
- updatedBy, diags := SubjectProfileTypesObject(ctx, workspace.UpdatedBy)
- if diags.HasError() {
- return diags
- }
- v["updated_by"] = updatedBy
+ var data WorkspaceDataSource
+ diags := data.ReadFromResponse(ctx, &workspace)
+ if diags.HasError() {
+ return diags
}
- objectValue, diags := types.ObjectValue(schemas.WorkspacesElementAttributeTypes(), v)
+ objectValue, diags := types.ObjectValueFrom(ctx, schemas.WorkspacesElementAttributeTypes(), data)
if diags.HasError() {
return diags
}
diff --git a/internal/provider/provider.go b/internal/provider/provider.go
index 0227f425..01edf380 100644
--- a/internal/provider/provider.go
+++ b/internal/provider/provider.go
@@ -131,6 +131,8 @@ func (p *AstronomerProvider) DataSources(ctx context.Context) []func() datasourc
return []func() datasource.DataSource{
datasources.NewWorkspaceDataSource,
datasources.NewWorkspacesDataSource,
+ datasources.NewDeploymentDataSource,
+ datasources.NewDeploymentsDataSource,
}
}
diff --git a/internal/provider/provider_test.go b/internal/provider/provider_test.go
index 6e82370f..ab186d83 100644
--- a/internal/provider/provider_test.go
+++ b/internal/provider/provider_test.go
@@ -44,6 +44,8 @@ var _ = Describe("Provider Test", func() {
expectedDataSources := []string{
"astronomer_workspace",
"astronomer_workspaces",
+ "astronomer_deployment",
+ "astronomer_deployments",
}
dataSources := p.DataSources(ctx)
diff --git a/internal/provider/schemas/deployment.go b/internal/provider/schemas/deployment.go
new file mode 100644
index 00000000..a5ca9c32
--- /dev/null
+++ b/internal/provider/schemas/deployment.go
@@ -0,0 +1,321 @@
+package schemas
+
+import (
+ "github.com/astronomer/astronomer-terraform-provider/internal/provider/validators"
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ datasourceSchema "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+ "github.com/hashicorp/terraform-plugin-framework/schema/validator"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+)
+
// DeploymentDataSourceSchemaAttributes returns the attribute map for the
// astronomer_deployment data source. Only "id" is user-supplied and must be a
// CUID; every other attribute is Computed from the platform API response.
func DeploymentDataSourceSchemaAttributes() map[string]datasourceSchema.Attribute {
	return map[string]datasourceSchema.Attribute{
		"id": datasourceSchema.StringAttribute{
			MarkdownDescription: "Deployment identifier",
			Required:            true,
			Validators:          []validator.String{validators.IsCuid()},
		},
		"name": datasourceSchema.StringAttribute{
			MarkdownDescription: "Deployment name",
			Computed:            true,
		},
		"description": datasourceSchema.StringAttribute{
			MarkdownDescription: "Deployment description",
			Computed:            true,
		},
		"created_at": datasourceSchema.StringAttribute{
			MarkdownDescription: "Deployment creation timestamp",
			Computed:            true,
		},
		"updated_at": datasourceSchema.StringAttribute{
			MarkdownDescription: "Deployment last updated timestamp",
			Computed:            true,
		},
		"created_by": datasourceSchema.SingleNestedAttribute{
			MarkdownDescription: "Deployment creator",
			Computed:            true,
			Attributes:          DataSourceSubjectProfileSchemaAttributes(),
		},
		"updated_by": datasourceSchema.SingleNestedAttribute{
			MarkdownDescription: "Deployment updater",
			Computed:            true,
			Attributes:          DataSourceSubjectProfileSchemaAttributes(),
		},
		"workspace_id": datasourceSchema.StringAttribute{
			MarkdownDescription: "Deployment workspace identifier",
			Computed:            true,
		},
		"cluster_id": datasourceSchema.StringAttribute{
			MarkdownDescription: "Deployment cluster identifier",
			Computed:            true,
		},
		"region": datasourceSchema.StringAttribute{
			MarkdownDescription: "Deployment region",
			Computed:            true,
		},
		"cloud_provider": datasourceSchema.StringAttribute{
			MarkdownDescription: "Deployment cloud provider",
			Computed:            true,
		},
		"astro_runtime_version": datasourceSchema.StringAttribute{
			MarkdownDescription: "Deployment Astro Runtime version",
			Computed:            true,
		},
		"airflow_version": datasourceSchema.StringAttribute{
			MarkdownDescription: "Deployment Airflow version",
			Computed:            true,
		},
		"namespace": datasourceSchema.StringAttribute{
			MarkdownDescription: "Deployment namespace",
			Computed:            true,
		},
		"contact_emails": datasourceSchema.ListAttribute{
			ElementType:         types.StringType,
			MarkdownDescription: "Deployment contact emails",
			Computed:            true,
		},
		"executor": datasourceSchema.StringAttribute{
			MarkdownDescription: "Deployment executor",
			Computed:            true,
		},
		"scheduler_au": datasourceSchema.Int64Attribute{
			MarkdownDescription: "Deployment scheduler AU",
			Computed:            true,
		},
		"scheduler_cpu": datasourceSchema.StringAttribute{
			MarkdownDescription: "Deployment scheduler CPU",
			Computed:            true,
		},
		"scheduler_memory": datasourceSchema.StringAttribute{
			MarkdownDescription: "Deployment scheduler memory",
			Computed:            true,
		},
		"scheduler_replicas": datasourceSchema.Int64Attribute{
			MarkdownDescription: "Deployment scheduler replicas",
			Computed:            true,
		},
		"image_tag": datasourceSchema.StringAttribute{
			MarkdownDescription: "Deployment image tag",
			Computed:            true,
		},
		"image_repository": datasourceSchema.StringAttribute{
			MarkdownDescription: "Deployment image repository",
			Computed:            true,
		},
		"image_version": datasourceSchema.StringAttribute{
			MarkdownDescription: "Deployment image version",
			Computed:            true,
		},
		"environment_variables": datasourceSchema.ListNestedAttribute{
			NestedObject: datasourceSchema.NestedAttributeObject{
				Attributes: DeploymentEnvironmentVariableAttributes(),
			},
			MarkdownDescription: "Deployment environment variables",
			Computed:            true,
		},
		"webserver_ingress_hostname": datasourceSchema.StringAttribute{
			MarkdownDescription: "Deployment webserver ingress hostname",
			Computed:            true,
		},
		"webserver_url": datasourceSchema.StringAttribute{
			MarkdownDescription: "Deployment webserver URL",
			Computed:            true,
		},
		"webserver_airflow_api_url": datasourceSchema.StringAttribute{
			MarkdownDescription: "Deployment webserver Airflow API URL",
			Computed:            true,
		},
		"webserver_cpu": datasourceSchema.StringAttribute{
			MarkdownDescription: "Deployment webserver CPU",
			Computed:            true,
		},
		"webserver_memory": datasourceSchema.StringAttribute{
			MarkdownDescription: "Deployment webserver memory",
			Computed:            true,
		},
		"webserver_replicas": datasourceSchema.Int64Attribute{
			MarkdownDescription: "Deployment webserver replicas",
			Computed:            true,
		},
		"status": datasourceSchema.StringAttribute{
			MarkdownDescription: "Deployment status",
			Computed:            true,
		},
		"status_reason": datasourceSchema.StringAttribute{
			MarkdownDescription: "Deployment status reason",
			Computed:            true,
		},
		"dag_tarball_version": datasourceSchema.StringAttribute{
			MarkdownDescription: "Deployment DAG tarball version",
			Computed:            true,
		},
		"desired_dag_tarball_version": datasourceSchema.StringAttribute{
			MarkdownDescription: "Deployment desired DAG tarball version",
			Computed:            true,
		},
		"worker_queues": datasourceSchema.ListNestedAttribute{
			NestedObject: datasourceSchema.NestedAttributeObject{
				Attributes: WorkerQueueSchemaAttributes(),
			},
			MarkdownDescription: "Deployment worker queues",
			Computed:            true,
		},
		"task_pod_node_pool_id": datasourceSchema.StringAttribute{
			MarkdownDescription: "Deployment task pod node pool identifier",
			Computed:            true,
		},
		"is_cicd_enforced": datasourceSchema.BoolAttribute{
			MarkdownDescription: "Whether the Deployment enforces CI/CD deploys",
			Computed:            true,
		},
		"type": datasourceSchema.StringAttribute{
			MarkdownDescription: "Deployment type",
			Computed:            true,
		},
		"is_dag_deploy_enabled": datasourceSchema.BoolAttribute{
			MarkdownDescription: "Whether DAG deploy is enabled",
			Computed:            true,
		},
		"scheduler_size": datasourceSchema.StringAttribute{
			MarkdownDescription: "Deployment scheduler size",
			Computed:            true,
		},
		"is_high_availability": datasourceSchema.BoolAttribute{
			MarkdownDescription: "Whether Deployment has high availability",
			Computed:            true,
		},
		"is_development_mode": datasourceSchema.BoolAttribute{
			MarkdownDescription: "Whether Deployment is in development mode",
			Computed:            true,
		},
		"workload_identity": datasourceSchema.StringAttribute{
			MarkdownDescription: "Deployment workload identity",
			Computed:            true,
		},
		"external_ips": datasourceSchema.ListAttribute{
			ElementType:         types.StringType,
			MarkdownDescription: "Deployment external IPs",
			Computed:            true,
		},
		"oidc_issuer_url": datasourceSchema.StringAttribute{
			MarkdownDescription: "Deployment OIDC issuer URL",
			Computed:            true,
		},
		"resource_quota_cpu": datasourceSchema.StringAttribute{
			MarkdownDescription: "Deployment resource quota CPU",
			Computed:            true,
		},
		"resource_quota_memory": datasourceSchema.StringAttribute{
			MarkdownDescription: "Deployment resource quota memory",
			Computed:            true,
		},
		"default_task_pod_cpu": datasourceSchema.StringAttribute{
			MarkdownDescription: "Deployment default task pod CPU",
			Computed:            true,
		},
		"default_task_pod_memory": datasourceSchema.StringAttribute{
			MarkdownDescription: "Deployment default task pod memory",
			Computed:            true,
		},
		"scaling_status": datasourceSchema.SingleNestedAttribute{
			MarkdownDescription: "Deployment scaling status",
			Computed:            true,
			Attributes:          ScalingStatusDataSourceAttributes(),
		},
		"scaling_spec": datasourceSchema.SingleNestedAttribute{
			MarkdownDescription: "Deployment scaling spec",
			Computed:            true,
			Attributes:          ScalingSpecDataSourceSchemaAttributes(),
		},
	}
}
+
+func DeploymentEnvironmentVariableAttributeTypes() map[string]attr.Type {
+ return map[string]attr.Type{
+ "key": types.StringType,
+ "value": types.StringType,
+ "updated_at": types.StringType,
+ "is_secret": types.BoolType,
+ }
+}
+
// DeploymentEnvironmentVariableAttributes returns the nested data source
// schema attributes for one Deployment environment variable. All attributes
// are Computed (read-only in a data source).
func DeploymentEnvironmentVariableAttributes() map[string]datasourceSchema.Attribute {
	return map[string]datasourceSchema.Attribute{
		"key": datasourceSchema.StringAttribute{
			MarkdownDescription: "Environment variable key",
			Computed:            true,
		},
		"value": datasourceSchema.StringAttribute{
			MarkdownDescription: "Environment variable value",
			Computed:            true,
		},
		"updated_at": datasourceSchema.StringAttribute{
			MarkdownDescription: "Environment variable last updated timestamp",
			Computed:            true,
		},
		"is_secret": datasourceSchema.BoolAttribute{
			MarkdownDescription: "Whether Environment variable is a secret",
			Computed:            true,
		},
	}
}
+
+func WorkerQueueAttributeTypes() map[string]attr.Type {
+ return map[string]attr.Type{
+ "id": types.StringType,
+ "name": types.StringType,
+ "astro_machine": types.StringType,
+ "is_default": types.BoolType,
+ "max_worker_count": types.Int64Type,
+ "min_worker_count": types.Int64Type,
+ "node_pool_id": types.StringType,
+ "pod_cpu": types.StringType,
+ "pod_memory": types.StringType,
+ "worker_concurrency": types.Int64Type,
+ }
+}
+
// WorkerQueueSchemaAttributes returns the nested data source schema
// attributes for one Deployment worker queue. All attributes are Computed
// (read-only in a data source).
func WorkerQueueSchemaAttributes() map[string]datasourceSchema.Attribute {
	return map[string]datasourceSchema.Attribute{
		"id": datasourceSchema.StringAttribute{
			MarkdownDescription: "Worker queue identifier",
			Computed:            true,
		},
		"name": datasourceSchema.StringAttribute{
			MarkdownDescription: "Worker queue name",
			Computed:            true,
		},
		"astro_machine": datasourceSchema.StringAttribute{
			MarkdownDescription: "Worker queue Astro machine value",
			Computed:            true,
		},
		"is_default": datasourceSchema.BoolAttribute{
			MarkdownDescription: "Whether Worker queue is default",
			Computed:            true,
		},
		"max_worker_count": datasourceSchema.Int64Attribute{
			MarkdownDescription: "Worker queue max worker count",
			Computed:            true,
		},
		"min_worker_count": datasourceSchema.Int64Attribute{
			MarkdownDescription: "Worker queue min worker count",
			Computed:            true,
		},
		"node_pool_id": datasourceSchema.StringAttribute{
			MarkdownDescription: "Worker queue node pool identifier",
			Computed:            true,
		},
		"pod_cpu": datasourceSchema.StringAttribute{
			MarkdownDescription: "Worker queue pod CPU",
			Computed:            true,
		},
		"pod_memory": datasourceSchema.StringAttribute{
			MarkdownDescription: "Worker queue pod memory",
			Computed:            true,
		},
		"worker_concurrency": datasourceSchema.Int64Attribute{
			MarkdownDescription: "Worker queue worker concurrency",
			Computed:            true,
		},
	}
}
diff --git a/internal/provider/schemas/deployments.go b/internal/provider/schemas/deployments.go
new file mode 100644
index 00000000..d7fb5e4c
--- /dev/null
+++ b/internal/provider/schemas/deployments.go
@@ -0,0 +1,114 @@
+package schemas
+
+import (
+ "github.com/astronomer/astronomer-terraform-provider/internal/provider/validators"
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+ "github.com/hashicorp/terraform-plugin-framework/schema/validator"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+// DeploymentsElementAttributeTypes returns the attr.Type map for one element
+// of the "deployments" list in the deployments data source. Nested object and
+// list types reuse the shared helpers (SubjectProfile, WorkerQueue, Scaling*)
+// so the model stays consistent with the single-deployment data source.
+func DeploymentsElementAttributeTypes() map[string]attr.Type {
+ return map[string]attr.Type{
+ "id": types.StringType,
+ "name": types.StringType,
+ "description": types.StringType,
+ "created_at": types.StringType,
+ "updated_at": types.StringType,
+ "created_by": types.ObjectType{
+ AttrTypes: SubjectProfileAttributeTypes(),
+ },
+ "updated_by": types.ObjectType{
+ AttrTypes: SubjectProfileAttributeTypes(),
+ },
+ "workspace_id": types.StringType,
+ "cluster_id": types.StringType,
+ "region": types.StringType,
+ "cloud_provider": types.StringType,
+ "astro_runtime_version": types.StringType,
+ "airflow_version": types.StringType,
+ "namespace": types.StringType,
+ "contact_emails": types.ListType{
+ ElemType: types.StringType,
+ },
+ "executor": types.StringType,
+ "scheduler_au": types.Int64Type,
+ "scheduler_cpu": types.StringType,
+ "scheduler_memory": types.StringType,
+ "scheduler_replicas": types.Int64Type,
+ "image_tag": types.StringType,
+ "image_repository": types.StringType,
+ "image_version": types.StringType,
+ "environment_variables": types.ListType{
+ ElemType: types.ObjectType{
+ AttrTypes: DeploymentEnvironmentVariableAttributeTypes(),
+ },
+ },
+ "webserver_ingress_hostname": types.StringType,
+ "webserver_url": types.StringType,
+ "webserver_airflow_api_url": types.StringType,
+ "webserver_cpu": types.StringType,
+ "webserver_memory": types.StringType,
+ "webserver_replicas": types.Int64Type,
+ "status": types.StringType,
+ "status_reason": types.StringType,
+ "dag_tarball_version": types.StringType,
+ "desired_dag_tarball_version": types.StringType,
+ "worker_queues": types.ListType{
+ ElemType: types.ObjectType{
+ AttrTypes: WorkerQueueAttributeTypes(),
+ },
+ },
+ "task_pod_node_pool_id": types.StringType,
+ "is_cicd_enforced": types.BoolType,
+ "type": types.StringType,
+ "is_dag_deploy_enabled": types.BoolType,
+ "scheduler_size": types.StringType,
+ "is_high_availability": types.BoolType,
+ "is_development_mode": types.BoolType,
+ "workload_identity": types.StringType,
+ "external_ips": types.ListType{
+ ElemType: types.StringType,
+ },
+ "oidc_issuer_url": types.StringType,
+ "resource_quota_cpu": types.StringType,
+ "resource_quota_memory": types.StringType,
+ "default_task_pod_cpu": types.StringType,
+ "default_task_pod_memory": types.StringType,
+ "scaling_status": types.ObjectType{
+ AttrTypes: ScalingStatusAttributeTypes(),
+ },
+ "scaling_spec": types.ObjectType{
+ AttrTypes: ScalingSpecAttributeTypes(),
+ },
+ }
+}
+
+// DeploymentsDataSourceSchemaAttributes returns the schema for the
+// deployments (plural) data source: a computed list of deployments plus
+// optional filters. "deployment_ids" and "workspace_ids" are validated as
+// cuid lists; "names" is a free-form string filter.
+func DeploymentsDataSourceSchemaAttributes() map[string]schema.Attribute {
+ return map[string]schema.Attribute{
+ "deployments": schema.ListNestedAttribute{
+ NestedObject: schema.NestedAttributeObject{
+ Attributes: DeploymentDataSourceSchemaAttributes(),
+ },
+ Computed: true,
+ },
+ "deployment_ids": schema.ListAttribute{
+ ElementType: types.StringType,
+ Optional: true,
+ Validators: []validator.List{
+ validators.ListIsCuids(),
+ },
+ },
+ "workspace_ids": schema.ListAttribute{
+ ElementType: types.StringType,
+ Optional: true,
+ Validators: []validator.List{
+ validators.ListIsCuids(),
+ },
+ },
+ "names": schema.ListAttribute{
+ ElementType: types.StringType,
+ Optional: true,
+ },
+ }
+}
diff --git a/internal/provider/schemas/scaling.go b/internal/provider/schemas/scaling.go
new file mode 100644
index 00000000..1b5076a7
--- /dev/null
+++ b/internal/provider/schemas/scaling.go
@@ -0,0 +1,156 @@
+package schemas
+
+import (
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ datasourceSchema "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+// ScalingSpec
+// ScalingSpecAttributeTypes returns the attr.Type map for a deployment's
+// scaling spec, which currently wraps only the hibernation spec object.
+func ScalingSpecAttributeTypes() map[string]attr.Type {
+ return map[string]attr.Type{
+ "hibernation_spec": types.ObjectType{
+ AttrTypes: HibernationSpecAttributeTypes(),
+ },
+ }
+}
+
+// HibernationSpecAttributeTypes returns the attr.Type map for a hibernation
+// spec: a single override object and a list of schedule objects.
+func HibernationSpecAttributeTypes() map[string]attr.Type {
+ return map[string]attr.Type{
+ "override": types.ObjectType{
+ AttrTypes: HibernationOverrideAttributeTypes(),
+ },
+ "schedules": types.ListType{
+ ElemType: types.ObjectType{
+ AttrTypes: HibernationScheduleAttributeTypes(),
+ },
+ },
+ }
+}
+
+// ScalingSpecDataSourceSchemaAttributes returns the data-source schema for a
+// deployment's scaling spec. Keys mirror ScalingSpecAttributeTypes.
+func ScalingSpecDataSourceSchemaAttributes() map[string]datasourceSchema.Attribute {
+ return map[string]datasourceSchema.Attribute{
+ "hibernation_spec": datasourceSchema.SingleNestedAttribute{
+ Attributes: HibernationSpecDataSourceSchemaAttributes(),
+ Computed: true,
+ },
+ }
+}
+
+// HibernationSpecDataSourceSchemaAttributes returns the data-source schema
+// for a hibernation spec. Keys mirror HibernationSpecAttributeTypes.
+func HibernationSpecDataSourceSchemaAttributes() map[string]datasourceSchema.Attribute {
+ return map[string]datasourceSchema.Attribute{
+ "override": datasourceSchema.SingleNestedAttribute{
+ Attributes: HibernationOverrideDataSourceSchemaAttributes(),
+ Computed: true,
+ },
+ "schedules": datasourceSchema.ListNestedAttribute{
+ NestedObject: datasourceSchema.NestedAttributeObject{
+ Attributes: HibernationScheduleDataSourceSchemaAttributes(),
+ },
+ Computed: true,
+ },
+ }
+}
+
+// HibernationOverrideDataSourceSchemaAttributes returns the data-source
+// schema for a hibernation override. Keys mirror
+// HibernationOverrideAttributeTypes.
+func HibernationOverrideDataSourceSchemaAttributes() map[string]datasourceSchema.Attribute {
+ return map[string]datasourceSchema.Attribute{
+ "is_active": datasourceSchema.BoolAttribute{
+ Computed: true,
+ MarkdownDescription: "Whether the override is active",
+ },
+ "is_hibernating": datasourceSchema.BoolAttribute{
+ Computed: true,
+ MarkdownDescription: "Whether the override is hibernating",
+ },
+ "override_until": datasourceSchema.StringAttribute{
+ Computed: true,
+ MarkdownDescription: "Time until the override is active",
+ },
+ }
+}
+
+// HibernationScheduleDataSourceSchemaAttributes returns the data-source
+// schema for a hibernation schedule. Keys mirror
+// HibernationScheduleAttributeTypes.
+func HibernationScheduleDataSourceSchemaAttributes() map[string]datasourceSchema.Attribute {
+ return map[string]datasourceSchema.Attribute{
+ "description": datasourceSchema.StringAttribute{
+ Computed: true,
+ MarkdownDescription: "Description of the schedule",
+ },
+ "hibernate_at_cron": datasourceSchema.StringAttribute{
+ Computed: true,
+ MarkdownDescription: "Cron expression for hibernation",
+ },
+ "is_enabled": datasourceSchema.BoolAttribute{
+ Computed: true,
+ MarkdownDescription: "Whether the schedule is enabled",
+ },
+ "wake_at_cron": datasourceSchema.StringAttribute{
+ Computed: true,
+ MarkdownDescription: "Cron expression for waking",
+ },
+ }
+}
+
+// ScalingStatus
+// ScalingStatusAttributeTypes returns the attr.Type map for a deployment's
+// scaling status, which currently wraps only the hibernation status object.
+func ScalingStatusAttributeTypes() map[string]attr.Type {
+ return map[string]attr.Type{
+ "hibernation_status": types.ObjectType{
+ AttrTypes: HibernationStatusAttributeTypes(),
+ },
+ }
+}
+
+// HibernationStatusAttributeTypes returns the attr.Type map for a
+// deployment's hibernation status.
+func HibernationStatusAttributeTypes() map[string]attr.Type {
+ return map[string]attr.Type{
+ "is_hibernating": types.BoolType,
+ "next_event_at": types.StringType,
+ "next_event_type": types.StringType,
+ "reason": types.StringType,
+ }
+}
+
+// HibernationOverrideAttributeTypes returns the attr.Type map for a
+// hibernation override.
+func HibernationOverrideAttributeTypes() map[string]attr.Type {
+ return map[string]attr.Type{
+ "is_active": types.BoolType,
+ "is_hibernating": types.BoolType,
+ "override_until": types.StringType,
+ }
+}
+
+// HibernationScheduleAttributeTypes returns the attr.Type map for a
+// hibernation schedule.
+func HibernationScheduleAttributeTypes() map[string]attr.Type {
+ return map[string]attr.Type{
+ "description": types.StringType,
+ "hibernate_at_cron": types.StringType,
+ "is_enabled": types.BoolType,
+ "wake_at_cron": types.StringType,
+ }
+}
+
+// ScalingStatusDataSourceAttributes returns the data-source schema for a
+// deployment's scaling status.
+// NOTE(review): sibling helpers are named "...DataSourceSchemaAttributes";
+// this one omits "Schema" — consider renaming for consistency (would require
+// updating callers, so left as-is here).
+func ScalingStatusDataSourceAttributes() map[string]datasourceSchema.Attribute {
+ return map[string]datasourceSchema.Attribute{
+ "hibernation_status": datasourceSchema.SingleNestedAttribute{
+ Attributes: HibernationStatusDataSourceSchemaAttributes(),
+ Computed: true,
+ },
+ }
+}
+
+// HibernationStatusDataSourceSchemaAttributes returns the data-source schema
+// for a deployment's hibernation status. Keys mirror
+// HibernationStatusAttributeTypes.
+func HibernationStatusDataSourceSchemaAttributes() map[string]datasourceSchema.Attribute {
+ return map[string]datasourceSchema.Attribute{
+ "is_hibernating": datasourceSchema.BoolAttribute{
+ Computed: true,
+ MarkdownDescription: "Whether the deployment is hibernating",
+ },
+ "next_event_at": datasourceSchema.StringAttribute{
+ Computed: true,
+ MarkdownDescription: "Time of the next event",
+ },
+ "next_event_type": datasourceSchema.StringAttribute{
+ Computed: true,
+ MarkdownDescription: "Type of the next event",
+ },
+ "reason": datasourceSchema.StringAttribute{
+ Computed: true,
+ MarkdownDescription: "Reason for the current state",
+ },
+ }
+}
diff --git a/internal/provider/schemas/subject_profile_test.go b/internal/provider/schemas/subject_profile_test.go
index 7a6fdc2f..e2b77778 100644
--- a/internal/provider/schemas/subject_profile_test.go
+++ b/internal/provider/schemas/subject_profile_test.go
@@ -80,6 +80,46 @@ var _ = Describe("Common Test", func() {
AvatarUrl: types.StringNull(),
ApiTokenName: types.StringNull(),
}),
+ Entry("platform.BasicSubjectProfile", platform.BasicSubjectProfile{
+ Id: "id",
+ }, models.SubjectProfile{
+ Id: types.StringValue("id"),
+ SubjectType: types.StringNull(),
+ Username: types.StringNull(),
+ FullName: types.StringNull(),
+ AvatarUrl: types.StringNull(),
+ ApiTokenName: types.StringNull(),
+ }),
+ Entry("*platform.BasicSubjectProfile", &platform.BasicSubjectProfile{
+ Id: "id",
+ }, models.SubjectProfile{
+ Id: types.StringValue("id"),
+ SubjectType: types.StringNull(),
+ Username: types.StringNull(),
+ FullName: types.StringNull(),
+ AvatarUrl: types.StringNull(),
+ ApiTokenName: types.StringNull(),
+ }),
+ Entry("iam.BasicSubjectProfile", iam.BasicSubjectProfile{
+ Id: "id",
+ }, models.SubjectProfile{
+ Id: types.StringValue("id"),
+ SubjectType: types.StringNull(),
+ Username: types.StringNull(),
+ FullName: types.StringNull(),
+ AvatarUrl: types.StringNull(),
+ ApiTokenName: types.StringNull(),
+ }),
+ Entry("*iam.BasicSubjectProfile", &iam.BasicSubjectProfile{
+ Id: "id",
+ }, models.SubjectProfile{
+ Id: types.StringValue("id"),
+ SubjectType: types.StringNull(),
+ Username: types.StringNull(),
+ FullName: types.StringNull(),
+ AvatarUrl: types.StringNull(),
+ ApiTokenName: types.StringNull(),
+ }),
)
})
})
diff --git a/internal/provider/schemas/workspace.go b/internal/provider/schemas/workspace.go
index 4142dfe3..2cacb292 100644
--- a/internal/provider/schemas/workspace.go
+++ b/internal/provider/schemas/workspace.go
@@ -26,10 +26,6 @@ func WorkspaceDataSourceSchemaAttributes() map[string]datasourceSchema.Attribute
MarkdownDescription: "Workspace description",
Computed: true,
},
- "organization_name": datasourceSchema.StringAttribute{
- MarkdownDescription: "Workspace organization name",
- Computed: true,
- },
"cicd_enforced_default": datasourceSchema.BoolAttribute{
MarkdownDescription: "Whether new Deployments enforce CI/CD deploys by default",
Computed: true,
@@ -74,13 +70,6 @@ func WorkspaceResourceSchemaAttributes() map[string]resourceSchema.Attribute {
MarkdownDescription: "Workspace description",
Required: true,
},
- "organization_name": resourceSchema.StringAttribute{
- MarkdownDescription: "Workspace organization name",
- Computed: true,
- PlanModifiers: []planmodifier.String{
- stringplanmodifier.UseStateForUnknown(),
- },
- },
"cicd_enforced_default": resourceSchema.BoolAttribute{
MarkdownDescription: "Whether new Deployments enforce CI/CD deploys by default",
Required: true,
diff --git a/internal/provider/schemas/workspaces.go b/internal/provider/schemas/workspaces.go
index 522611fa..bd28dca0 100644
--- a/internal/provider/schemas/workspaces.go
+++ b/internal/provider/schemas/workspaces.go
@@ -11,7 +11,6 @@ func WorkspacesElementAttributeTypes() map[string]attr.Type {
"id": types.StringType,
"name": types.StringType,
"description": types.StringType,
- "organization_name": types.StringType,
"cicd_enforced_default": types.BoolType,
"created_at": types.StringType,
"updated_at": types.StringType,
diff --git a/internal/provider/validators/is_cuid.go b/internal/provider/validators/is_cuid.go
index e8ef5da6..3daed155 100644
--- a/internal/provider/validators/is_cuid.go
+++ b/internal/provider/validators/is_cuid.go
@@ -45,3 +45,48 @@ func (v isCuidValidator) ValidateString(
func IsCuid() validator.String {
return isCuidValidator{}
}
+
+// Compile-time check that listIsCuidsValidator satisfies validator.List.
+var _ validator.List = listIsCuidsValidator{}
+
+// listIsCuidsValidator validates that every element of a list attribute is a
+// cuid. It is stateless; obtain one via ListIsCuids.
+type listIsCuidsValidator struct{}
+
+// Description returns the plain-text description of the validator.
+func (v listIsCuidsValidator) Description(ctx context.Context) string {
+ return v.MarkdownDescription(ctx)
+}
+
+// MarkdownDescription returns the markdown description of the validator.
+func (v listIsCuidsValidator) MarkdownDescription(_ context.Context) string {
+ return "each value in list must be a cuid"
+}
+
+// ValidateList checks that every element of the configured list is a cuid.
+// A null or unknown list is skipped entirely (the framework re-validates once
+// the value is known); a null/unknown element is reported as invalid.
+func (v listIsCuidsValidator) ValidateList(
+	ctx context.Context,
+	request validator.ListRequest,
+	response *validator.ListResponse,
+) {
+	if request.ConfigValue.IsNull() || request.ConfigValue.IsUnknown() {
+		return
+	}
+
+	for i, elem := range request.ConfigValue.Elements() {
+		if elem.IsNull() || elem.IsUnknown() {
+			response.Diagnostics.Append(validatordiag.InvalidAttributeValueMatchDiagnostic(
+				request.Path.AtListIndex(i),
+				v.Description(ctx),
+				elem.String(),
+			))
+			// Was missing: without this continue the cuid check below ran on
+			// the same element and emitted a duplicate diagnostic.
+			continue
+		}
+
+		// elem.String() wraps string values in quotes (`"abc"`), which would
+		// never pass cuid validation; extract the raw value when available.
+		value := elem.String()
+		if str, ok := elem.(interface{ ValueString() string }); ok {
+			value = str.ValueString()
+		}
+		if err := cuid.IsCuid(value); err != nil {
+			response.Diagnostics.Append(validatordiag.InvalidAttributeValueMatchDiagnostic(
+				request.Path.AtListIndex(i),
+				v.Description(ctx),
+				elem.String(),
+			))
+		}
+	}
+}
+
+// ListIsCuids returns a validator that checks every element of a list
+// attribute is a cuid.
+func ListIsCuids() validator.List {
+ return listIsCuidsValidator{}
+}
diff --git a/internal/utils/list.go b/internal/utils/list.go
new file mode 100644
index 00000000..37541e24
--- /dev/null
+++ b/internal/utils/list.go
@@ -0,0 +1,58 @@
+package utils
+
+import (
+ "context"
+ "strings"
+
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/samber/lo"
+)
+
+// StringList is a helper that creates a types.List of string values
+// StringList builds a types.List of string values from a plain string slice.
+// On conversion failure it returns an empty types.List along with the
+// diagnostics; on success the diagnostics are nil.
+func StringList(values []string) (types.List, diag.Diagnostics) {
+	elements := make([]attr.Value, 0, len(values))
+	for _, value := range values {
+		elements = append(elements, types.StringValue(value))
+	}
+	list, diags := types.ListValue(types.StringType, elements)
+	if diags.HasError() {
+		return types.List{}, diags
+	}
+	return list, nil
+}
+
+// ObjectList is a helper that creates a types.List of objects where each types.Object is created by the transformer function
+func ObjectList[T any](ctx context.Context, values []T, objectAttributeTypes map[string]attr.Type, transformer func(context.Context, T) (types.Object, diag.Diagnostics)) (types.List, diag.Diagnostics) {
+ if len(values) == 0 {
+ return types.ListNull(types.ObjectType{AttrTypes: objectAttributeTypes}), nil
+ }
+ objs := make([]attr.Value, len(values))
+ for i, value := range values {
+ obj, diags := transformer(ctx, value)
+ if diags.HasError() {
+ return types.List{}, diags
+ }
+ objs[i] = obj
+ }
+ objectList, diags := types.ListValue(types.ObjectType{AttrTypes: objectAttributeTypes}, objs)
+ if diags.HasError() {
+ return types.List{}, diags
+ }
+ return objectList, nil
+}
+
+// TypesListToStringSlicePtr converts a types.List to a pointer to a slice of strings
+// This is useful for converting a list of strings from the Terraform framework to a slice of strings used for calling the API
+// We prefer to use a pointer to a slice of strings because our API client query params usually have type *[]string
+// and we can easily assign the query param to the result of this function (regardless if the result is nil or not)
+// A null, unknown or empty list yields nil so the query param is simply omitted.
+func TypesListToStringSlicePtr(list types.List) *[]string {
+	elements := list.Elements()
+	if len(elements) == 0 {
+		return nil
+	}
+	slice := lo.Map(elements, func(id attr.Value, _ int) string {
+		// attr.Value.String() wraps string values in quotes (`"abc"`). Prefer
+		// the raw value; the previous strings.ReplaceAll approach also deleted
+		// quotes embedded *inside* the value, corrupting it. Trim only the
+		// surrounding quotes as a fallback for non-string element types.
+		if s, ok := id.(types.String); ok {
+			return s.ValueString()
+		}
+		return strings.Trim(id.String(), `"`)
+	})
+	return &slice
+}