diff --git a/.github/workflows/check-yaml-format.yml b/.github/workflows/check-yaml-format.yml
index 04bfac7d3..4623a9144 100644
--- a/.github/workflows/check-yaml-format.yml
+++ b/.github/workflows/check-yaml-format.yml
@@ -15,7 +15,7 @@ jobs:
       - name: Get changed files in the docs folder
         id: changed-files
-        uses: tj-actions/changed-files@v43.0.1
+        uses: tj-actions/changed-files@v44.0.0
         with:
           files: |
             _data/**/*.yml
diff --git a/.github/workflows/test_dispatcher.yml b/.github/workflows/test_dispatcher.yml
index 5851f369e..0f45e9a95 100644
--- a/.github/workflows/test_dispatcher.yml
+++ b/.github/workflows/test_dispatcher.yml
@@ -25,7 +25,7 @@ jobs:
       - name: Get changed files
         id: changed_files
-        uses: tj-actions/changed-files@v43.0.1
+        uses: tj-actions/changed-files@v44.0.0
         with:
           sha: ${{ github.event.pull_request.head.sha }}
           json: "true"
diff --git a/_data/meltano/extractors/tap-trello/matatika.yml b/_data/meltano/extractors/tap-trello/matatika.yml
index 40f6c774c..0760b107d 100644
--- a/_data/meltano/extractors/tap-trello/matatika.yml
+++ b/_data/meltano/extractors/tap-trello/matatika.yml
@@ -81,6 +81,6 @@ settings:
   label: Stream Maps
   name: stream_maps
 settings_group_validation:
-- - developer_api_key
-  - access_token
+- - access_token
+  - developer_api_key
 variant: matatika
diff --git a/_data/meltano/loaders/target-clickhouse/shaped-ai.yml b/_data/meltano/loaders/target-clickhouse/shaped-ai.yml
index cd6c1715a..6d280a628 100644
--- a/_data/meltano/loaders/target-clickhouse/shaped-ai.yml
+++ b/_data/meltano/loaders/target-clickhouse/shaped-ai.yml
@@ -23,6 +23,10 @@ settings:
   kind: boolean
   label: Add Record Metadata
   name: add_record_metadata
+- description: Maximum number of rows in each batch.
+  kind: integer
+  label: Batch Size Rows
+  name: batch_size_rows
 - description: The cluster to create tables in. This is passed as the `clickhouse_cluster`
     argument when creating a table. [documentation](https://clickhouse.com/docs/en/sql-reference/distributed-ddl)
     can be found here.
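For context, a minimal `meltano.yml` sketch of how the `batch_size_rows` setting added above might be used with this loader; the plugin, variant, and setting names come from the definition in the diff, while the value is purely illustrative:

```yaml
# Hypothetical meltano.yml excerpt; the batch size value is illustrative.
plugins:
  loaders:
  - name: target-clickhouse
    variant: shaped-ai
    config:
      add_record_metadata: true
      batch_size_rows: 10000  # flush a batch to ClickHouse after this many rows
```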
diff --git a/_data/meltano/loaders/target-redshift/ticketswap.yml b/_data/meltano/loaders/target-redshift/ticketswap.yml
new file mode 100644
index 000000000..a3d6480b1
--- /dev/null
+++ b/_data/meltano/loaders/target-redshift/ticketswap.yml
@@ -0,0 +1,169 @@
+capabilities:
+- about
+- stream-maps
+- schema-flattening
+- target-schema
+- hard-delete
+description: Amazon Redshift loader
+domain_url: https://aws.amazon.com/redshift/
+executable: target-redshift
+keywords:
+- meltano_sdk
+- database
+label: Amazon Redshift
+logo_url: /assets/logos/loaders/redshift.png
+maintenance_status: active
+name: target-redshift
+namespace: target_redshift
+next_steps: ''
+pip_url: target-redshift
+quality: silver
+repo: https://github.com/TicketSwap/target-redshift
+settings:
+- description: If set to false, the tap will ignore activate version messages. If
+    set to true, add_record_metadata must be set to true as well.
+  kind: boolean
+  label: Activate Version
+  name: activate_version
+  value: true
+- description: Note that this must be enabled for activate_version to work! This
+    adds _sdc_extracted_at, _sdc_batched_at, and more to every table. See https://sdk.meltano.com/en/latest/implementation/record_metadata.html
+    for more information.
+  kind: boolean
+  label: Add Record Metadata
+  name: add_record_metadata
+  value: true
+- description: Database name. Note if sqlalchemy_url is set this will be ignored.
+  kind: string
+  label: Database name
+  name: dbname
+- description: 'Redshift schema to send data to, example: tap-clickup'
+  kind: string
+  label: Default Target Schema
+  name: default_target_schema
+  value: $MELTANO_EXTRACT__LOAD_SCHEMA
+- description: Dialect+driver, see https://aws.amazon.com/blogs/big-data/use-the-amazon-redshift-sqlalchemy-dialect-to-interact-with-amazon-redshift.
+    Generally just leave this alone. Note if sqlalchemy_url is set this will be ignored.
+  kind: string
+  label: Dialect+Driver
+  name: dialect+driver
+  value: redshift+redshift_connector
+- description: "'True' to enable schema flattening and automatically expand nested
+    properties."
+  kind: boolean
+  label: Flattening Enabled
+  name: flattening_enabled
+- description: The max depth to flatten schemas.
+  kind: integer
+  label: Flattening Max Depth
+  name: flattening_max_depth
+- description: When activate version is sent from a tap this specifies if we should
+    delete the records that don't match, or mark them with a date in the `_sdc_deleted_at`
+    column. This config option is ignored if `activate_version` is set to false.
+  kind: boolean
+  label: Hard Delete
+  name: hard_delete
+  value: false
+- description: Hostname for redshift instance. Note if sqlalchemy_url is set this
+    will be ignored.
+  kind: string
+  label: Host
+  name: host
+- description: If set to true, the target will interpret the content encoding of
+    the schema to determine how to store the data. Using this option may result in
+    a more efficient storage of the data but may also result in an error if the data
+    is not encoded as expected.
+  kind: boolean
+  label: Interpret Content Encoding
+  name: interpret_content_encoding
+  value: false
+- description: Password used to authenticate. Note if sqlalchemy_url is set this
+    will be ignored.
+  kind: password
+  label: Password
+  name: password
+  sensitive: true
+- description: The port on which redshift is awaiting connection. Note if sqlalchemy_url
+    is set this will be ignored.
+  kind: integer
+  label: Port
+  name: port
+  value: 5432
+- description: SQLAlchemy connection string. This will override using host, user,
+    password, port, dialect, and all ssl settings. Note that you must escape password
+    special characters properly. See
+    https://docs.sqlalchemy.org/en/20/core/engines.html#escaping-special-characters-such-as-signs-in-passwords
+  kind: string
+  label: Sqlalchemy URL
+  name: sqlalchemy_url
+- description: Username used to connect to the bastion host.
+  kind: string
+  label: SSH Tunnel Username
+  name: ssh_tunnel.username
+- description: Whether or not to use ssl to verify the server's identity. Use ssl_certificate_authority
+    and ssl_mode for further customization. To use a client certificate to authenticate
+    yourself to the server, use ssl_client_certificate_enable instead. Note if sqlalchemy_url
+    is set this will be ignored.
+  kind: boolean
+  label: SSL Enable
+  name: ssl_enable
+  value: false
+- description: SSL Protection method. Must be one of disable, allow, prefer, require,
+    verify-ca, or verify-full. Note if sqlalchemy_url is set this will be ignored.
+  kind: string
+  label: SSL Mode
+  name: ssl_mode
+  value: verify-full
+- description: User-defined config values to be used within map expressions.
+  kind: object
+  label: Stream Map Config
+  name: stream_map_config
+- description: Config object for stream maps capability. For more information check
+    out [Stream Maps](https://sdk.meltano.com/en/latest/stream_maps.html).
+  kind: object
+  label: Stream Maps
+  name: stream_maps
+- description: User name used to authenticate. Note if sqlalchemy_url is set this
+    will be ignored.
+  kind: string
+  label: User
+  name: user
+- description: If true, use temporary credentials
+    (https://docs.aws.amazon.com/redshift/latest/mgmt/generating-iam-credentials-cli-api.html).
+    Note if sqlalchemy_url is set this will be ignored.
+  kind: boolean
+  label: Enable iam authentication
+  name: enable_iam_authentication
+  value: false
+- description: Redshift cluster identifier. Note if sqlalchemy_url is set or enable_iam_authentication
+    is false this will be ignored.
+  kind: string
+  label: Cluster identifier
+  name: cluster_identifier
+- description: Redshift copy role arn to use for the COPY command from s3.
+  kind: string
+  label: AWS redshift copy role arn
+  name: aws_redshift_copy_role_arn
+- description: S3 bucket to save staging files before using COPY command.
+  kind: string
+  label: S3 bucket
+  name: s3_bucket
+- description: S3 key prefix to save staging files before using COPY command.
+  kind: string
+  label: S3 key prefix
+  name: s3_key_prefix
+- description: Whether to remove staging files in S3 after loading.
+  kind: boolean
+  label: Remove S3 files
+  name: remove_s3_files
+  value: true
+- description: Where you want to store your staging files locally.
+  kind: string
+  label: Temp dir
+  name: temp_dir
+  value: .meltano/temp
+settings_group_validation:
+- []
+settings_preamble: 'This target uses boto3 to connect to AWS and perform copy and
+  possibly credential operations. The agent running this target should be connected
+  to AWS to be able to open a boto3 client.'
+target_schema: $TARGET_REDSHIFT_SCHEMA
+usage: ''
+variant: ticketswap
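To make the new loader definition concrete, here is a hypothetical `meltano.yml` excerpt wiring up the `ticketswap` variant. The setting names come from the definition above; the hostname, bucket, and role ARN are placeholders, and the password would normally be supplied via the `TARGET_REDSHIFT_PASSWORD` environment variable rather than inline:

```yaml
# Hypothetical configuration sketch; replace placeholder values with your own.
plugins:
  loaders:
  - name: target-redshift
    variant: ticketswap
    pip_url: target-redshift
    config:
      host: example-cluster.abc123.eu-west-1.redshift.amazonaws.com  # placeholder
      port: 5432
      user: loader
      dbname: analytics
      default_target_schema: $MELTANO_EXTRACT__LOAD_SCHEMA
      s3_bucket: example-staging-bucket  # staging area for the COPY command
      s3_key_prefix: meltano/staging/
      aws_redshift_copy_role_arn: arn:aws:iam::123456789012:role/redshift-copy  # placeholder
      remove_s3_files: true
```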
diff --git a/_data/meltano/loaders/target-snowflake/meltanolabs.yml b/_data/meltano/loaders/target-snowflake/meltanolabs.yml
index ccfd9cf7f..8e53a3bba 100644
--- a/_data/meltano/loaders/target-snowflake/meltanolabs.yml
+++ b/_data/meltano/loaders/target-snowflake/meltanolabs.yml
@@ -1,5 +1,6 @@
 capabilities:
 - about
+- hard-delete
 - schema-flattening
 - stream-maps
 description: Snowflake database loader
@@ -28,6 +29,10 @@ settings:
   label: Add Record Metadata
   name: add_record_metadata
   value: true
+- description: Maximum number of rows in each batch.
+  kind: integer
+  label: Batch Size Rows
+  name: batch_size_rows
 - description: Whether to remove batch files after processing.
   kind: boolean
   label: Clean Up Batch Files
@@ -41,6 +46,14 @@
   kind: string
   label: Default Target Schema
   name: default_target_schema
+- description: 'One or more LCID locale strings to produce localized output for: https://faker.readthedocs.io/en/master/#localization'
+  kind: array
+  label: Faker Config Locale
+  name: faker_config.locale
+- description: 'Value to seed the Faker generator for deterministic output: https://faker.readthedocs.io/en/master/#seeding-the-generator'
+  kind: string
+  label: Faker Config Seed
+  name: faker_config.seed
 - description: "'True' to enable schema flattening and automatically expand nested
     properties."
   kind: boolean
   label: Flattening Enabled
   name: flattening_enabled
@@ -50,6 +63,26 @@
   kind: integer
   label: Flattening Max Depth
   name: flattening_max_depth
+- description: Hard delete records.
+  kind: boolean
+  label: Hard Delete
+  name: hard_delete
+  value: false
+- description: The method to use when loading data into the destination. `append-only`
+    will always write all input records, whether the record already exists or not.
+    `upsert` will update existing records and insert new records. `overwrite` will
+    delete all existing records and insert all input records.
+  kind: options
+  label: Load Method
+  name: load_method
+  options:
+  - label: Append Only
+    value: append-only
+  - label: Upsert
+    value: upsert
+  - label: Overwrite
+    value: overwrite
+  value: append-only
 - description: The password for your Snowflake user.
   kind: password
   label: Password
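As a sketch of how the `load_method` options above interact with the new `hard_delete` flag, a hypothetical configuration that upserts records and physically deletes rows that disappear upstream might look like this; both values differ from the defaults shown in the definition:

```yaml
# Hypothetical excerpt; defaults are load_method: append-only and hard_delete: false.
plugins:
  loaders:
  - name: target-snowflake
    variant: meltanolabs
    config:
      load_method: upsert  # update matching rows, insert new ones
      hard_delete: true    # delete rather than tombstone removed records
```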
@@ -86,6 +119,11 @@
   kind: string
   label: User
   name: user
+- description: Whether to validate the schema of the incoming streams.
+  kind: boolean
+  label: Validate Records
+  name: validate_records
+  value: true
 - description: The initial warehouse for the session.
   kind: string
   label: Warehouse
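Finally, the remaining new Snowflake settings (`batch_size_rows`, `validate_records`, and the `faker_config.*` pair) could be combined in one config block; a minimal sketch with illustrative values, assuming the dotted setting names nest as shown:

```yaml
# Hypothetical excerpt; setting names come from the definition above.
plugins:
  loaders:
  - name: target-snowflake
    variant: meltanolabs
    config:
      batch_size_rows: 50000  # rows per batch
      validate_records: true  # matches the default above
      faker_config:
        locale: [en_US]       # LCID locales for faker-based stream maps
        seed: "42"            # deterministic fake values across runs
```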