/*
 * UNPKG
 *
 * @stacktape/sdk
 *
 * Version:
 *
 * DevOps-free cloud development framework.
 *
 * 1,458 lines (1,454 loc) 888 kB
 */
// @ts-nocheck
/* eslint-disable */

/**
 * Internal pipeline events that Stacktape can emit to the event log while
 * executing a command (deploy, delete, dev, ...).
 */
export type LoggableEventType =
  | "ANALYZE_DEPENDENCIES"
  | "ANALYZE_PROJECT"
  | "BUILD_CODE"
  | "BUILD_IMAGE"
  | "BUILD_NEXTJS_PROJECT"
  | "BUNDLING_NEXTJS_FUNCTIONS"
  | "CALCULATE_CHANGES"
  | "CALCULATE_CHECKSUM"
  | "CALCULATE_SIZE"
  | "CLEANUP"
  | "CREATE_DOCKERFILE"
  | "CREATE_RESOURCES_FOR_ARTIFACTS"
  | "DEBUG"
  | "DELETE_ARTIFACTS"
  | "DELETE_OBSOLETE_ARTIFACTS"
  | "DELETE_STACK"
  | "DEPLOY"
  | "FETCH_BUDGET_INFO"
  | "FETCH_DOMAIN_STATUSES"
  | "FETCH_EC2_INFO"
  | "FETCH_MAIL_INFO"
  | "FETCH_OPENSEARCH_INFO"
  | "FETCH_PREVIOUS_ARTIFACTS"
  | "FETCH_STACK_DATA"
  | "FETCH_USERS_FROM_USERPOOL"
  | "GENERATE_AI_RESPONSE"
  | "HOTSWAP_UPDATE"
  | "INJECT_ENVIRONMENT"
  | "INSTALL_DEPENDENCIES"
  | "INVALIDATE_CACHE"
  | "LOAD_AWS_CREDENTIALS"
  | "LOAD_CONFIG_FILE"
  | "LOAD_PROVIDER_CREDENTIALS"
  | "LOAD_TARGET_STACK_INFO"
  | "LOAD_USER_DATA"
  | "PACKAGE_ARTIFACTS"
  | "PREPARE_PIPELINE"
  | "REBUILD_CODE"
  | "REFETCH_STACK_DATA"
  | "REGISTER_CF_PRIVATE_TYPES"
  | "REGISTER_ECS_TASK_DEFINITION"
  | "REPACKAGE_ARTIFACTS"
  | "RESOLVE_CONFIG"
  | "RESOLVE_DEPENDENCIES"
  | "ROLLBACK_STACK"
  | "START_DEPLOYMENT"
  | "SYNC_BUCKET"
  | "UPDATE_ECS_SERVICE"
  | "UPDATE_FUNCTION_CODE"
  | "UPDATE_STACK"
  | "UPLOAD_BUCKET_CONTENT"
  | "UPLOAD_DEPLOYMENT_ARTIFACTS"
  | "UPLOAD_IMAGE"
  | "UPLOAD_PACKAGE"
  | "UPLOAD_PROJECT"
  | "VALIDATE_CONFIG_TEMP"
  | "VALIDATE_TEMPLATE"
  | "ZIP_PACKAGE"
  | "ZIP_PROJECT";

/** Lifecycle phase captured by a log entry: an event starts, may post updates, then finishes. */
export type EventLogEntryType = "FINISH" | "START" | "UPDATE";

/**
 * One top-level entry in the event log, together with any events captured
 * as its children.
 */
export interface EventLogEntry {
  // Which pipeline event this entry belongs to.
  eventType: LoggableEventType;
  // Lifecycle phase of the capture (START / UPDATE / FINISH).
  captureType: EventLogEntryType;
  // Time of capture. NOTE(review): unit (seconds vs milliseconds) is not
  // established by this file — confirm against the emitter.
  timestamp: number;
  // Event-specific payload; its shape depends on eventType, hence `unknown`.
  data: unknown;
  description: string;
  additionalMessage?: string;
  // Message attached when the event finishes.
  finalMessage?: string;
  // Child events grouped under string keys.
  childEvents: {
    [k: string]: ChildEventLogEntry[];
  };
}

/**
 * An event captured as a child of an EventLogEntry. Same fields as the
 * parent entry, but carries no further nesting (no childEvents).
 */
export interface ChildEventLogEntry {
  data: unknown;
  eventType: LoggableEventType;
  captureType: EventLogEntryType;
  timestamp: number;
  description: string;
  additionalMessage?: string;
  finalMessage?: string;
}

/**
 * AWS regions selectable for deployment.
 * (This union is continued on the following source line.)
 */
export type AwsRegion =
  | "af-south-1"
  | "ap-east-1"
  | "ap-northeast-1"
  | "ap-northeast-2" |
"ap-northeast-3" | "ap-south-1" | "ap-southeast-1" | "ap-southeast-2" | "ca-central-1" | "eu-central-1" | "eu-north-1" | "eu-south-1" | "eu-west-1" | "eu-west-2" | "eu-west-3" | "me-south-1" | "sa-east-1" | "us-east-1" | "us-east-2" | "us-west-1" | "us-west-2"; export type StacktapeResourceDefinition = | ContainerWorkload | BatchJob | WebService | PrivateService | WorkerService | RelationalDatabase | ApplicationLoadBalancer | NetworkLoadBalancer | HttpApiGateway | Bucket | UserAuthPool | EventBus | Bastion | DynamoDbTable | StateMachine | MongoDbAtlasCluster | RedisCluster | CustomResourceInstance | CustomResourceDefinition | UpstashRedis | DeploymentScript | AwsCdkConstruct | SqsQueue | SnsTopic | HostingBucket | WebAppFirewall | NextjsWeb | OpenSearchDomain | EfsFilesystem | LambdaFunction | EdgeLambdaFunction; export type HttpMethod = "*" | "DELETE" | "GET" | "HEAD" | "OPTIONS" | "PATCH" | "POST" | "PUT"; export type ApplicationLoadBalancerAlarmTrigger = | ApplicationLoadBalancerCustomTrigger | ApplicationLoadBalancerErrorRateTrigger | ApplicationLoadBalancerUnhealthyTargetsTrigger; export type AlarmUserIntegration = MsTeamsIntegration | SlackIntegration | EmailIntegration; export type HttpApiGatewayAlarmTrigger = HttpApiGatewayErrorRateTrigger | HttpApiGatewayLatencyTrigger; /** * #### Configures the load balancing mechanism to use. * * --- * * Supported types are `service-connect` and `application-load-balancer`. * * - **`service-connect`**: * - Distributes traffic evenly to available containers. * - Connections are only possible from other container-based resources in the stack. * - Supports any TCP protocol. * - This option is significantly cheaper, costing only ~$0.50 per month for a private Cloud Map DNS namespace. * * - **`application-load-balancer`**: * - Distributes traffic to available containers in a round-robin fashion. * - Supports the HTTP protocol only. 
* - Uses a pricing model that combines a flat hourly charge (~$0.0252/hour) with usage-based charges for LCUs (Load Balancer Capacity Units) (~$0.08/hour). * - Eligible for the AWS Free Tier. For more details, see the [AWS pricing documentation](https://aws.amazon.com/elasticloadbalancing/pricing/). */ export type PrivateServiceLoadBalancing = { type: "application-load-balancer" | "service-connect"; } & string; export type RelationalDatabaseAlarmTrigger = | RelationalDatabaseReadLatencyTrigger | RelationalDatabaseWriteLatencyTrigger | RelationalDatabaseCPUUtilizationTrigger | RelationalDatabaseFreeStorageTrigger | RelationalDatabaseFreeMemoryTrigger | RelationalDatabaseConnectionCountTrigger; export type AllowedOauthFlow = "client_credentials" | "code" | "implicit"; export type State = Choice | Fail | StateMachineMap | Parallel | Pass | Succeed | Task | Wait; export type SqsQueueAlarmTrigger = SqsQueueReceivedMessagesCountTrigger | SqsQueueNotEmptyTrigger; export type LambdaAlarmTrigger = LambdaErrorRateTrigger | LambdaDurationTrigger; export type ValueNumber = IntrinsicFunction | number; export type ValueString = IntrinsicFunction | string; export type DeletionPolicy = "Delete" | "Retain" | "Snapshot"; export type ValueBoolean = IntrinsicFunction | boolean; export type ListString = string[] | IntrinsicFunction; export interface StacktapeConfig { /** * #### The name of this service. * * --- * * > **Deprecated:** Use the `--projectName` option in the CLI instead. * * The CloudFormation stack name will be in the format: `{serviceName}-{stage}`. * * Must be alphanumeric and can contain dashes. Must match the regex `[a-zA-Z][-a-zA-Z0-9]*`. */ serviceName?: string; /** * #### Configuration for 3rd-party service providers. */ providerConfig?: { mongoDbAtlas?: MongoDbAtlasProvider; upstash?: UpstashProvider; }; /** * #### Defines variables that can be used throughout the configuration. * * --- * * Variables can be accessed using the `$Var().{variable-name}` directive. 
* They are useful when you want to reuse the same value for multiple properties. * * Example: `dbAddress: $ResourceParam('myDatabase', 'host')` */ variables?: { [k: string]: unknown; }; budgetControl?: BudgetControl; hooks?: Hooks; /** * #### A list of script definitions. * * --- * * Scripts allow you to specify and execute custom logic. Defining scripts in your Stacktape configuration offers several benefits: * - They are easily reusable by all members of your team. * - They can be executed automatically as part of lifecycle [hooks](https://docs.stacktape.com/configuration/hooks/) (e.g., before/after `deploy`/`delete`) or manually using the [`script:run` command](https://docs.stacktape.com/cli/commands/script-run/). * - You can use the `connectTo` property to easily inject environment variables for connecting to your stack's resources. * - You can leverage bastion scripts and tunneling to access resources that are only available within a VPC. * * There are three types of scripts: * 1. **`local-script`**: Executed locally on the same machine where the Stacktape command is run. * 2. **`local-script-with-bastion-tunneling`**: Same as `local-script`, but connections to resources in the `connectTo` list are tunneled through a bastion host, allowing you to access VPC-only resources. * 3. **`bastion-script`**: Executed on the bastion host itself. * * Scripts can be either shell commands or files written in JavaScript, TypeScript, or Python. */ scripts?: { [k: string]: LocalScript | BastionScript | LocalScriptWithBastionTunneling; }; /** * #### Configures custom, user-defined directives for use in this configuration. * * --- * * Directives can be used to dynamically configure certain aspects of your stack. */ directives?: DirectiveDefinition[]; deploymentConfig?: DeploymentConfig; stackConfig?: StackConfig; /** * #### The infrastructure resources that make up your stack. * * --- * * Each resource consists of multiple underlying AWS resources. 
* To see all resources in this stack, including their underlying CloudFormation resources, use the `stacktape stack-info --detailed` command. * Each resource specified here counts towards your resource limit. */ resources: { [k: string]: StacktapeResourceDefinition; }; /** * #### Raw CloudFormation resources that will be deployed in this stack. * * --- * * These resources will be merged with the resources managed by Stacktape. * Each CloudFormation resource consists of a logical name and its definition. * * To avoid logical name conflicts, you can see all logical names for resources deployed by Stacktape using the `stacktape stack-info --detailed` command. * Resources specified here do not count towards your resource limit. * * For a list of all supported AWS CloudFormation resources, see the [AWS documentation](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html). */ cloudformationResources?: { [k: string]: Default; }; } export interface MongoDbAtlasProvider { /** * #### Your MongoDB Atlas public API key. * * --- * * You can get API keys for your organization by following the [guide in the docs](https://docs.stacktape.com/user-guides/mongo-db-atlas-credentials/). */ publicKey?: string; /** * #### Your MongoDB Atlas private API key. * * --- * * You can get API keys for your organization by following the [guide in the docs](https://docs.stacktape.com/user-guides/mongo-db-atlas-credentials/). * * For security reasons, you should store your credentials as secrets. For more details, see the [secrets guide](https://docs.stacktape.com/resources/secrets/). */ privateKey?: string; /** * #### Your MongoDB Atlas Organization ID. * * --- * * You can get the organization ID for your organization by following the [guide in the docs](https://docs.stacktape.com/user-guides/mongo-db-atlas-credentials/). 
*/ organizationId?: string; accessibility?: MongoDbAtlasAccessibility; } /** * #### Configures the connectivity settings of the MongoDB Atlas project. * * --- * * If your stack contains a MongoDB Atlas cluster, Stacktape will automatically create a MongoDB Atlas Project for it. * All MongoDB Atlas clusters in your stack will be deployed within this project. * * Network connectivity to clusters is configured at the project level, so the accessibility settings defined here will apply to all MongoDB Atlas clusters in your stack. */ export interface MongoDbAtlasAccessibility { /** * #### Configures the accessibility mode for this database. * * --- * * The following modes are supported: * * - **`internet`**: The least restrictive mode. The database can be accessed from anywhere on the internet. * - **`vpc`**: The database can only be accessed from resources within your VPC. This includes any [function](https://docs.stacktape.com/compute-resources/lambda-functions) (with `joinDefaultVpc: true`), [batch job](https://docs.stacktape.com/compute-resources/batch-jobs), or [container workload](https://docs.stacktape.com/compute-resources/multi-container-workloads) in your stack, provided they have the required credentials. IP addresses configured in `whitelistedIps` can also access the database. * - **`scoping-workloads-in-vpc`**: Similar to `vpc` mode, but more restrictive. In addition to being in the same VPC, resources must have the necessary security group permissions to access the cluster. For functions, batch jobs, and container services, these permissions can be granted using the `connectTo` property in their respective configurations. IP addresses configured in `whitelistedIps` can also access the cluster. * - **`whitelisted-ips-only`**: The cluster can only be accessed from the IP addresses and CIDR blocks listed in `whitelistedIps`. * * To learn more about VPCs, see the [VPC documentation](https://docs.stacktape.com/user-guides/vpcs/). 
*/ accessibilityMode: "internet" | "scoping-workloads-in-vpc" | "vpc" | "whitelisted-ips-only"; /** * #### A list of IP addresses or IP ranges (in CIDR format) that are allowed to access the cluster. * * --- * * The behavior of this property varies based on the `accessibilityMode`: * - **`internet`**: This property has no effect, as the database is accessible from anywhere. * - **`vpc`** and **`scoping-workloads-in-vpc`**: These IP addresses can be used to allow access from specific locations outside of the VPC (e.g., your office IP address). * - **`whitelisted-ips-only`**: These are the only addresses that can access the database. */ whitelistedIps?: string[]; } export interface UpstashProvider { /** * #### The email address associated with your Upstash account. * * --- * * All database operations (create, update, delete) will be performed on behalf of this account. */ accountEmail: string; /** * #### The API key associated with your Upstash account or team. * * --- * * You can create an API key in the [Upstash console](https://console.upstash.com/account/api). */ apiKey: string; } /** * #### Configures a monthly budget and notifications for the stack. * * --- * * Budget control allows you to monitor your spending and configure email notifications when cost thresholds are met. * The budget is reset at the beginning of each calendar month. */ export interface BudgetControl { /** * #### Budget Limit * * --- * * The total cost (in USD) that you want to track with this budget. Notification thresholds are calculated as a percentage of this limit. */ limit: number; /** * #### Budget Notifications * * --- * * A list of notifications to send when a budget threshold is met. * * Notifications are sent via email and can be triggered based on either actual or forecasted spend. You can configure up to 5 notifications per stack. 
*/ notifications?: BudgetNotification[]; } export interface BudgetNotification { /** * #### Budget Type * * --- * * Determines whether the notification is based on `ACTUAL` or `FORECASTED` spend. * * - `ACTUAL`: Based on the costs you have already incurred this month. * - `FORECASTED`: Based on a prediction of your total spend for the month. * * > **Note:** AWS requires about 5 weeks of usage data to generate accurate forecasts. Forecast-based notifications will not be triggered until sufficient historical data is available. */ budgetType?: "ACTUAL" | "FORECASTED"; /** * #### Threshold Percentage * * --- * * The percentage of the budget limit at which the notification should be triggered. * * For example, if the `limit` is $200 and `thresholdPercentage` is 80, the notification will be sent when the spend exceeds $160. */ thresholdPercentage?: number; /** * #### Email Recipients * * --- * * A list of email addresses that will receive the notification. You can specify up to 10 recipients. */ emails: string[]; } /** * #### Configures hooks to be executed before or after specified commands. * * --- * * Hooks are used to automatically execute scripts from the `scripts` section. */ export interface Hooks { /** * #### Before Deploy * * --- * * A list of scripts to execute before the `deploy` and `codebuild:deploy` commands. When using `codebuild:deploy`, these hooks run in the CodeBuild environment, not on your local machine. */ beforeDeploy?: NamedScriptLifecycleHook[]; /** * #### After Deploy * * --- * * A list of scripts to execute after the `deploy` and `codebuild:deploy` commands. When using `codebuild:deploy`, these hooks run in the CodeBuild environment, not on your local machine. */ afterDeploy?: NamedScriptLifecycleHook[]; /** * #### Before Delete * * --- * * A list of scripts to execute before the `delete` command. These hooks only run if you provide the `--configPath` and `--stage` options when running the command. 
*/ beforeDelete?: NamedScriptLifecycleHook[]; /** * #### After Delete * * --- * * A list of scripts to execute after the `delete` command. These hooks only run if you provide the `--configPath` and `--stage` options when running the command. */ afterDelete?: NamedScriptLifecycleHook[]; /** * #### Before Bucket Sync * * --- * * A list of scripts to execute before the `bucket:sync` command. */ beforeBucketSync?: NamedScriptLifecycleHook[]; /** * #### After Bucket Sync * * --- * * A list of scripts to execute after the `bucket:sync` command. */ afterBucketSync?: NamedScriptLifecycleHook[]; /** * #### Before Dev * * --- * * A list of scripts to execute before the `dev` command. */ beforeDev?: NamedScriptLifecycleHook[]; /** * #### After Dev * * --- * * A list of scripts to execute after the `dev` command. */ afterDev?: NamedScriptLifecycleHook[]; } export interface NamedScriptLifecycleHook { /** * #### Script Name * * --- * * The name of the script to execute. The script must be defined in the `scripts` section of your configuration. */ scriptName: string; /** * #### Skip on CI * * --- * * If `true`, this hook will not run in a CI/CD environment (e.g., AWS CodeBuild, GitHub Actions, GitLab CI). This is useful for hooks that should only run locally. */ skipOnCI?: boolean; /** * #### Skip on Local * * --- * * If `true`, this hook will only run in a CI/CD environment and will be skipped during local execution. */ skipOnLocal?: boolean; } export interface LocalScript { type: "local-script"; properties: LocalScriptProps; } export interface LocalScriptProps { /** * #### Execute Script * * --- * * The path to a script file to execute. The script can be written in JavaScript, TypeScript, or Python and runs in a separate process. * * The executable is determined by `defaults:configure` or the system default (`node` for JS/TS, `python` for Python). You can only use one of `executeScript`, `executeScripts`, `executeCommand`, or `executeCommands`. 
*/ executeScript?: string; /** * #### Execute Command * * --- * * A single terminal command to execute in a separate shell process. * * The command runs on the machine executing the Stacktape command. Be aware of potential differences between local and CI environments (e.g., OS, shell). You can only use one of `executeScript`, `executeScripts`, `executeCommand`, or `executeCommands`. */ executeCommand?: string; /** * #### Execute Scripts * * --- * * A list of script files to execute sequentially. Each script runs in a separate process. * * The script can be written in JavaScript, TypeScript, or Python. The executable is determined by `defaults:configure` or the system default. You can only use one of `executeScript`, `executeScripts`, `executeCommand`, or `executeCommands`. */ executeScripts?: string[]; /** * #### Execute Commands * * --- * * A list of terminal commands to execute sequentially. Each command runs in a separate shell process. * * The commands run on the machine executing the Stacktape command. Be aware of potential differences between environments. You can only use one of `executeScript`, `executeScripts`, `executeCommand`, or `executeCommands`. */ executeCommands?: string[]; /** * #### Working Directory * * --- * * The directory where the script or command will be executed. */ cwd?: string; /** * #### Pipe Stdio * * --- * * If `true`, pipes the standard input/output (stdio) of the hook process to the main process. This allows you to see logs from your hook and interact with prompts. */ pipeStdio?: boolean; /** * #### Connect To * * --- * * A list of resources the script needs to interact with. Stacktape automatically injects environment variables with connection details for each specified resource. * * Environment variable names are in the format `STP_[RESOURCE_NAME]_[VARIABLE_NAME]` (e.g., `STP_MY_DATABASE_CONNECTION_STRING`). 
* * **Injected Variables by Resource Type:** * - **`Bucket`**: `NAME`, `ARN` * - **`DynamoDbTable`**: `NAME`, `ARN`, `STREAM_ARN` * - **`MongoDbAtlasCluster`**: `CONNECTION_STRING` * - **`RelationalDatabase`**: `CONNECTION_STRING`, `JDBC_CONNECTION_STRING`, `HOST`, `PORT`. For Aurora clusters, `READER_CONNECTION_STRING`, `READER_JDBC_CONNECTION_STRING`, and `READER_HOST` are also included. * - **`RedisCluster`**: `HOST`, `READER_HOST`, `PORT` * - **`EventBus`**: `ARN` * - **`Function`**: `ARN` * - **`BatchJob`**: `JOB_DEFINITION_ARN`, `STATE_MACHINE_ARN` * - **`UserAuthPool`**: `ID`, `CLIENT_ID`, `ARN` * - **`SnsTopic`**: `ARN`, `NAME` * - **`SqsQueue`**: `ARN`, `NAME`, `URL` * - **`UpstashKafkaTopic`**: `TOPIC_NAME`, `TOPIC_ID`, `USERNAME`, `PASSWORD`, `TCP_ENDPOINT`, `REST_URL` * - **`UpstashRedis`**: `HOST`, `PORT`, `PASSWORD`, `REST_TOKEN`, `REST_URL`, `REDIS_URL` * - **`PrivateService`**: `ADDRESS` * - **`WebService`**: `URL` */ connectTo?: string[]; /** * #### Environment Variables * * --- * * A list of environment variables to pass to the script or command. * * Values can be: * - A static string, number, or boolean. * - The result of a [custom directive](https://docs.stacktape.com/configuration/directives/#custom-directives). * - A reference to another resource's parameter using the [`$ResourceParam` directive](https://docs.stacktape.com/configuration/referencing-parameters/). * - A value from a [secret](https://docs.stacktape.com/resources/secrets/) using the [`$Secret` directive](https://docs.stacktape.com/configuration/directives/#secret). */ environment?: EnvironmentVar[]; /** * #### Assume Role of Resource * * --- * * The name of a deployed resource whose IAM role the script should assume. This grants the script the same permissions as the specified resource. * * The resource must be deployed before the script is executed. Stacktape injects temporary AWS credentials as environment variables, which are automatically used by most AWS SDKs and CLIs. 
* * **Supported Resource Types:** * - `function` * - `batch-job` * - `worker-service` * - `web-service` * - `private-service` * - `multi-container-workload` * - `nextjs-web` */ assumeRoleOfResource?: string; } export interface EnvironmentVar { /** * #### Variable Name * * --- * * The name of the environment variable. */ name: string; /** * #### Variable Value * * --- * * The value of the environment variable. Numbers and booleans will be converted to strings. */ value: string | number | boolean; } export interface BastionScript { type: "bastion-script"; properties: BastionScriptProps; } export interface BastionScriptProps { /** * #### Bastion Resource Name * * --- * * The name of the bastion resource on which the commands will be executed. */ bastionResource?: string; /** * #### Execute Command * * --- * * A single terminal command to execute on the bastion host. Logs from the execution are streamed to your terminal. * * You can use either `executeCommand` or `executeCommands`, but not both. */ executeCommand?: string; /** * #### Execute Commands * * --- * * A list of terminal commands to execute sequentially as a script on the bastion host. Logs from the execution are streamed to your terminal. * * You can use either `executeCommand` or `executeCommands`, but not both. */ executeCommands?: string[]; /** * #### Working Directory * * --- * * The directory on the bastion host where the command will be executed. */ cwd?: string; /** * #### Connect To * * --- * * A list of resources the script needs to interact with. Stacktape automatically injects environment variables with connection details for each specified resource. * * Environment variable names are in the format `STP_[RESOURCE_NAME]_[VARIABLE_NAME]` (e.g., `STP_MY_DATABASE_CONNECTION_STRING`). 
* * **Injected Variables by Resource Type:** * - **`Bucket`**: `NAME`, `ARN` * - **`DynamoDbTable`**: `NAME`, `ARN`, `STREAM_ARN` * - **`MongoDbAtlasCluster`**: `CONNECTION_STRING` * - **`RelationalDatabase`**: `CONNECTION_STRING`, `JDBC_CONNECTION_STRING`, `HOST`, `PORT`. For Aurora clusters, `READER_CONNECTION_STRING`, `READER_JDBC_CONNECTION_STRING`, and `READER_HOST` are also included. * - **`RedisCluster`**: `HOST`, `READER_HOST`, `PORT` * - **`EventBus`**: `ARN` * - **`Function`**: `ARN` * - **`BatchJob`**: `JOB_DEFINITION_ARN`, `STATE_MACHINE_ARN` * - **`UserAuthPool`**: `ID`, `CLIENT_ID`, `ARN` * - **`SnsTopic`**: `ARN`, `NAME` * - **`SqsQueue`**: `ARN`, `NAME`, `URL` * - **`UpstashKafkaTopic`**: `TOPIC_NAME`, `TOPIC_ID`, `USERNAME`, `PASSWORD`, `TCP_ENDPOINT`, `REST_URL` * - **`UpstashRedis`**: `HOST`, `PORT`, `PASSWORD`, `REST_TOKEN`, `REST_URL`, `REDIS_URL` * - **`PrivateService`**: `ADDRESS` * - **`WebService`**: `URL` */ connectTo?: string[]; /** * #### Environment Variables * * --- * * A list of environment variables to pass to the script or command. * * Values can be: * - A static string, number, or boolean. * - The result of a [custom directive](https://docs.stacktape.com/configuration/directives/#custom-directives). * - A reference to another resource's parameter using the [`$ResourceParam` directive](https://docs.stacktape.com/configuration/referencing-parameters/). * - A value from a [secret](https://docs.stacktape.com/resources/secrets/) using the [`$Secret` directive](https://docs.stacktape.com/configuration/directives/#secret). */ environment?: EnvironmentVar[]; /** * #### Assume Role of Resource * * --- * * The name of a deployed resource whose IAM role the script should assume. This grants the script the same permissions as the specified resource. * * The resource must be deployed before the script is executed. Stacktape injects temporary AWS credentials as environment variables, which are automatically used by most AWS SDKs and CLIs. 
* * **Supported Resource Types:** * - `function` * - `batch-job` * - `worker-service` * - `web-service` * - `private-service` * - `multi-container-workload` * - `nextjs-web` */ assumeRoleOfResource?: string; } export interface LocalScriptWithBastionTunneling { type: "local-script-with-bastion-tunneling"; properties: LocalScriptWithBastionTunnelingProps; } export interface LocalScriptWithBastionTunnelingProps { /** * #### Bastion Resource Name * * --- * * The name of the bastion resource to use for tunneling to protected resources. */ bastionResource?: string; /** * #### Execute Script * * --- * * The path to a script file to execute. The script can be written in JavaScript, TypeScript, or Python and runs in a separate process. * * The executable is determined by `defaults:configure` or the system default (`node` for JS/TS, `python` for Python). You can only use one of `executeScript`, `executeScripts`, `executeCommand`, or `executeCommands`. */ executeScript?: string; /** * #### Execute Command * * --- * * A single terminal command to execute in a separate shell process. * * The command runs on the machine executing the Stacktape command. Be aware of potential differences between local and CI environments (e.g., OS, shell). You can only use one of `executeScript`, `executeScripts`, `executeCommand`, or `executeCommands`. */ executeCommand?: string; /** * #### Execute Scripts * * --- * * A list of script files to execute sequentially. Each script runs in a separate process. * * The script can be written in JavaScript, TypeScript, or Python. The executable is determined by `defaults:configure` or the system default. You can only use one of `executeScript`, `executeScripts`, `executeCommand`, or `executeCommands`. */ executeScripts?: string[]; /** * #### Execute Commands * * --- * * A list of terminal commands to execute sequentially. Each command runs in a separate shell process. * * The commands run on the machine executing the Stacktape command. 
Be aware of potential differences between environments. You can only use one of `executeScript`, `executeScripts`, `executeCommand`, or `executeCommands`. */ executeCommands?: string[]; /** * #### Working Directory * * --- * * The directory where the script or command will be executed. */ cwd?: string; /** * #### Pipe Stdio * * --- * * If `true`, pipes the standard input/output (stdio) of the hook process to the main process. This allows you to see logs from your hook and interact with prompts. */ pipeStdio?: boolean; /** * #### Connect To * * --- * * A list of resources the script needs to interact with. Stacktape automatically injects environment variables with connection details for each specified resource. * * Environment variable names are in the format `STP_[RESOURCE_NAME]_[VARIABLE_NAME]` (e.g., `STP_MY_DATABASE_CONNECTION_STRING`). * * **Injected Variables by Resource Type:** * - **`Bucket`**: `NAME`, `ARN` * - **`DynamoDbTable`**: `NAME`, `ARN`, `STREAM_ARN` * - **`MongoDbAtlasCluster`**: `CONNECTION_STRING` * - **`RelationalDatabase`**: `CONNECTION_STRING`, `JDBC_CONNECTION_STRING`, `HOST`, `PORT`. For Aurora clusters, `READER_CONNECTION_STRING`, `READER_JDBC_CONNECTION_STRING`, and `READER_HOST` are also included. * - **`RedisCluster`**: `HOST`, `READER_HOST`, `PORT` * - **`EventBus`**: `ARN` * - **`Function`**: `ARN` * - **`BatchJob`**: `JOB_DEFINITION_ARN`, `STATE_MACHINE_ARN` * - **`UserAuthPool`**: `ID`, `CLIENT_ID`, `ARN` * - **`SnsTopic`**: `ARN`, `NAME` * - **`SqsQueue`**: `ARN`, `NAME`, `URL` * - **`UpstashKafkaTopic`**: `TOPIC_NAME`, `TOPIC_ID`, `USERNAME`, `PASSWORD`, `TCP_ENDPOINT`, `REST_URL` * - **`UpstashRedis`**: `HOST`, `PORT`, `PASSWORD`, `REST_TOKEN`, `REST_URL`, `REDIS_URL` * - **`PrivateService`**: `ADDRESS` * - **`WebService`**: `URL` */ connectTo?: string[]; /** * #### Environment Variables * * --- * * A list of environment variables to pass to the script or command. * * Values can be: * - A static string, number, or boolean. 
* - The result of a [custom directive](https://docs.stacktape.com/configuration/directives/#custom-directives). * - A reference to another resource's parameter using the [`$ResourceParam` directive](https://docs.stacktape.com/configuration/referencing-parameters/). * - A value from a [secret](https://docs.stacktape.com/resources/secrets/) using the [`$Secret` directive](https://docs.stacktape.com/configuration/directives/#secret). */ environment?: EnvironmentVar[]; /** * #### Assume Role of Resource * * --- * * The name of a deployed resource whose IAM role the script should assume. This grants the script the same permissions as the specified resource. * * The resource must be deployed before the script is executed. Stacktape injects temporary AWS credentials as environment variables, which are automatically used by most AWS SDKs and CLIs. * * **Supported Resource Types:** * - `function` * - `batch-job` * - `worker-service` * - `web-service` * - `private-service` * - `multi-container-workload` * - `nextjs-web` */ assumeRoleOfResource?: string; } export interface DirectiveDefinition { /** * #### Directive Name * * --- * * The name of the custom directive. */ name: string; /** * #### File Path * * --- * * The path to the file where the directive is defined, in the format `{file-path}:{handler}`. * * If the `{handler}` is omitted: * - For `.js` and `.ts` files, the `default` export is used. * - For `.py` files, the `main` function is used. */ filePath: string; } /** * #### Configures deployment-related aspects for this stack. */ export interface DeploymentConfig { /** * #### Enable Termination Protection * * --- * * If `true`, protects the stack from accidental deletion. You must disable this protection before you can delete the stack. */ terminationProtection?: boolean; /** * #### CloudFormation Role ARN * * --- * * The ARN of an IAM role that CloudFormation assumes when performing stack operations (create, update, delete). If not specified, CloudFormation uses the credentials of the identity performing the deployment.
 */ cloudformationRoleArn?: string; /** * #### Rollback Alarms * * --- * * A list of alarms that will trigger a rollback if they enter the `ALARM` state during a stack deployment. * * You can specify an alarm by its name (if defined in the `alarms` section) or by its ARN. * * > An alarm must exist before the deployment starts to be used as a rollback trigger. If you specify a newly created alarm, it will only be used in subsequent deployments. */ triggerRollbackOnAlarms?: string[]; /** * #### Post-Deployment Monitoring Time * * --- * * The amount of time (in minutes) that CloudFormation should monitor the stack and rollback alarms after a deployment. * * If a rollback alarm is triggered or the update is canceled during this period, the stack will be rolled back. */ monitoringTimeAfterDeploymentInMinutes?: number; /** * #### Disable Auto-Rollback * * --- * * If `true`, disables automatic rollback on deployment failure. * * - **With auto-rollback (default):** If a deployment fails, the stack is automatically rolled back to the last known good state. * - **Without auto-rollback:** If a deployment fails, the stack remains in the `UPDATE_FAILED` state. You can then either fix the issues and redeploy or manually roll back using the `stacktape rollback` command. */ disableAutoRollback?: boolean; /** * #### Publish Events To ARNs * * --- * * A list of SNS topic ARNs to which CloudFormation will publish stack events during stack operations. */ publishEventsToArn?: string[]; /** * #### Previous Versions to Keep * * --- * * The number of previous deployment artifact versions (functions, images, templates) to keep. */ previousVersionsToKeep?: number; /** * #### Disable S3 Transfer Acceleration * * --- * * If `true`, disables the use of S3 Transfer Acceleration for uploading deployment artifacts. * * S3 Transfer Acceleration can improve upload times and security by routing uploads through the nearest AWS edge location. It may incur minor additional costs. 
*/ disableS3TransferAcceleration?: boolean; } /** * #### Configures other, uncategorized aspects of this stack. */ export interface StackConfig { /** * #### Stack Outputs * * --- * * A list of custom outputs for your stack, such as API Gateway URLs, database endpoints, or resource ARNs. These outputs are often dynamically generated by AWS and are only known after deployment. */ outputs?: StackOutput[]; /** * #### Stack Tags * * --- * * A list of tags to apply to the stack. These tags are propagated to all supported AWS resources created in the stack and can help with cost allocation and resource management. You can specify a maximum of 45 tags. */ tags?: CloudformationTag[]; /** * #### Disable Stack Info Saving * * --- * * If `true`, disables saving information about the deployed stack to a local file after each deployment. * * By default, stack information is saved to `.stacktape-stack-info/<<stack-name>>.json`. */ disableStackInfoSaving?: boolean; /** * #### Stack Info Directory * * --- * * The directory where information about deployed stacks will be saved. */ stackInfoDirectory?: string; } export interface StackOutput { /** * #### Output Name * * --- * * The name of the stack output. */ name: string; /** * #### Output Value * * --- * * The value of the stack output. */ value: string; /** * #### Output Description * * --- * * A human-readable description of the stack output. */ description?: string; /** * #### Export Output * * --- * * If `true`, exports the stack output so it can be referenced by other stacks using the [`$CfStackOutput` directive](https://docs.stacktape.com/configuration/directives#cf-stack-output). */ export?: boolean; } export interface CloudformationTag { /** * #### Tag Name * * --- * * The name of the tag. It must be between 1 and 128 characters and can contain Unicode letters, digits, whitespace, and the characters `_`, `.`, `/`, `=`, `+`, `-`. */ name: string; /** * #### Tag Value * * --- * * The value of the tag. 
It must be between 1 and 256 characters. */ value: string; } /** * #### A resource for running and managing containerized applications. * * --- * * A multi-container workload is a fully managed, auto-scaling, and easy-to-use runtime for your Docker containers. * It allows you to run one or more containers together with shared resources. */ export interface ContainerWorkload { type: "multi-container-workload"; properties: ContainerWorkloadProps; /** * #### Resource Overrides * * --- * * Overrides properties of the underlying CloudFormation resources that Stacktape creates. * * Child resources are identified by their CloudFormation logical ID (e.g., `MyBucketBucket`). You can find these IDs by running `stacktape stack:info --detailed`. * * For a list of properties that can be overridden, refer to the [AWS CloudFormation documentation](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html). */ overrides?: { [k: string]: { [k: string]: unknown; }; }; } export interface ContainerWorkloadProps { /** * #### A list of containers that will run in this workload. * * --- * * A workload can consist of one or more containers. Containers within the same workload share computing resources and scale together as a single unit. */ containers: ContainerWorkloadContainer[]; resources: ContainerWorkloadResourcesConfig; scaling?: ContainerWorkloadScaling; deployment?: ContainerWorkloadDeploymentConfig; /** * #### Enables interactive shell access to running containers. * * --- * * When enabled, you can use the `stacktape container:session` command to get a shell inside a running container. * This is useful for debugging and inspecting your application in a live environment. * It uses AWS ECS Exec and SSM Session Manager for secure connections. */ enableRemoteSessions?: boolean; /** * #### Connect To * * --- * * Configures access to other resources in your stack and AWS services. 
By specifying resources here, Stacktape automatically: * - Configures IAM role permissions. * - Sets up security group rules to allow network traffic. * - Injects environment variables with connection details into the compute resource. * * Environment variables are named `STP_[RESOURCE_NAME]_[VARIABLE_NAME]` (e.g., `STP_MY_DATABASE_CONNECTION_STRING`). * * --- * * #### Granted Permissions and Injected Variables: * * **`Bucket`** * - **Permissions:** List, create, get, delete, and tag objects. * - **Variables:** `NAME`, `ARN` * * **`DynamoDbTable`** * - **Permissions:** Get, put, update, delete, scan, and query items; describe table stream. * - **Variables:** `NAME`, `ARN`, `STREAM_ARN` * * **`MongoDbAtlasCluster`** * - **Permissions:** Allows connection to clusters with `accessibilityMode` set to `scoping-workloads-in-vpc`. Creates a temporary user for secure, credential-less access. * - **Variables:** `CONNECTION_STRING` * * **`RelationalDatabase`** * - **Permissions:** Allows connection to databases with `accessibilityMode` set to `scoping-workloads-in-vpc`. * - **Variables:** `CONNECTION_STRING`, `JDBC_CONNECTION_STRING`, `HOST`, `PORT`. For Aurora clusters, `READER_CONNECTION_STRING`, `READER_JDBC_CONNECTION_STRING`, and `READER_HOST` are also included. * * **`RedisCluster`** * - **Permissions:** Allows connection to clusters with `accessibilityMode` set to `scoping-workloads-in-vpc`. * - **Variables:** `HOST`, `READER_HOST`, `PORT` * * **`EventBus`** * - **Permissions:** Publish events. * - **Variables:** `ARN` * * **`Function`** * - **Permissions:** Invoke the function, including via its URL if enabled. * - **Variables:** `ARN` * * **`BatchJob`** * - **Permissions:** Submit, list, describe, and terminate jobs; manage state machine executions. * - **Variables:** `JOB_DEFINITION_ARN`, `STATE_MACHINE_ARN` * * **`UserAuthPool`** * - **Permissions:** Full control over the user pool (`cognito-idp:*`). 
* - **Variables:** `ID`, `CLIENT_ID`, `ARN` * * **`SnsTopic`** * - **Permissions:** Confirm, list, publish, and manage subscriptions. * - **Variables:** `ARN`, `NAME` * * **`SqsQueue`** * - **Permissions:** Send, receive, delete, and purge messages. * - **Variables:** `ARN`, `NAME`, `URL` * * **`UpstashKafkaTopic`** * - **Variables:** `TOPIC_NAME`, `TOPIC_ID`, `USERNAME`, `PASSWORD`, `TCP_ENDPOINT`, `REST_URL` * * **`UpstashRedis`** * - **Variables:** `HOST`, `PORT`, `PASSWORD`, `REST_TOKEN`, `REST_URL`, `REDIS_URL` * * **`PrivateService`** * - **Variables:** `ADDRESS` * * **`aws:ses` (Macro)** * - **Permissions:** Full permissions for AWS SES (`ses:*`). */ connectTo?: string[]; /** * #### Custom IAM Role Statements * * --- * * A list of raw AWS IAM role statements to append to the resource's role, allowing for fine-grained permission control. */ iamRoleStatements?: StpIamRoleStatement[]; } export interface ContainerWorkloadContainer { /** * #### Configures how this container receives traffic and events. * * --- * * Event integrations allow containers to: * - Receive requests from load balancers. * - Communicate with other containers in the same workload. */ events?: ( | ContainerWorkloadLoadBalancerIntegration | ContainerWorkloadHttpApiIntegration | ContainerWorkloadInternalIntegration | ContainerWorkloadServiceConnectIntegration | ContainerWorkloadNetworkLoadBalancerIntegration )[]; loadBalancerHealthCheck?: LoadBalancerHealthCheck; /** * #### A unique name for the container within the workload. */ name: string; /** * #### Configures the container image. */ packaging: | StpBuildpackCwImagePackaging | ExternalBuildpackCwImagePackaging | PrebuiltCwImagePackaging | CustomDockerfileCwImagePackaging | NixpacksCwImagePackaging; /** * #### Determines if this container is critical for the workload's health. * * --- * * If an essential container fails or stops, all other containers in the same workload instance are also stopped. 
The entire instance is then terminated and replaced. */ essential?: boolean; logging?: ContainerWorkloadContainerLogging; /** * #### A list of other containers that this container depends on to start. * * --- * * This defines the startup order for containers within the workload. * For example, you can require a database container to be `HEALTHY` before your application container starts. */ dependsOn?: ContainerDependency[]; /** * #### A list of environment variables to inject into the container. * * --- * * Environment variables are ideal for providing configuration details to your container, such as database connection strings, API keys, or other dynamic parameters. */ environment?: EnvironmentVar[]; internalHealthCheck?: ContainerHealthCheck; /** * #### The time (in seconds) to wait before forcefully killing the container if it doesn't shut down gracefully. * * --- * * When a container is stopped, it first receives a `SIGTERM` signal. If it doesn't exit within this timeout, it will be sent a `SIGKILL` signal. * This allows for graceful shutdown procedures. The timeout must be between 2 and 120 seconds. */ stopTimeout?: number; /** * #### A list of file system volumes to mount to the container. * * --- * * Volumes provide persistent storage that can be shared across multiple containers and persists even if the container is stopped or replaced. * Currently, only EFS (Elastic File System) volumes are supported. */ volumeMounts?: ContainerEfsMount[]; } /** * #### Triggers a container when a request matches the specified conditions on an Application Load Balancer. * * --- * * You can route requests based on HTTP method, path, headers, query parameters, and source IP address. */ export interface ContainerWorkloadLoadBalancerIntegration { type: "application-load-balancer"; /** * #### Properties of the integration */ properties: { /** * #### The container port that will receive traffic from the load balancer. 
*/ containerPort: number; /** * #### The name of the Application Load Balancer. * * --- * * This must reference a load balancer defined in your Stacktape configuration. */ loadBalancerName: string; /** * #### The port of the load balancer listener to attach to. * * --- * * You only need to specify this if the load balancer uses custom listeners. */ listenerPort?: number; /** * #### The priority of this integration rule. * * --- * * Load balancer rules are evaluated in order from the lowest priority to the highest. * The first rule that matches an incoming request will handle it. */ priority: number; /** * #### A list of URL paths that will trigger this integration. * * --- * * The request will be routed if its path matches any of the paths in this list. * The comparison is case-sensitive and supports `*` and `?` wildcards. * * Example: `/users`, `/articles/*` */ paths?: string[]; /** * #### A list of HTTP methods that will trigger this integration. * * --- * * Example: `GET`, `POST`, `DELETE` */ methods?: string[]; /** * #### A list of hostnames that will trigger this integration. * * --- * * The hostname is parsed from the `Host` header of the request. * Wildcards (`*` and `?`) are supported. * * Example: `api.example.com`, `*.myapp.com` */ hosts?: string[]; /** * #### A list of header conditions that the request must match. * * --- * * All header conditions must be met for the request to be routed. */ headers?: LbHeaderCondition[]; /** * #### A list of query parameter conditions that the request must match. * * --- * * All query parameter conditions must be met for the request to be routed. */ queryParams?: LbQueryParamCondition[]; /** * #### A list of source IP addresses (in CIDR format) that are allowed to trigger this integration. * * --- * * > **Note:** If the client is behind a proxy, this will be the IP address of the proxy. */ sourceIps?: string[]; }; } export interface LbHeaderCondition { /** * #### The name of the HTTP header. 
*/ headerName: string; /** * #### A list of allowed values for the header. * * --- * * The condition is met if the header's value in the incoming request matches any of the values in this list. The comparison is case-insensitive. */ values: string[]; } export interface LbQueryParamCondition { /** * #### The name of the query parameter. */ paramName: string; /** * #### A list of allowed values for the query parameter. * * --- * * The condition is met if the query parameter's value in the incoming request matches any of the values in this list. The comparison is case-insensitive. */ values: string[]; } /** * #### Triggers a container when an HTTP API Gateway receives a matching request. * * --- * * You can route requests based on HTTP method and path. */ export interface ContainerWorkloadHttpApiIntegration { type: "http-api-gateway"; /** * #### Properties of the integration */ properties: { /** * #### The container port that will receive traffic from the API Gateway. */ containerPort: number; /** * #### The name of the HTTP API Gateway. */ httpApiGatewayName: string; /** * #### The HTTP method that will trigger this integration. * * --- * * You can specify an exact method (e.g., `GET`) or use `*` to match any method. */ method: "*" | "DELETE" | "GET" | "HEAD" | "OPTIONS" | "PATCH" | "POST" | "PUT"; /** * #### The URL path that will trigger this integration. * * --- * * - **Exact path**: `/users` * - **Path with parameter**: `/users/{id}`. The `id` will be available in `event.pathParameters.id`. * - **Greedy path**: `/files/{proxy+}`. This will match any path starting with `/files/`. */ path: strin