/**
 * @stacktape/sdk
 * Version: (unspecified)
 * DevOps-free cloud development framework.
 * Auto-generated TypeScript type definitions (~1,092 lines).
 */
// @ts-nocheck
/* eslint-disable */
/**
 * #### Identifies a Stacktape operation that can emit entries into the event log.
 * ---
 * - Each value corresponds to one phase of a Stacktape command
 *   (analysis, build, packaging, upload, deployment, cleanup, etc.).
 */
export type LoggableEventType =
  | "ANALYZE_DEPENDENCIES"
  | "ANALYZE_PROJECT"
  | "BUILD_CODE"
  | "BUILD_IMAGE"
  | "BUILD_NEXTJS_PROJECT"
  | "BUNDLING_NEXTJS_FUNCTIONS"
  | "CALCULATE_CHANGES"
  | "CALCULATE_CHECKSUM"
  | "CALCULATE_SIZE"
  | "CLEANUP"
  | "CREATE_DOCKERFILE"
  | "CREATE_RESOURCES_FOR_ARTIFACTS"
  | "DEBUG"
  | "DELETE_ARTIFACTS"
  | "DELETE_OBSOLETE_ARTIFACTS"
  | "DELETE_STACK"
  | "DEPLOY"
  | "FETCH_BUDGET_INFO"
  | "FETCH_DOMAIN_STATUSES"
  | "FETCH_EC2_INFO"
  | "FETCH_MAIL_INFO"
  | "FETCH_OPENSEARCH_INFO"
  | "FETCH_PREVIOUS_ARTIFACTS"
  | "FETCH_STACK_DATA"
  | "FETCH_USERS_FROM_USERPOOL"
  | "GENERATE_AI_RESPONSE"
  | "HOTSWAP_UPDATE"
  | "INJECT_ENVIRONMENT"
  | "INSTALL_DEPENDENCIES"
  | "INVALIDATE_CACHE"
  | "LOAD_AWS_CREDENTIALS"
  | "LOAD_CONFIG_FILE"
  | "LOAD_PROVIDER_CREDENTIALS"
  | "LOAD_TARGET_STACK_INFO"
  | "LOAD_USER_DATA"
  | "PACKAGE_ARTIFACTS"
  | "PREPARE_PIPELINE"
  | "REBUILD_CODE"
  | "REFETCH_STACK_DATA"
  | "REGISTER_CF_PRIVATE_TYPES"
  | "REGISTER_ECS_TASK_DEFINITION"
  | "REPACKAGE_ARTIFACTS"
  | "RESOLVE_CONFIG"
  | "RESOLVE_DEPENDENCIES"
  | "ROLLBACK_STACK"
  | "START_DEPLOYMENT"
  | "SYNC_BUCKET"
  | "UPDATE_ECS_SERVICE"
  | "UPDATE_FUNCTION_CODE"
  | "UPDATE_STACK"
  | "UPLOAD_BUCKET_CONTENT"
  | "UPLOAD_DEPLOYMENT_ARTIFACTS"
  | "UPLOAD_IMAGE"
  | "UPLOAD_PACKAGE"
  | "UPLOAD_PROJECT"
  | "VALIDATE_CONFIG_TEMP"
  | "VALIDATE_TEMPLATE"
  | "ZIP_PACKAGE"
  | "ZIP_PROJECT";
/** Lifecycle phase of a logged event: captured when it starts, on progress updates, and when it finishes. */
export type EventLogEntryType = "FINISH" | "START" | "UPDATE";
/**
 * #### A single top-level entry in the Stacktape event log.
 */
export interface EventLogEntry {
  /** Operation that produced this entry. */
  eventType: LoggableEventType;
  /** Whether this entry marks the START, an UPDATE or the FINISH of the event. */
  captureType: EventLogEntryType;
  /** Time the entry was captured (numeric epoch value; unit — seconds vs milliseconds — not shown here, confirm against the emitter). */
  timestamp: number;
  /** Event payload; its shape depends on the event type. */
  data: unknown;
  /** Human-readable description of the event. */
  description: string;
  additionalMessage?: string;
  /** Message attached when the event finishes. */
  finalMessage?: string;
  /** Nested events, keyed by string (presumably a sub-task identifier — confirm against the emitter). */
  childEvents: {
    [k: string]: ChildEventLogEntry[];
  };
}
/**
 * #### An event-log entry nested under a parent {@link EventLogEntry}.
 * ---
 * - Carries the same fields as a top-level entry, minus `childEvents` (no further nesting).
 */
export interface ChildEventLogEntry {
  /** Event payload; its shape depends on the event type. */
  data: unknown;
  /** Human-readable description of the event. */
  description: string;
  /** Time the entry was captured (numeric epoch value; unit not shown here). */
  timestamp: number;
  /** Operation that produced this entry. */
  eventType: LoggableEventType;
  /** Whether this entry marks the START, an UPDATE or the FINISH of the event. */
  captureType: EventLogEntryType;
  additionalMessage?: string;
  /** Message attached when the event finishes. */
  finalMessage?: string;
}
/**
 * #### AWS region identifier.
 * ---
 * - Union of the region codes selectable for Stacktape deployments.
 */
export type AwsRegion =
  | "af-south-1"
  | "ap-east-1"
  | "ap-northeast-1"
  | "ap-northeast-2"
  | "ap-northeast-3"
  | "ap-south-1"
  | "ap-southeast-1"
  | "ap-southeast-2"
  | "ca-central-1"
  | "eu-central-1"
  | "eu-north-1"
  | "eu-south-1"
  | "eu-west-1"
  | "eu-west-2"
  | "eu-west-3"
  | "me-south-1"
  | "sa-east-1"
  | "us-east-1"
  | "us-east-2"
  | "us-west-1"
  | "us-west-2";
/**
 * #### Any resource type that can appear in the `resources` section of a Stacktape config.
 */
export type StacktapeResourceDefinition =
  | RelationalDatabase
  | ContainerWorkload
  | BatchJob
  | WebService
  | PrivateService
  | WorkerService
  | ApplicationLoadBalancer
  | NetworkLoadBalancer
  | HttpApiGateway
  | Bucket
  | UserAuthPool
  | EventBus
  | Bastion
  | DynamoDbTable
  | StateMachine
  | MongoDbAtlasCluster
  | RedisCluster
  | CustomResourceInstance
  | CustomResourceDefinition
  | UpstashRedis
  | DeploymentScript
  | AwsCdkConstruct
  | SqsQueue
  | SnsTopic
  | HostingBucket
  | WebAppFirewall
  | NextjsWeb
  | OpenSearchDomain
  | EfsFilesystem
  | LambdaFunction
  | EdgeLambdaFunction;
/** Metric conditions that can trigger an alarm on a relational database. */
export type RelationalDatabaseAlarmTrigger =
  | RelationalDatabaseReadLatencyTrigger
  | RelationalDatabaseWriteLatencyTrigger
  | RelationalDatabaseCPUUtilizationTrigger
  | RelationalDatabaseFreeStorageTrigger
  | RelationalDatabaseFreeMemoryTrigger
  | RelationalDatabaseConnectionCountTrigger;
/** Notification destination for alarms (MS Teams, Slack or email). */
export type AlarmUserIntegration = MsTeamsIntegration | SlackIntegration | EmailIntegration;
/** HTTP method; `"*"` matches any method. */
export type HttpMethod = "*" | "DELETE" | "GET" | "HEAD" | "OPTIONS" | "PATCH" | "POST" | "PUT";
/** Metric conditions that can trigger an alarm on an application load balancer. */
export type ApplicationLoadBalancerAlarmTrigger =
  | ApplicationLoadBalancerErrorRateTrigger
  | ApplicationLoadBalancerCustomTrigger;
/** Metric conditions that can trigger an alarm on an HTTP API gateway. */
export type HttpApiGatewayAlarmTrigger = HttpApiGatewayErrorRateTrigger | HttpApiGatewayLatencyTrigger;
/**
 * #### Configures what type of load balancing mechanism is used.
 * ---
 * - Supported types are: `service-connect` and `application-load-balancer`.
 *
 * `service-connect`
 * - distributes traffic to the available containers evenly
 * - connections are only possible from other container-based resources of the stack (web-services, worker-services, multi-container-workloads)
 * - supports any TCP protocol
 * - this option is vastly cheaper - you only pay $0.5 a month for background resource (private cloud map dns namespace)
 *
 * `application-load-balancer`
 * - distributes traffic to the available containers in a round robin fashion
 * - supports HTTP protocol only
 * - uses pricing which is combination of flat hourly charge(\~$0.0252/hour) and used LCUs(Load Balancer Capacity Units)(\~$0.08/hour)
 * - is eligible for free tier, for better understanding of pricing refer to [AWS docs](https://aws.amazon.com/elasticloadbalancing/pricing/)
 */
// NOTE(review): the `& string` intersection below produces a type no plain object
// literal can satisfy — it looks like a schema-generator artifact (possibly meant to
// allow bare string values alongside the object form). Confirm against the generator
// before relying on this type; changing it here would alter the public API.
export type PrivateServiceLoadBalancing = {
  type: "application-load-balancer" | "service-connect";
} & string;
/** OAuth 2.0 flows that can be enabled on a user auth pool client. */
export type AllowedOauthFlow = "client_credentials" | "code" | "implicit";
/** Any state type usable inside a state machine definition. */
export type State = Choice | Fail | StateMachineMap | Parallel | Pass | Succeed | Task | Wait;
/** Metric conditions that can trigger an alarm on an SQS queue. */
export type SqsQueueAlarmTrigger = SqsQueueReceivedMessagesCountTrigger | SqsQueueNotEmptyTrigger;
/** Metric conditions that can trigger an alarm on a lambda function. */
export type LambdaAlarmTrigger = LambdaErrorRateTrigger | LambdaDurationTrigger;
/** Number value, or a CloudFormation intrinsic function that resolves to one. */
export type ValueNumber = IntrinsicFunction | number;
/** String value, or a CloudFormation intrinsic function that resolves to one. */
export type ValueString = IntrinsicFunction | string;
/** CloudFormation resource deletion policy. */
export type DeletionPolicy = "Delete" | "Retain" | "Snapshot";
/** Boolean value, or a CloudFormation intrinsic function that resolves to one. */
export type ValueBoolean = IntrinsicFunction | boolean;
/** List of strings, or a CloudFormation intrinsic function that resolves to one. */
export type ListString = string[] | IntrinsicFunction;
/**
 * #### Root shape of a Stacktape configuration file.
 */
export interface StacktapeConfig {
  /**
   * #### Name of this service.
   * ---
   * > Using serviceName is deprecated. Use option "--projectName [project_name]" instead
   * - Cloudformation stack name for the stack deployed with this config will have form: {serviceName}-{stage}.
   * - Must be alphanumeric with dashes (Must match regex [a-zA-Z][-a-zA-Z0-9]*)
   */
  serviceName?: string;
  /**
   * #### Configuration for 3rd party service providers.
   */
  providerConfig?: {
    mongoDbAtlas?: MongoDbAtlasProvider;
    upstash?: UpstashProvider;
  };
  /**
   * #### Defines variables that can be later used in the configuration using `$Var().{variable-name}` directive.
   * ---
   * - Variables can be accessed using `$Var().{variable-name}`
   * - Variables are helpful, when you want to use the same value for multiple properties in your configuration.
   * - Example variable: `dbAddress: $ResourceParam('myDatabase', 'host')`
   */
  variables?: {
    [k: string]: unknown;
  };
  /** Monthly budget limit and spending notifications for this stack. */
  budgetControl?: BudgetControl;
  /** Scripts executed automatically before/after lifecycle commands. */
  hooks?: Hooks;
  /**
   * #### List of script definitions
   * ---
   * - **Scripts** are used to specify and execute your custom scripts. Specifying a script in the Stacktape config can be beneficial for multiple reasons:
   *   - scripts will be easily reusable by all members of your team,
   *   - scripts can be executed automatically within lifecycle [hooks](https://docs.stacktape.com/configuration/hooks/) (before/after deploy/delete etc.)
   *     or manually using [script:run command](https://docs.stacktape.com/cli/commands/script-run/),
   *   - you can use `connectTo` property to easily inject environment variables needed for connecting to resources of your stack,
   *   - you can leverage bastion scripts and bastion tunneling to access resources which are only accessible within VPC.
   * - There are 3 types of scripts and based on the type of script there are differences on how the script is executed:
   *   1. [local-script](https://docs.stacktape.com/configuration/scripts/#local-script) - script is executed locally (from the same host from which the stacktape command is being executed),
   *   2. [local-script-with-bastion-tunneling](https://docs.stacktape.com/configuration/scripts/#local-script-with-bastion-tunneling) - same as `local-script`, and additionally: Connections to selected resources listed in **connectTo** list
   *      are tunneled through bastion resource of your stack. This allows you to connect to resources (databases, redis-clusters...)
   *      which are only accessible within VPC from your local script (**requires bastion resource**),
   *   3. [bastion-script](https://docs.stacktape.com/configuration/scripts/#bastion-tunnel) - script is executed on the bastion host (**requires bastion resource**).
   * - Scripts can either execute shell commands or scripts written in `Javascript`,`Typescript` or `Python`.
   */
  scripts?: {
    [k: string]: LocalScript | BastionScript | LocalScriptWithBastionTunneling;
  };
  /**
   * #### Configures custom, user-defined directives that can be used in this configuration.
   * ---
   * - Directives can be used to dynamically configure certain aspects of this stack.
   */
  directives?: DirectiveDefinition[];
  /** Deployment-related settings (rollback alarms, termination protection, etc.). */
  deploymentConfig?: DeploymentConfig;
  stackConfig?: StackConfig;
  /**
   * #### Infrastructure resources that make up your stack.
   * ---
   * - Every resource consists of multiple (sometimes more than 10) underlying AWS resources.
   * - To see all resources in this stack (with their underlying AWS Cloudformation resources, etc.) use `stacktape stack-info --detailed` command.
   * - Every resource specified here counts towards your resources limit.
   */
  resources: {
    [k: string]: StacktapeResourceDefinition;
  };
  /**
   * #### Raw cloudformation resources that will be deployed in this stack.
   * ---
   * - These resources will be merged with Stacktape resources.
   * - Every cloudformation resource consists of its name (logical name) and definition.
   * - To avoid logical name conflicts, you can see all logical names for all resources deployed by Stacktape using `stacktape stack-info --detailed` command.
   * - Resources specified here do not count towards your resources limit.
   * - You can see a list of all AWS cloudformation supported resources here: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html
   */
  cloudformationResources?: {
    [k: string]: Default;
  };
}
/**
 * #### Credentials and connectivity settings for the MongoDB Atlas provider.
 */
export interface MongoDbAtlasProvider {
  /**
   * #### MongoDb Atlas public API key
   * ---
   * - You can get API Keys for your organization by following
   *   [guide in the docs](https://docs.stacktape.com/user-guides/mongo-db-atlas-credentials/)
   */
  publicKey?: string;
  /**
   * #### MongoDb Atlas private API key
   * ---
   * - You can get API Keys for your organization by following
   *   [guide in the docs](https://docs.stacktape.com/user-guides/mongo-db-atlas-credentials/)
   * - For security reasons, you should store your credentials as secrets. To learn more about secrets, refer to
   *   [secrets guide](https://docs.stacktape.com/resources/secrets/)
   */
  privateKey?: string;
  /**
   * #### MongoDB Atlas Organization Id
   * ---
   * - You can get organization ID for your organization by following
   *   [guide in the docs](https://docs.stacktape.com/user-guides/mongo-db-atlas-credentials/)
   */
  organizationId?: string;
  /** Network-accessibility settings applied to all Atlas clusters in the stack's project. */
  accessibility?: MongoDbAtlasAccessibility;
}
/**
 * #### Configures connectivity settings of the MongoDB Atlas project
 * ---
 * - If your stack contains a **MongoDb Atlas cluster**, Stacktape automatically creates
 *   **MongoDb Atlas Project** associated with the stack.
 * - All of the **MongoDb Atlas clusters** are deployed within this project
 * - Network connectivity to clusters is configured on a per project level. As a result, accessibility settings here are
 *   applied to all MongoDb Atlas clusters within your stack.
 */
export interface MongoDbAtlasAccessibility {
  /**
   * #### Configures the accessibility mode for this database
   * ---
   * The following modes are supported:
   * - **internet** - Least restrictive mode. The database can be accessed from anywhere on the internet.
   * - **vpc** - The database can be accessed only from resources within your VPC. This
   *   means any [function](https://docs.stacktape.com/compute-resources/lambda-functions) (provided it has `joinDefaultVpc` set to true),
   *   [batch job](https://docs.stacktape.com/compute-resources/batch-jobs),
   *   [container workload](https://docs.stacktape.com/compute-resources/multi-container-workloads) or container service
   *   within your stack can access the cluster (if it also has required credentials).
   *   Additionally, IP addresses configured in `whitelistedIps` can also access the database (even from the internet).
   * - **scoping-workloads-in-vpc** - similar to **vpc** mode, but even more restrictive. In addition to being in the same VPC, the resources of your stack
   *   accessing the cluster must also have sufficient security group permissions (for functions, batch jobs and container services, these permissions
   *   can be granted together with IAM permissions using `connectTo` in their configuration). Additionally, IP addresses configured in `whitelistedIps`
   *   can also access the cluster (even from the internet).
   * - **whitelisted-ips-only** - The cluster can only be accessed from an IP addresses and CIDR blocks listed in the `whitelistedIps` list.
   *
   * To learn more about VPCs, refer to [VPC Docs](https://docs.stacktape.com/user-guides/vpcs/).
   */
  accessibilityMode: "internet" | "scoping-workloads-in-vpc" | "vpc" | "whitelisted-ips-only";
  /**
   * #### List of IP addresses or IP ranges (in CIDR format)
   * ---
   * The behavior of this property varies based on `accessibilityMode`:
   * - in the **internet** mode, this property has no effect as the database is already accessible from everywhere.
   * - in the **vpc** mode and **scoping-workloads-in-vpc** mode, these IP addresses/ranges can be used to
   *   allow access from specific addresses outside of the VPC (i.e IP address of your office).
   * - in the **whitelisted-ips-only** mode, these addresses/ranges are the only addresses that can access the database.
   */
  whitelistedIps?: string[];
}
/**
 * #### Credentials for the Upstash provider.
 */
export interface UpstashProvider {
  /**
   * #### Email address associated with the Upstash account
   * ---
   * - Operations (database creation/update/delete) will be done on behalf of this account.
   */
  accountEmail: string;
  /**
   * #### Api key associated with the account or the team.
   * ---
   * - You can create an api key in your [Upstash console](https://console.upstash.com/account/api)
   */
  apiKey: string;
}
/**
 * #### Configures monthly budget and notifications for the stack
 * ---
 * - Budget control allows you to watch your spending, and configure email notifications when thresholds are met.
 * - The budget is reset each calendar month, meaning: at the beginning of each month, the spend is reset to 0.
 */
export interface BudgetControl {
  /**
   * #### The amount of cost (in USD) that you want to track with budget.
   * ---
   * - Percentage thresholds for configured notifications are relative to this limit.
   */
  limit: number;
  /**
   * #### Notifications that are sent when a notification condition is met
   * ---
   * - Notification sends email(s) to specified email recipients when the notification condition is met.
   * - Notifications are triggered:
   *   - when the **actual** spend is over specified threshold.
   *   - when the **forecasted** spend is forecasted to get over specified threshold.
   * - You can configure up to 5 notifications per stack.
   */
  notifications?: BudgetNotification[];
}
/**
 * #### A single budget-threshold notification.
 */
export interface BudgetNotification {
  /**
   * #### Whether the notification applies to how much you have spent (**ACTUAL**) or to how much you are forecasted to spend (**FORECASTED**).
   * ---
   * - **FORECASTED** - A forecast is a prediction of how much you will use AWS services over the following month. This forecast is based on your past usage.
   * - **ACTUAL** - An actual budget that you already spent in this month (as billed by AWS).
   *
   * > WARNING: AWS requires approximately 5 weeks of usage data to generate budget forecasts. If you set a notification
   * > to trigger based on a **FORECASTED** amount, this notification isn't triggered until you have enough historical usage information.
   * > Therefore we advise to use forecast notifications mainly for long running production stacks.
   * > To learn more about budgets, refer to [AWS docs](https://docs.aws.amazon.com/cost-management/latest/userguide/budgets-best-practices.html#budgets-best-practices-alerts)
   */
  budgetType?: "ACTUAL" | "FORECASTED";
  /**
   * #### The notification is triggered when this threshold is reached
   * ---
   * - Example: When you set `limit` to $**200** and `thresholdPercentage` to **80** percent, the notification is triggered when the spend goes over $160 (80% of 200).
   */
  thresholdPercentage?: number;
  /**
   * #### List of email recipients that will receive the notification
   * ---
   * - You can specify up to 10 email recipients per notification
   */
  emails: string[];
}
/**
 * #### Configures hooks executed before/after specified commands.
 * ---
 * - Hooks are used to automatically execute scripts from `scripts` section.
 */
export interface Hooks {
  /**
   * #### Executed before `deploy` and `codebuild:deploy` commands
   * ---
   * - When using `codebuild:deploy` hooks are executed within codebuild environment (not locally).
   */
  beforeDeploy?: NamedScriptLifecycleHook[];
  /**
   * #### Executed after `deploy` and `codebuild:deploy` commands
   * ---
   * - When using `codebuild:deploy` hooks are executed within codebuild environment (not locally).
   */
  afterDeploy?: NamedScriptLifecycleHook[];
  /**
   * #### Executed before `delete` command
   * ---
   * - Before delete scripts are executed only if you provide config path (`--configPath`) and stage (`--stage`) when running `delete` command.
   */
  beforeDelete?: NamedScriptLifecycleHook[];
  /**
   * #### Executed after `delete` command
   * ---
   * - After delete scripts are executed only if you provide config path (`--configPath`) and stage (`--stage`) when running `delete` command.
   */
  afterDelete?: NamedScriptLifecycleHook[];
  /**
   * #### Executed before `bucket:sync` command
   */
  beforeBucketSync?: NamedScriptLifecycleHook[];
  /**
   * #### Executed after `bucket:sync` command
   */
  afterBucketSync?: NamedScriptLifecycleHook[];
  /**
   * #### Executed before `dev` command
   */
  beforeDev?: NamedScriptLifecycleHook[];
  /**
   * #### Executed after `dev` command
   */
  afterDev?: NamedScriptLifecycleHook[];
}
/**
 * #### Reference to a script (from the `scripts` section) executed as a lifecycle hook.
 */
export interface NamedScriptLifecycleHook {
  /**
   * #### Name of the script (from scripts section) to execute
   * ---
   * - The specified script must be defined in the `scripts` section.
   */
  scriptName: string;
  /**
   * #### If set to true, skips execution of the hook when stacktape is running on CI server (CODEBUILD, GitHub action, Gitlab CI etc...)
   * ---
   * - This is useful if you wish to execute the scripts only when running locally
   */
  skipOnCI?: boolean;
  /**
   * #### If set to true, hook is only executed when stacktape is running on CI server (CODEBUILD, GitHub action, Gitlab CI etc...), but not on local workstation
   * ---
   * - This is useful if you wish to execute the scripts only when running on CI server
   */
  skipOnLocal?: boolean;
}
/**
 * #### Script executed locally — on the same host from which the stacktape command is run.
 */
export interface LocalScript {
  type: "local-script";
  properties: LocalScriptProps;
}
/**
 * #### Properties of a locally-executed script.
 * ---
 * - Exactly one of `executeScript`, `executeScripts`, `executeCommand` or `executeCommands` should be configured.
 */
export interface LocalScriptProps {
  /**
   * #### Path to the script to execute
   * ---
   * - The script can be written in **Javascript**, **Typescript** or **Python**.
   * - The script is executed in a separate process.
   * - The script is executed using an executable configured using `defaults:configure` command or a default
   *   executable on your machine:
   *   - `node` for Javascript and Typescript
   *   - `python` for Python
   * - Only one of `executeScript`, `executeScripts`, `executeCommand` or `executeCommands` can be configured.
   */
  executeScript?: string;
  /**
   * #### Terminal command to execute
   * ---
   * - Executes the specified command in a separate shell process. Uses `/bin/bash` on UNIX systems and default shell (usually `cmd.exe`) on Windows systems.
   * - The command will be executed on the machine running the Stacktape command.
   *   If the command works on your machine, it doesn't mean it works for people or machines with different OSes or shells.
   * - Only one of `executeScript`, `executeScripts`, `executeCommand` or `executeCommands` can be configured.
   */
  executeCommand?: string;
  /**
   * #### List of scripts to execute
   * ---
   * - Scripts are executed in sequential order.
   * - Script can be written in **Javascript**, **Typescript** or **Python**.
   * - Each script is executed in a separate process.
   * - Each script is executed using an executable configured using `defaults:configure` command or a default
   *   executable on your machine:
   *   - `node` for Javascript and Typescript
   *   - `python` for Python
   * - Only one of `executeScript`, `executeScripts`, `executeCommand` or `executeCommands` can be configured.
   */
  executeScripts?: string[];
  /**
   * #### List of terminal commands to execute
   * ---
   * - Commands are executed in sequential order.
   * - Each command is executed in a separate shell process. Uses `/bin/bash` on UNIX systems and default shell (usually `cmd.exe`) on Windows systems.
   * - Commands will be executed on the machine running the Stacktape command.
   *   If a command works on your machine, it doesn't mean it works for people or machines with different OSes or shells.
   * - Only one of `executeScript`, `executeScripts`, `executeCommand` or `executeCommands` can be configured.
   */
  executeCommands?: string[];
  /**
   * #### Directory where the command will be executed.
   * ---
   * - By default, the script is executed from the directory where the stacktape command was executed.
   */
  cwd?: string;
  /**
   * #### Pipes stdio (standard input/output) of the hook process to the main process
   * ---
   * - This allows you to see logs (stdout/stderr) produced by your hook and respond to prompts.
   */
  pipeStdio?: boolean;
  /**
   * #### List of resources with which the script will interact
   * ---
   * By referencing resources in `connectTo` list, Stacktape automatically injects environment variables (containing information about resources in the list) into the script:
   * - names of environment variables use upper-snake-case and are in form **STP_`[RESOURCE_NAME]`_`[VARIABLE_NAME]`**,
   * - examples: `STP_MY_DATABASE_CONNECTION_STRING` or `STP_MY_EVENT_BUS_ARN`,
   *
   * <br/>
   *
   * List of the injected environment variables depends on the resource type:
   * - **Bucket**: `NAME`, `ARN`
   * - **DynamoDB table**: `NAME`, `ARN`, `STREAM_ARN`
   * - **MongoDB Atlas cluster**: `CONNECTION_STRING`
   * - **Relational(SQL) database**: `CONNECTION_STRING`, `JDBC_CONNECTION_STRING`, `HOST`, `PORT`
   *   (in case of aurora multi instance cluster additionally: `READER_CONNECTION_STRING`, `READER_JDBC_CONNECTION_STRING`, `READER_HOST`)
   * - **Redis cluster**: `HOST`, `READER_HOST`, `PORT`
   * - **Event bus**: `ARN`
   * - **Function**: `ARN`
   * - **Batch job**: `JOB_DEFINITION_ARN`, `STATE_MACHINE_ARN`
   * - **User auth pool**: `ID`, `CLIENT_ID`, `ARN`
   * - **SNS Topic**: `ARN`, `NAME`
   * - **SQS Queue**: `ARN`, `NAME`, `URL`
   * - **Upstash Kafka topic**: `TOPIC_NAME`, `TOPIC_ID`, `USERNAME`, `PASSWORD`, `TCP_ENDPOINT`, `REST_URL`
   * - **Upstash Redis**: `HOST`, `PORT`, `PASSWORD`, `REST_TOKEN`, `REST_URL`, `REDIS_URL`
   * - **Private service**: `ADDRESS`
   * - **Web service**: `URL`
   */
  connectTo?: string[];
  /**
   * #### Environment variables passed to the script/command.
   * ---
   * Most commonly used types of environment variables:
   * - Static - string, number or boolean (will be stringified).
   * - Result of a [custom directive](https://docs.stacktape.com/configuration/directives/#custom-directives).
   * - Referenced property of another resource (using **$ResourceParam** directive). To learn more, refer to
   *   [referencing parameters guide](https://docs.stacktape.com/configuration/referencing-parameters/).
   *   If you are using environment variables to inject information about resources into your script, see also property [connectTo](https://docs.stacktape.com/configuration/scripts/#connecting-to-resources) which simplifies this process.
   * - Value of a [secret](https://docs.stacktape.com/resources/secrets/) (using [$Secret directive](https://docs.stacktape.com/configuration/directives/#secret)).
   */
  environment?: EnvironmentVar[];
  /**
   * #### Name of the resource to assume role of
   * ---
   * - If specified, the script will be executed with the permissions of the specified resource.
   * - Resource must already be deployed when the script is executed.
   * - In the background, the script is injected with AWS environment variables which are automatically picked up by most of the AWS SDKs and CLIs.
   * - Supported resource types include:
   *   - `function`
   *   - `batch-job`
   *   - `worker-service`
   *   - `web-service`
   *   - `private-service`
   *   - `multi-container-workload`
   *   - `nextjs-web`
   */
  assumeRoleOfResource?: string;
}
/**
 * #### A single environment variable passed to a script or command.
 */
export interface EnvironmentVar {
  /**
   * #### Name of the environment variable
   */
  name: string;
  /**
   * #### Value of the environment variable
   * ---
   * - Numbers and booleans will be stringified before injection.
   */
  value: string | number | boolean;
}
/**
 * #### Script executed on the bastion host of the stack (requires a bastion resource).
 */
export interface BastionScript {
  type: "bastion-script";
  properties: BastionScriptProps;
}
/**
 * #### Properties of a script executed on a bastion host.
 * ---
 * - Exactly one of `executeCommand` or `executeCommands` should be configured.
 */
export interface BastionScriptProps {
  /**
   * #### Name of the bastion resource on which commands will be executed
   */
  bastionResource?: string;
  /**
   * #### Terminal command to execute
   * ---
   * - Specified command is executed on the bastion host.
   * - Logs from the execution are printed to your terminal.
   * - Only one of `executeCommand` or `executeCommands` can be configured.
   */
  executeCommand?: string;
  /**
   * #### List of terminal commands to execute
   * ---
   * - Specified commands are executed sequentially as a script on the bastion host.
   * - Logs from the execution are printed to your terminal.
   * - Only one of `executeCommand` or `executeCommands` can be configured.
   */
  executeCommands?: string[];
  /**
   * #### Directory on bastion host where the command will be executed.
   * ---
   * - By default, the script is executed from the root directory `/`.
   */
  cwd?: string;
  /**
   * #### List of resources with which the script will interact
   * ---
   * By referencing resources in `connectTo` list, Stacktape automatically injects environment variables (containing information about resources in the list) into the script:
   * - names of environment variables use upper-snake-case and are in form **STP_`[RESOURCE_NAME]`_`[VARIABLE_NAME]`**,
   * - examples: `STP_MY_DATABASE_CONNECTION_STRING` or `STP_MY_EVENT_BUS_ARN`,
   *
   * <br/>
   *
   * List of the injected environment variables depends on the resource type:
   * - **Bucket**: `NAME`, `ARN`
   * - **DynamoDB table**: `NAME`, `ARN`, `STREAM_ARN`
   * - **MongoDB Atlas cluster**: `CONNECTION_STRING`
   * - **Relational(SQL) database**: `CONNECTION_STRING`, `JDBC_CONNECTION_STRING`, `HOST`, `PORT`
   *   (in case of aurora multi instance cluster additionally: `READER_CONNECTION_STRING`, `READER_JDBC_CONNECTION_STRING`, `READER_HOST`)
   * - **Redis cluster**: `HOST`, `READER_HOST`, `PORT`
   * - **Event bus**: `ARN`
   * - **Function**: `ARN`
   * - **Batch job**: `JOB_DEFINITION_ARN`, `STATE_MACHINE_ARN`
   * - **User auth pool**: `ID`, `CLIENT_ID`, `ARN`
   * - **SNS Topic**: `ARN`, `NAME`
   * - **SQS Queue**: `ARN`, `NAME`, `URL`
   * - **Upstash Kafka topic**: `TOPIC_NAME`, `TOPIC_ID`, `USERNAME`, `PASSWORD`, `TCP_ENDPOINT`, `REST_URL`
   * - **Upstash Redis**: `HOST`, `PORT`, `PASSWORD`, `REST_TOKEN`, `REST_URL`, `REDIS_URL`
   * - **Private service**: `ADDRESS`
   * - **Web service**: `URL`
   */
  connectTo?: string[];
  /**
   * #### Environment variables passed to the script/command.
   * ---
   * Most commonly used types of environment variables:
   * - Static - string, number or boolean (will be stringified).
   * - Result of a [custom directive](https://docs.stacktape.com/configuration/directives/#custom-directives).
   * - Referenced property of another resource (using **$ResourceParam** directive). To learn more, refer to
   *   [referencing parameters guide](https://docs.stacktape.com/configuration/referencing-parameters/).
   *   If you are using environment variables to inject information about resources into your script, see also property [connectTo](https://docs.stacktape.com/configuration/scripts/#connecting-to-resources) which simplifies this process.
   * - Value of a [secret](https://docs.stacktape.com/resources/secrets/) (using [$Secret directive](https://docs.stacktape.com/configuration/directives/#secret)).
   */
  environment?: EnvironmentVar[];
  /**
   * #### Name of the resource to assume role of
   * ---
   * - If specified, the script will be executed with the permissions of the specified resource.
   * - Resource must already be deployed when the script is executed.
   * - In the background, the script is injected with AWS environment variables which are automatically picked up by most of the AWS SDKs and CLIs.
   * - Supported resource types include:
   *   - `function`
   *   - `batch-job`
   *   - `worker-service`
   *   - `web-service`
   *   - `private-service`
   *   - `multi-container-workload`
   *   - `nextjs-web`
   */
  assumeRoleOfResource?: string;
}
/**
 * #### Local script whose connections to `connectTo` resources are tunneled through a bastion (requires a bastion resource).
 */
export interface LocalScriptWithBastionTunneling {
  type: "local-script-with-bastion-tunneling";
  properties: LocalScriptWithBastionTunnelingProps;
}
/**
 * #### Properties of a locally-executed script with bastion tunneling.
 * ---
 * - Exactly one of `executeScript`, `executeScripts`, `executeCommand` or `executeCommands` should be configured.
 */
export interface LocalScriptWithBastionTunnelingProps {
  /**
   * #### Name of the bastion resource which will be used for tunneling to protected resources.
   */
  bastionResource?: string;
  /**
   * #### Path to the script to execute
   * ---
   * - The script can be written in **Javascript**, **Typescript** or **Python**.
   * - The script is executed in a separate process.
   * - The script is executed using an executable configured using `defaults:configure` command or a default
   *   executable on your machine:
   *   - `node` for Javascript and Typescript
   *   - `python` for Python
   * - Only one of `executeScript`, `executeScripts`, `executeCommand` or `executeCommands` can be configured.
   */
  executeScript?: string;
  /**
   * #### Terminal command to execute
   * ---
   * - Executes the specified command in a separate shell process. Uses `/bin/bash` on UNIX systems and default shell (usually `cmd.exe`) on Windows systems.
   * - The command will be executed on the machine running the Stacktape command.
   *   If the command works on your machine, it doesn't mean it works for people or machines with different OSes or shells.
   * - Only one of `executeScript`, `executeScripts`, `executeCommand` or `executeCommands` can be configured.
   */
  executeCommand?: string;
  /**
   * #### List of scripts to execute
   * ---
   * - Scripts are executed in sequential order.
   * - Script can be written in **Javascript**, **Typescript** or **Python**.
   * - Each script is executed in a separate process.
   * - Each script is executed using an executable configured using `defaults:configure` command or a default
   *   executable on your machine:
   *   - `node` for Javascript and Typescript
   *   - `python` for Python
   * - Only one of `executeScript`, `executeScripts`, `executeCommand` or `executeCommands` can be configured.
   */
  executeScripts?: string[];
  /**
   * #### List of terminal commands to execute
   * ---
   * - Commands are executed in sequential order.
   * - Each command is executed in a separate shell process. Uses `/bin/bash` on UNIX systems and default shell (usually `cmd.exe`) on Windows systems.
   * - Commands will be executed on the machine running the Stacktape command.
   *   If a command works on your machine, it doesn't mean it works for people or machines with different OSes or shells.
   * - Only one of `executeScript`, `executeScripts`, `executeCommand` or `executeCommands` can be configured.
   */
  executeCommands?: string[];
  /**
   * #### Directory where the command will be executed.
   * ---
   * - By default, the script is executed from the directory where the stacktape command was executed.
   */
  cwd?: string;
  /**
   * #### Pipes stdio (standard input/output) of the hook process to the main process
   * ---
   * - This allows you to see logs (stdout/stderr) produced by your hook and respond to prompts.
   */
  pipeStdio?: boolean;
  /**
   * #### List of resources with which the script will interact
   * ---
   * By referencing resources in `connectTo` list, Stacktape automatically injects environment variables (containing information about resources in the list) into the script:
   * - names of environment variables use upper-snake-case and are in form **STP_`[RESOURCE_NAME]`_`[VARIABLE_NAME]`**,
   * - examples: `STP_MY_DATABASE_CONNECTION_STRING` or `STP_MY_EVENT_BUS_ARN`,
   *
   * <br/>
   *
   * List of the injected environment variables depends on the resource type:
   * - **Bucket**: `NAME`, `ARN`
   * - **DynamoDB table**: `NAME`, `ARN`, `STREAM_ARN`
   * - **MongoDB Atlas cluster**: `CONNECTION_STRING`
   * - **Relational(SQL) database**: `CONNECTION_STRING`, `JDBC_CONNECTION_STRING`, `HOST`, `PORT`
   *   (in case of aurora multi instance cluster additionally: `READER_CONNECTION_STRING`, `READER_JDBC_CONNECTION_STRING`, `READER_HOST`)
   * - **Redis cluster**: `HOST`, `READER_HOST`, `PORT`
   * - **Event bus**: `ARN`
   * - **Function**: `ARN`
   * - **Batch job**: `JOB_DEFINITION_ARN`, `STATE_MACHINE_ARN`
   * - **User auth pool**: `ID`, `CLIENT_ID`, `ARN`
   * - **SNS Topic**: `ARN`, `NAME`
   * - **SQS Queue**: `ARN`, `NAME`, `URL`
   * - **Upstash Kafka topic**: `TOPIC_NAME`, `TOPIC_ID`, `USERNAME`, `PASSWORD`, `TCP_ENDPOINT`, `REST_URL`
   * - **Upstash Redis**: `HOST`, `PORT`, `PASSWORD`, `REST_TOKEN`, `REST_URL`, `REDIS_URL`
   * - **Private service**: `ADDRESS`
   * - **Web service**: `URL`
   */
  connectTo?: string[];
  /**
   * #### Environment variables passed to the script/command.
   * ---
   * Most commonly used types of environment variables:
   * - Static - string, number or boolean (will be stringified).
   * - Result of a [custom directive](https://docs.stacktape.com/configuration/directives/#custom-directives).
   * - Referenced property of another resource (using **$ResourceParam** directive). To learn more, refer to
   *   [referencing parameters guide](https://docs.stacktape.com/configuration/referencing-parameters/).
   *   If you are using environment variables to inject information about resources into your script, see also property [connectTo](https://docs.stacktape.com/configuration/scripts/#connecting-to-resources) which simplifies this process.
   * - Value of a [secret](https://docs.stacktape.com/resources/secrets/) (using [$Secret directive](https://docs.stacktape.com/configuration/directives/#secret)).
   */
  environment?: EnvironmentVar[];
  /**
   * #### Name of the resource to assume role of
   * ---
   * - If specified, the script will be executed with the permissions of the specified resource.
   * - Resource must already be deployed when the script is executed.
   * - In the background, the script is injected with AWS environment variables which are automatically picked up by most of the AWS SDKs and CLIs.
   * - Supported resource types include:
   *   - `function`
   *   - `batch-job`
   *   - `worker-service`
   *   - `web-service`
   *   - `private-service`
   *   - `multi-container-workload`
   *   - `nextjs-web`
   */
  assumeRoleOfResource?: string;
}
export interface DirectiveDefinition {
  /**
   * #### Name of the directive.
   */
  name: string;
  /**
   * #### Path to the file where the directive is defined.
   * ---
   * - Format: `{file-path}:{handler}`
   * - If you do not specify the `{handler}` part:
   *   - for `.js` and `.ts` files, the `default` export is used
   *   - for `.py` files, the `main` function is used
   */
  filePath: string;
}
/**
 * #### Configures deployment-related aspects for this stack.
 */
export interface DeploymentConfig {
  /**
   * #### Set termination protection for the stack
   * ---
   * - Stacks with termination protection enabled cannot be deleted (you need to disable the protection first).
   */
  terminationProtection?: boolean;
  /**
   * #### ARN of the IAM role used by CloudFormation when performing stack operations
   * ---
   * - NOTE(review): description was missing in the source. This presumably maps to the CloudFormation
   *   `RoleARN` stack parameter (the service role CloudFormation assumes to create/update/delete
   *   resources) — confirm against Stacktape documentation.
   */
  cloudformationRoleArn?: string;
  /**
   * #### Alarms to be monitored during deployment operation
   * ---
   * - If any of the alarms goes to **ALARM** state during a stack deployment operation, the deployment is canceled and the stack is rolled back
   * - You can specify either:
   *   - Name of an alarm specified in the `alarms` section
   *   - ARN of an arbitrary CloudWatch alarm
   * > If you specify the alarm name of a **newly created** alarm from the `alarms` section, it is used as a rollback trigger only in subsequent updates.
   * >
   * > In other words, the alarm must already exist before the deploy operation starts to be used as a rollback trigger.
   */
  triggerRollbackOnAlarms?: string[];
  /**
   * #### The amount of time, during which CloudFormation monitors the stack (and rollback alarms) after deployment.
   * ---
   * - During this period the stack is rolled back if either:
   *   - Any of the rollback alarms are triggered.
   *   - Update of the stack is canceled (by user through console/SDK/cli)
   * - Default is 0 minutes
   */
  monitoringTimeAfterDeploymentInMinutes?: number;
  /**
   * #### Disables automatic rollback after the deployment failure
   * ---
   * When automatic rollback is **enabled** (default) and the deployment fails:
   * - the stack will be automatically rolled back to the latest working version
   * - resources created during the failed deployment will be deleted.
   * - to move forward, you should resolve the issues and deploy again
   *
   * When automatic rollback is **disabled** and the deployment fails:
   * - the stack will remain in the `UPDATE_FAILED` state
   * - all of the successfully created and updated resources will be kept
   * - to move forward, you can resolve the issues causing the deployment to fail and then use `stacktape deploy` again
   *   or roll back to the latest working version using the `stacktape rollback` command.
   */
  disableAutoRollback?: boolean;
  /**
   * #### ARNs to which CloudFormation stack events are published
   * ---
   * - NOTE(review): description was missing in the source. This presumably maps to the CloudFormation
   *   `NotificationARNs` stack parameter (SNS topics that receive stack event notifications) — confirm
   *   against Stacktape documentation.
   */
  publishEventsToArn?: string[];
  /**
   * #### Amount of previous versions that will have their deployment artifacts preserved
   * ---
   * - Stacktape keeps deployment artifacts from previous versions (functions, images, templates)
   * - By default, Stacktape keeps 3 previous versions
   */
  previousVersionsToKeep?: number;
  /**
   * #### Disables S3 transfer acceleration.
   * ---
   * - S3 transfer acceleration improves upload times of your deployment artifacts.
   * - Objects are uploaded to the nearest AWS edge location, and routed to the bucket from there using the AWS backbone network.
   * - Used to improve upload times and security.
   * - S3 transfer acceleration includes (insignificant) additional costs.
   */
  disableS3TransferAcceleration?: boolean;
}
/**
 * #### Configures other, uncategorized aspects of this stack
 */
export interface StackConfig {
  /**
   * #### Configures outputs of your stack
   * ---
   * - You can save outputs of your stack (such as Api Gateway URLs, database endpoints, resource ARNs, etc.).
   * - Most of these outputs are only known after your deployment is finished. (They are dynamically allocated by AWS).
   */
  outputs?: StackOutput[];
  /**
   * #### Tags to apply to this stack
   * ---
   * - These tags are propagated to all AWS resources created in this stack.
   * - Tags can help you to identify and categorize resources.
   * - Not every AWS resource supports tags.
   * - A maximum number of 45 tags can be specified.
   */
  tags?: CloudformationTag[];
  /**
   * #### Disables saving of information about the deployed stack to a local directory after each deployment
   * ---
   * - Stack information includes data about deployed resources, outputs, and metadata
   * - By default, the information is saved to `.stacktape-stack-info/<<stack-name>>.json`
   */
  disableStackInfoSaving?: boolean;
  /**
   * #### Directory where the information about the deployed stacks will be saved
   * ---
   * - Stack information includes data about deployed resources, outputs, and metadata
   * - By default, the information is saved to `.stacktape-stack-info/<<stack-name>>.json`
   */
  stackInfoDirectory?: string;
}
export interface StackOutput {
  /**
   * #### Name of the stack output
   */
  name: string;
  /**
   * #### Value of the stack output
   */
  value: string;
  /**
   * #### Human-readable description of the stack output
   */
  description?: string;
  /**
   * #### Exports the stack output so it can be referenced by another stack.
   * ---
   * - To see how to reference the output in another stack, see the [CfStackOutput directive](https://docs.stacktape.com/configuration/directives#cf-stack-output).
   */
  export?: boolean;
}
export interface CloudformationTag {
  /**
   * #### Name of the tag
   * ---
   * - Must be 1-128 characters long.
   * - Can consist of the following characters: Unicode letters, digits, whitespace, `_`, `.`, `/`, `=`, `+`, and `-`.
   */
  name: string;
  /**
   * #### Value of the tag
   * ---
   * - Must be 1-256 characters long.
   */
  value: string;
}
/**
 * #### Relational (SQL) database resource
 * ---
 * - Fully managed relational databases (Postgres, MySQL, MariaDb, etc.) with support for clustering, failover & more.
 */
export interface RelationalDatabase {
  type: "relational-database";
  properties: RelationalDatabaseProps;
  /**
   * #### Overrides one or more properties of the specified child resource.
   * ---
   * - Child resources are specified using their CloudFormation logical id (e.g. `MyBucketBucket`).
   * - To see all configurable child resources for a given Stacktape resource, use the `stacktape stack-info --detailed` command.
   * - To see the list of properties that can be overridden, refer to
   *   [AWS Cloudformation docs](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html).
   */
  overrides?: {
    [k: string]: {
      [k: string]: unknown;
    };
  };
}
export interface RelationalDatabaseProps {
  credentials: RelationalDatabaseCredentials;
  /**
   * #### Configures the underlying database engine and its properties
   * ---
   * Database engine determines the properties of your database, such as database type (PostgreSQL, MySQL...), number of instances, high availability capabilities, redundancy,
   * performance, scaling behavior, pricing etc.
   *
   * Depending on the properties they provide, we group engine types into following groups:
   *
   * **Rds Engine** group
   * - To use the RDS engine, set the `engine.type` property to `postgres`, `mysql`, `mariadb`, `oracle-ee`, `oracle-se2`, `sqlserver-ee`, `sqlserver-ex`, `sqlserver-se` or `sqlserver-web`.
   * - Fully managed single-node database engines.
   * - Configurable read replicas (additional parallelly-running, automatically-replicated instances used for read operations).
   *
   * **Aurora Engine** group
   * - To use the Aurora engine, set the `engine.type` property to `aurora-postgresql` or `aurora-mysql`.
   * - Fully-managed AWS-developed engines (based on and compatible with either PostgreSQL or MySQL) with clustering support, high-availability, increased durability & performance.
   * - Compute instances (nodes) run across multiple Availability Zones. Storage is automatically replicated 6-ways across 3
   *   availability zones.
   * - Automatically load-balances read operations between nodes.
   * - Automatic failover - if a primary instance fails, one of the read replicas is elected as a new primary instance.
   * - To learn more about Aurora Engines, refer to [AWS Docs](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.how-it-works.html)
   *
   * **Aurora Serverless Engine** group
   * >
   * > We recommend using the newer **Aurora Serverless V2 Engines** instead of this one.
   * >
   * - To use the Aurora Serverless engine, set the `engine.type` property to `aurora-postgresql-serverless` or `aurora-mysql-serverless`.
   * - Fully-managed AWS-developed engines (based on and compatible with either PostgreSQL or MySQL) with high-availability, increased durability & performance.
   * - Supported engines: `aurora-postgresql-serverless`, `aurora-mysql-serverless`.
   * - Similar to Aurora Engines, but automatically scales based on usage.
   * - Scaling is done using ACUs (Aurora Compute units). Each ACU has ~2GB of RAM and 1 virtual CPU.
   * - Can scale to 0 ACUs (database is paused, and you don't pay anything).
   * - To learn more about Aurora Engines, refer to [AWS Docs](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.how-it-works.html)
   *
   * **Aurora Serverless V2 Engine** group
   * - To use the Aurora Serverless V2 engine, set the `engine.type` property to `aurora-postgresql-serverless-v2` or `aurora-mysql-serverless-v2`.
   * - Fully-managed AWS-developed engines (based on and compatible with either PostgreSQL or MySQL) with high-availability, increased durability & performance.
   * - Supported engines: `aurora-postgresql-serverless-v2`, `aurora-mysql-serverless-v2`.
   * - Similar to Aurora Serverless Engines, but is more responsive, more granular, and less disruptive when scaling.
   * - Scaling is done using ACUs (Aurora Compute units). Each ACU has ~2GB of RAM and 1 virtual CPU.
   * - Can scale to 0 ACUs (database is paused, and you don't pay anything).
   * - To learn more about Aurora Engines, refer to [AWS Docs](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.how-it-works.html)
   */
  engine: AuroraServerlessEngine | RdsEngine | AuroraEngine | AuroraServerlessV2Engine;
  accessibility?: DatabaseAccessibility;
  /**
   * #### Enables database deletion protection
   * ---
   * - By default, the database is not deletion-protected.
   * - To delete a database with `deletionProtection` enabled, you first need to explicitly disable the protection.
   */
  deletionProtection?: boolean;
  /**
   * #### Configures how long the database backups will be retained (in days)
   * ---
   * - Databases are automatically backed up once a day.
   * - Maximum retention period is `35` days.
   * - You can disable automated backups by setting the value to 0 (works only for RDS engines).
   * - You can also take manual backup snapshots (in the console or using the API). The retention is not applied to manual backups.
   * - By default, backups are retained for 1 day.
   * - To learn more about RDS engine backups, refer to
   *   [RDS engine backups AWS Docs](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithAutomatedBackups.html).
   * - To learn more about Aurora engine backups, refer to
   *   [Aurora engine backups AWS Docs](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Managing.Backups.html).
   */
  automatedBackupRetentionDays?: number;
  /**
   * #### Configures the preferred maintenance window for the database
   * ---
   * - The maintenance window is the time period during which the database is available for maintenance.
   * - During this time, your database instance can be updated with modifications like OS patching or database engine version upgrades.
   * - The database might be unavailable for a short period of time during maintenance. To avoid service interruptions, you should use `multiAz` deployment for RDS engines or use an Aurora engine.
   * - The maintenance window is **specified in UTC**.
   * - The maintenance window is specified in the format of `day:time-day:time`. Example: `Sun:02:00-Sun:04:00`
   * - By default, the maintenance window is set to **Sunday 02:00 - Sunday 04:00** adjusted to the region's timezone (but does not account for daylight savings time). Examples:
   *   - In **us-east-1** the maintenance window is set to **Sunday 07:00 - Sunday 09:00 UTC** which translates to **Sunday 02:00 - Sunday 04:00** in the region's timezone
   *   - In **eu-west-1** the maintenance window is set to **Sunday 01:00 - Sunday 03:00 UTC** which translates to **Sunday 02:00 - Sunday 04:00** in the region's timezone
   */
  preferredMaintenanceWindow?: string;
  /**
   * #### Additional alarms associated with this resource
   * ---
   * - These alarms will be merged with the alarms configured globally in the [console](https://console.stacktape.com/alarms)
   */
  alarms?: RelationalDatabaseAlarm[];
  /**
   * #### Disables globally configured alarms specifically for this resource
   * ---
   * - List of alarm names as configured in the [console](https://console.stacktape.com/alarms)
   */
  disabledGlobalAlarms?: string[];
  logging?: RelationalDatabaseLogging;
}
/**
* #### Configures credentials for the database master user
* ---
* - Credentials for master user can be used to connect to the database.
* - Master user credentials are used in database `connectionString` (URL).
*/
export interface RelationalDatabaseCredentials {
/**
* #### Username of the database master user
* ---
* - This name will be used for the main admin user in your database.
* - Due to name being part of database `connectionString` (URL), it should not contain following characters: `[]{}(),;?*=!@`
* - You can use this name for connecting to the database.
*
* > Changing this parameter after the database has been created will cause database replacement and data loss
*
* - By default, `db_master_user` is used
*/
masterUserName?: string;
/**