// @aws-cdk/aws-eks-v2-alpha
// Version: 2.222.0-alpha.0
// The CDK Construct Library for AWS::EKS
// 979 lines • 216 kB
// JavaScript
"use strict";
var __runInitializers = (this && this.__runInitializers) || function (thisArg, initializers, value) {
    // TS emit helper: run class-element initializers in order. When a third
    // argument was supplied, each initializer receives and may transform the
    // running value (field initializers); otherwise they run for effect only.
    var hasValue = arguments.length > 2;
    var current = value;
    for (var idx = 0; idx < initializers.length; idx++) {
        current = hasValue ? initializers[idx].call(thisArg, current) : initializers[idx].call(thisArg);
    }
    return hasValue ? current : void 0;
};
// TypeScript emit helper for ES (stage-3 style) decorators: applies `decorators`
// to one class element described by `contextIn`, collecting field/accessor
// initializers into `initializers` and addInitializer callbacks into
// `extraInitializers`.
var __esDecorate = (this && this.__esDecorate) || function (ctor, descriptorIn, decorators, contextIn, initializers, extraInitializers) {
    // Reject any decorator result member that is neither undefined nor a function.
    function accept(f) { if (f !== void 0 && typeof f !== "function") throw new TypeError("Function expected"); return f; }
    // Getters/setters live in the "get"/"set" descriptor slot; everything else in "value".
    var kind = contextIn.kind, key = kind === "getter" ? "get" : kind === "setter" ? "set" : "value";
    // When no descriptor was passed, resolve the owner object: the constructor for
    // static members, the prototype for instance members.
    var target = !descriptorIn && ctor ? contextIn["static"] ? ctor : ctor.prototype : null;
    var descriptor = descriptorIn || (target ? Object.getOwnPropertyDescriptor(target, contextIn.name) : {});
    var _, done = false;
    // Decorators are applied in reverse declaration order.
    for (var i = decorators.length - 1; i >= 0; i--) {
        var context = {};
        // Shallow-copy the shared context; each decorator gets a fresh "access" object.
        for (var p in contextIn) context[p] = p === "access" ? {} : contextIn[p];
        for (var p in contextIn.access) context.access[p] = contextIn.access[p];
        // addInitializer is only legal while decoration is still running (see `done`).
        context.addInitializer = function (f) { if (done) throw new TypeError("Cannot add initializers after decoration has completed"); extraInitializers.push(accept(f || null)); };
        // `(0, fn)(...)` call form strips any implicit `this` from the decorator call.
        var result = (0, decorators[i])(kind === "accessor" ? { get: descriptor.get, set: descriptor.set } : descriptor[key], context);
        if (kind === "accessor") {
            // Accessor decorators may return a partial { get, set, init } object.
            if (result === void 0) continue;
            if (result === null || typeof result !== "object") throw new TypeError("Object expected");
            if (_ = accept(result.get)) descriptor.get = _;
            if (_ = accept(result.set)) descriptor.set = _;
            if (_ = accept(result.init)) initializers.unshift(_);
        }
        else if (_ = accept(result)) {
            // Field decorators return an extra initializer; other kinds return a
            // replacement for the decorated member itself.
            if (kind === "field") initializers.unshift(_);
            else descriptor[key] = _;
        }
    }
    // Install the (possibly replaced) member back onto its owner.
    if (target) Object.defineProperty(target, contextIn.name, descriptor);
    done = true;
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.MachineImageType = exports.DefaultCapacityType = exports.CoreDnsComputeType = exports.CpuArch = exports.NodeType = exports.EksOptimizedImage = exports.Cluster = exports.IpFamily = exports.ClusterLoggingTypes = exports.KubernetesVersion = exports.EndpointAccess = void 0;
const jsiiDeprecationWarnings = require("../.warnings.jsii.js");
const JSII_RTTI_SYMBOL_1 = Symbol.for("jsii.rtti");
const fs = require("fs");
const path = require("path");
const constructs_1 = require("constructs");
const YAML = require("yaml");
const access_entry_1 = require("./access-entry");
const addon_1 = require("./addon");
const alb_controller_1 = require("./alb-controller");
const fargate_profile_1 = require("./fargate-profile");
const helm_chart_1 = require("./helm-chart");
const instance_types_1 = require("./instance-types");
const k8s_manifest_1 = require("./k8s-manifest");
const k8s_object_value_1 = require("./k8s-object-value");
const k8s_patch_1 = require("./k8s-patch");
const kubectl_provider_1 = require("./kubectl-provider");
const managed_nodegroup_1 = require("./managed-nodegroup");
const oidc_provider_1 = require("./oidc-provider");
const bottlerocket_1 = require("./private/bottlerocket");
const service_account_1 = require("./service-account");
const user_data_1 = require("./user-data");
const autoscaling = require("aws-cdk-lib/aws-autoscaling");
const ec2 = require("aws-cdk-lib/aws-ec2");
const iam = require("aws-cdk-lib/aws-iam");
const ssm = require("aws-cdk-lib/aws-ssm");
const core_1 = require("aws-cdk-lib/core");
const aws_eks_1 = require("aws-cdk-lib/aws-eks");
const metadata_resource_1 = require("aws-cdk-lib/core/lib/metadata-resource");
const prop_injectable_1 = require("aws-cdk-lib/core/lib/prop-injectable");
// defaults are based on https://eksctl.io
// Number of worker nodes provisioned when default capacity is requested but no count is given.
const DEFAULT_CAPACITY_COUNT = 2;
// Instance type used for default capacity when none is specified (m5.large).
const DEFAULT_CAPACITY_TYPE = ec2.InstanceType.of(ec2.InstanceClass.M5, ec2.InstanceSize.LARGE);
/**
 * Endpoint access characteristics.
 *
 * Use the static `PUBLIC`, `PRIVATE` or `PUBLIC_AND_PRIVATE` presets, optionally
 * narrowed with `onlyFrom(...cidr)`.
 */
class EndpointAccess {
    _config;
    static [JSII_RTTI_SYMBOL_1] = { fqn: "@aws-cdk/aws-eks-v2-alpha.EndpointAccess", version: "2.222.0-alpha.0" };
    /**
     * The cluster endpoint is accessible from outside of your VPC.
     * Worker node traffic will leave your VPC to connect to the endpoint.
     *
     * By default, the endpoint is exposed to all addresses. You can optionally limit the CIDR blocks that can access the public endpoint using the `PUBLIC.onlyFrom` method.
     * If you limit access to specific CIDR blocks, you must ensure that the CIDR blocks that you
     * specify include the addresses that worker nodes and Fargate pods (if you use them)
     * access the public endpoint from.
     */
    static PUBLIC = new EndpointAccess({ privateAccess: false, publicAccess: true });
    /**
     * The cluster endpoint is only accessible through your VPC.
     * Worker node traffic to the endpoint will stay within your VPC.
     */
    static PRIVATE = new EndpointAccess({ privateAccess: true, publicAccess: false });
    /**
     * The cluster endpoint is accessible from outside of your VPC.
     * Worker node traffic to the endpoint will stay within your VPC.
     *
     * By default, the endpoint is exposed to all addresses. You can optionally limit the CIDR blocks that can access the public endpoint using the `PUBLIC_AND_PRIVATE.onlyFrom` method.
     * If you limit access to specific CIDR blocks, you must ensure that the CIDR blocks that you
     * specify include the addresses that worker nodes and Fargate pods (if you use them)
     * access the public endpoint from.
     */
    static PUBLIC_AND_PRIVATE = new EndpointAccess({ privateAccess: true, publicAccess: true });
    constructor(
    /**
     * Configuration properties.
     *
     * @internal
     */
    _config) {
        this._config = _config;
        // Restricting public CIDRs is meaningless when the public endpoint is disabled.
        if (!_config.publicAccess && _config.publicCidrs && _config.publicCidrs.length > 0) {
            throw new Error('CIDR blocks can only be configured when public access is enabled');
        }
    }
    /**
     * Restrict public access to specific CIDR blocks.
     * If public access is disabled, this method will result in an error.
     *
     * @param cidr CIDR blocks.
     * @returns a new `EndpointAccess` with the same access flags and the given public CIDRs.
     */
    onlyFrom(...cidr) {
        if (!this._config.privateAccess) {
            // when private access is disabled, we can't restrict public
            // access since it will render the kubectl provider unusable.
            throw new Error('Cannot restrict public access to endpoint when private access is disabled. Use PUBLIC_AND_PRIVATE.onlyFrom() instead.');
        }
        return new EndpointAccess({
            ...this._config,
            // override CIDR
            publicCidrs: cidr,
        });
    }
}
exports.EndpointAccess = EndpointAccess;
/**
 * Kubernetes cluster version
 * @see https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html#kubernetes-release-calendar
 */
class KubernetesVersion {
    version;
    static [JSII_RTTI_SYMBOL_1] = { fqn: "@aws-cdk/aws-eks-v2-alpha.KubernetesVersion", version: "2.222.0-alpha.0" };
    /**
     * Custom cluster version
     * @param version custom version number
     */
    static of(version) {
        return new KubernetesVersion(version);
    }
    /**
     *
     * @param version cluster version number
     */
    constructor(version) {
        this.version = version;
    }
    /**
     * Kubernetes version 1.25. A `Cluster` created with this version must also set the
     * `kubectlLayer` property to a `KubectlV25Layer` from `@aws-cdk/lambda-layer-kubectl-v25`.
     */
    static V1_25 = KubernetesVersion.of('1.25');
    /**
     * Kubernetes version 1.26. A `Cluster` created with this version must also set the
     * `kubectlLayer` property to a `KubectlV26Layer` from `@aws-cdk/lambda-layer-kubectl-v26`.
     */
    static V1_26 = KubernetesVersion.of('1.26');
    /**
     * Kubernetes version 1.27. A `Cluster` created with this version must also set the
     * `kubectlLayer` property to a `KubectlV27Layer` from `@aws-cdk/lambda-layer-kubectl-v27`.
     */
    static V1_27 = KubernetesVersion.of('1.27');
    /**
     * Kubernetes version 1.28. A `Cluster` created with this version must also set the
     * `kubectlLayer` property to a `KubectlV28Layer` from `@aws-cdk/lambda-layer-kubectl-v28`.
     */
    static V1_28 = KubernetesVersion.of('1.28');
    /**
     * Kubernetes version 1.29. A `Cluster` created with this version must also set the
     * `kubectlLayer` property to a `KubectlV29Layer` from `@aws-cdk/lambda-layer-kubectl-v29`.
     */
    static V1_29 = KubernetesVersion.of('1.29');
    /**
     * Kubernetes version 1.30. A `Cluster` created with this version must also set the
     * `kubectlLayer` property to a `KubectlV30Layer` from `@aws-cdk/lambda-layer-kubectl-v30`.
     */
    static V1_30 = KubernetesVersion.of('1.30');
    /**
     * Kubernetes version 1.31. A `Cluster` created with this version must also set the
     * `kubectlLayer` property to a `KubectlV31Layer` from `@aws-cdk/lambda-layer-kubectl-v31`.
     */
    static V1_31 = KubernetesVersion.of('1.31');
    /**
     * Kubernetes version 1.32. A `Cluster` created with this version must also set the
     * `kubectlLayer` property to a `KubectlV32Layer` from `@aws-cdk/lambda-layer-kubectl-v32`.
     */
    static V1_32 = KubernetesVersion.of('1.32');
    /**
     * Kubernetes version 1.33. A `Cluster` created with this version must also set the
     * `kubectlLayer` property to a `KubectlV33Layer` from `@aws-cdk/lambda-layer-kubectl-v33`.
     */
    static V1_33 = KubernetesVersion.of('1.33');
}
exports.KubernetesVersion = KubernetesVersion;
// Shared definition with packages/@aws-cdk/custom-resource-handlers/test/aws-eks/compare-log.test.ts
/**
 * EKS cluster logging types
 */
var ClusterLoggingTypes;
(function (types) {
    /** Logs pertaining to API requests to the cluster. */
    types["API"] = "api";
    /** Logs pertaining to cluster access via the Kubernetes API. */
    types["AUDIT"] = "audit";
    /** Logs pertaining to authentication requests into the cluster. */
    types["AUTHENTICATOR"] = "authenticator";
    /** Logs pertaining to state of cluster controllers. */
    types["CONTROLLER_MANAGER"] = "controllerManager";
    /** Logs pertaining to scheduling decisions. */
    types["SCHEDULER"] = "scheduler";
})(ClusterLoggingTypes || (exports.ClusterLoggingTypes = ClusterLoggingTypes = {}));
/**
 * EKS cluster IP family.
 */
var IpFamily;
(function (family) {
    /** Use IPv4 for pods and services in your cluster. */
    family["IP_V4"] = "ipv4";
    /** Use IPv6 for pods and services in your cluster. */
    family["IP_V6"] = "ipv6";
})(IpFamily || (exports.IpFamily = IpFamily = {}));
/**
 * Shared base for EKS cluster constructs: helpers for defining Kubernetes
 * workloads on the cluster (raw manifests, Helm charts, cdk8s charts, service
 * accounts) and for wiring self-managed AutoScalingGroup capacity.
 *
 * Relies on the concrete subclass to provide cluster attributes such as
 * `clusterName` and `clusterSecurityGroup` (used below).
 */
class ClusterBase extends core_1.Resource {
    /**
     * Defines a Kubernetes resource in this cluster.
     *
     * The manifest will be applied/deleted using kubectl as needed.
     *
     * @param id logical id of this manifest
     * @param manifest a list of Kubernetes resource specifications
     * @returns a `KubernetesResource` object.
     */
    addManifest(id, ...manifest) {
        return new k8s_manifest_1.KubernetesManifest(this, `manifest-${id}`, { cluster: this, manifest });
    }
    /**
     * Defines a Helm chart in this cluster.
     *
     * @param id logical id of this chart.
     * @param options options of this chart.
     * @returns a `HelmChart` construct
     */
    addHelmChart(id, options) {
        return new helm_chart_1.HelmChart(this, `chart-${id}`, { cluster: this, ...options });
    }
    /**
     * Defines a CDK8s chart in this cluster.
     *
     * @param id logical id of this chart.
     * @param chart the cdk8s chart.
     * @returns a `KubernetesManifest` construct representing the chart.
     */
    addCdk8sChart(id, chart, options = {}) {
        const cdk8sChart = chart;
        // duck-type check: any object exposing a toJson() method is accepted as a cdk8s chart
        // see https://github.com/awslabs/cdk8s/blob/master/packages/cdk8s/src/chart.ts#L84
        if (typeof cdk8sChart.toJson !== 'function') {
            throw new Error(`Invalid cdk8s chart. Must contain a 'toJson' method, but found ${typeof cdk8sChart.toJson}`);
        }
        const manifest = new k8s_manifest_1.KubernetesManifest(this, id, {
            cluster: this,
            manifest: cdk8sChart.toJson(),
            ...options,
        });
        return manifest;
    }
    /**
     * Defines a `ServiceAccount` in this cluster.
     *
     * @param id logical id of the service account
     * @param options service account options; `cluster` is always forced to this cluster
     * @returns a `ServiceAccount` construct
     */
    addServiceAccount(id, options = {}) {
        return new service_account_1.ServiceAccount(this, id, {
            ...options,
            cluster: this,
        });
    }
    /**
     * Connect capacity in the form of an existing AutoScalingGroup to the EKS cluster.
     *
     * The AutoScalingGroup must be running an EKS-optimized AMI containing the
     * /etc/eks/bootstrap.sh script. This method will configure Security Groups,
     * add the right policies to the instance role, apply the right tags, and add
     * the required user data to the instance's launch configuration.
     *
     * Prefer to use `addAutoScalingGroupCapacity` if possible.
     *
     * @see https://docs.aws.amazon.com/eks/latest/userguide/launch-workers.html
     * @param autoScalingGroup [disable-awslint:ref-via-interface]
     * @param options options for adding auto scaling groups, like customizing the bootstrap script
     */
    connectAutoScalingGroupCapacity(autoScalingGroup, options) {
        // self rules
        autoScalingGroup.connections.allowInternally(ec2.Port.allTraffic());
        // Cluster to:nodes rules
        autoScalingGroup.connections.allowFrom(this, ec2.Port.tcp(443));
        autoScalingGroup.connections.allowFrom(this, ec2.Port.tcpRange(1025, 65535));
        // Allow HTTPS from Nodes to Cluster
        autoScalingGroup.connections.allowTo(this, ec2.Port.tcp(443));
        // Allow all node outbound traffic
        autoScalingGroup.connections.allowToAnyIpv4(ec2.Port.allTcp());
        autoScalingGroup.connections.allowToAnyIpv4(ec2.Port.allUdp());
        autoScalingGroup.connections.allowToAnyIpv4(ec2.Port.allIcmp());
        // allow traffic to/from managed node groups (eks attaches this security group to the managed nodes)
        autoScalingGroup.addSecurityGroup(this.clusterSecurityGroup);
        // bootstrapping defaults to enabled; passing bootstrapOptions while disabling
        // bootstrap is contradictory and rejected below
        const bootstrapEnabled = options.bootstrapEnabled ?? true;
        if (options.bootstrapOptions && !bootstrapEnabled) {
            throw new Error('Cannot specify "bootstrapOptions" if "bootstrapEnabled" is false');
        }
        if (bootstrapEnabled) {
            // Bottlerocket and Amazon Linux AMIs take different user-data formats
            const userData = options.machineImageType === MachineImageType.BOTTLEROCKET ?
                (0, user_data_1.renderBottlerocketUserData)(this) :
                (0, user_data_1.renderAmazonLinuxUserData)(this, autoScalingGroup, options.bootstrapOptions);
            autoScalingGroup.addUserData(...userData);
        }
        autoScalingGroup.role.addManagedPolicy(iam.ManagedPolicy.fromAwsManagedPolicyName('AmazonEKSWorkerNodePolicy'));
        autoScalingGroup.role.addManagedPolicy(iam.ManagedPolicy.fromAwsManagedPolicyName('AmazonEKS_CNI_Policy'));
        autoScalingGroup.role.addManagedPolicy(iam.ManagedPolicy.fromAwsManagedPolicyName('AmazonEC2ContainerRegistryReadOnly'));
        // EKS Required Tags
        // https://docs.aws.amazon.com/eks/latest/userguide/worker.html
        core_1.Tags.of(autoScalingGroup).add(`kubernetes.io/cluster/${this.clusterName}`, 'owned', {
            applyToLaunchedInstances: true,
            // exclude security groups to avoid multiple "owned" security groups.
            // (the cluster security group already has this tag)
            excludeResourceTypes: ['AWS::EC2::SecurityGroup'],
        });
        // since we are not mapping the instance role to RBAC, synthesize an
        // output so it can be pasted into `aws-auth-cm.yaml`
        new core_1.CfnOutput(autoScalingGroup, 'InstanceRoleARN', {
            value: autoScalingGroup.role.roleArn,
        });
        if (this instanceof Cluster && this.albController) {
            // the controller runs on the worker nodes so they cannot
            // be deleted before the controller.
            constructs_1.Node.of(this.albController).addDependency(autoScalingGroup);
        }
    }
}
/**
* A Cluster represents a managed Kubernetes Service (EKS)
*
* This is a fully managed cluster of API Servers (control-plane)
* The user is still required to create the worker nodes.
* @resource AWS::EKS::Cluster
*/
let Cluster = (() => {
let _classDecorators = [prop_injectable_1.propertyInjectable];
let _classDescriptor;
let _classExtraInitializers = [];
let _classThis;
let _classSuper = ClusterBase;
let _instanceExtraInitializers = [];
let _grantAccess_decorators;
let _grantClusterAdmin_decorators;
let _getServiceLoadBalancerAddress_decorators;
let _getIngressLoadBalancerAddress_decorators;
let _addAutoScalingGroupCapacity_decorators;
let _addNodegroupCapacity_decorators;
let _addFargateProfile_decorators;
var Cluster = class extends _classSuper {
static { _classThis = this; }
static {
const _metadata = typeof Symbol === "function" && Symbol.metadata ? Object.create(_classSuper[Symbol.metadata] ?? null) : void 0;
_grantAccess_decorators = [(0, metadata_resource_1.MethodMetadata)()];
_grantClusterAdmin_decorators = [(0, metadata_resource_1.MethodMetadata)()];
_getServiceLoadBalancerAddress_decorators = [(0, metadata_resource_1.MethodMetadata)()];
_getIngressLoadBalancerAddress_decorators = [(0, metadata_resource_1.MethodMetadata)()];
_addAutoScalingGroupCapacity_decorators = [(0, metadata_resource_1.MethodMetadata)()];
_addNodegroupCapacity_decorators = [(0, metadata_resource_1.MethodMetadata)()];
_addFargateProfile_decorators = [(0, metadata_resource_1.MethodMetadata)()];
__esDecorate(this, null, _grantAccess_decorators, { kind: "method", name: "grantAccess", static: false, private: false, access: { has: obj => "grantAccess" in obj, get: obj => obj.grantAccess }, metadata: _metadata }, null, _instanceExtraInitializers);
__esDecorate(this, null, _grantClusterAdmin_decorators, { kind: "method", name: "grantClusterAdmin", static: false, private: false, access: { has: obj => "grantClusterAdmin" in obj, get: obj => obj.grantClusterAdmin }, metadata: _metadata }, null, _instanceExtraInitializers);
__esDecorate(this, null, _getServiceLoadBalancerAddress_decorators, { kind: "method", name: "getServiceLoadBalancerAddress", static: false, private: false, access: { has: obj => "getServiceLoadBalancerAddress" in obj, get: obj => obj.getServiceLoadBalancerAddress }, metadata: _metadata }, null, _instanceExtraInitializers);
__esDecorate(this, null, _getIngressLoadBalancerAddress_decorators, { kind: "method", name: "getIngressLoadBalancerAddress", static: false, private: false, access: { has: obj => "getIngressLoadBalancerAddress" in obj, get: obj => obj.getIngressLoadBalancerAddress }, metadata: _metadata }, null, _instanceExtraInitializers);
__esDecorate(this, null, _addAutoScalingGroupCapacity_decorators, { kind: "method", name: "addAutoScalingGroupCapacity", static: false, private: false, access: { has: obj => "addAutoScalingGroupCapacity" in obj, get: obj => obj.addAutoScalingGroupCapacity }, metadata: _metadata }, null, _instanceExtraInitializers);
__esDecorate(this, null, _addNodegroupCapacity_decorators, { kind: "method", name: "addNodegroupCapacity", static: false, private: false, access: { has: obj => "addNodegroupCapacity" in obj, get: obj => obj.addNodegroupCapacity }, metadata: _metadata }, null, _instanceExtraInitializers);
__esDecorate(this, null, _addFargateProfile_decorators, { kind: "method", name: "addFargateProfile", static: false, private: false, access: { has: obj => "addFargateProfile" in obj, get: obj => obj.addFargateProfile }, metadata: _metadata }, null, _instanceExtraInitializers);
__esDecorate(null, _classDescriptor = { value: _classThis }, _classDecorators, { kind: "class", name: _classThis.name, metadata: _metadata }, null, _classExtraInitializers);
Cluster = _classThis = _classDescriptor.value;
if (_metadata) Object.defineProperty(_classThis, Symbol.metadata, { enumerable: true, configurable: true, writable: true, value: _metadata });
}
static [JSII_RTTI_SYMBOL_1] = { fqn: "@aws-cdk/aws-eks-v2-alpha.Cluster", version: "2.222.0-alpha.0" };
/** Uniquely identifies this class. */
static PROPERTY_INJECTION_ID = '@aws-cdk.aws-eks-v2-alpha.Cluster';
/**
* Import an existing cluster
*
* @param scope the construct scope, in most cases 'this'
* @param id the id or name to import as
* @param attrs the cluster properties to use for importing information
*/
static fromClusterAttributes(scope, id, attrs) {
    // jsii-generated guard: reports usage of any deprecated members in `attrs`.
    try {
        jsiiDeprecationWarnings._aws_cdk_aws_eks_v2_alpha_ClusterAttributes(attrs);
    }
    catch (error) {
        // Trim DeprecationError stack traces to point at the caller, unless
        // JSII_DEBUG=1 requests the full internal trace.
        if (process.env.JSII_DEBUG !== "1" && error.name === "DeprecationError") {
            Error.captureStackTrace(error, this.fromClusterAttributes);
        }
        throw error;
    }
    return new ImportedCluster(scope, id, attrs);
}
// First field initializer also runs the decorator instance initializers
// (comma expression) before constructing the map.
// NOTE(review): presumably keyed by principal — confirm against grantAccess (not in view).
accessEntries = (__runInitializers(this, _instanceExtraInitializers), new Map());
/**
 * The VPC in which this Cluster was created
 */
vpc;
/**
 * The Name of the created EKS Cluster
 */
clusterName;
/**
 * The AWS generated ARN for the Cluster resource
 *
 * For example, `arn:aws:eks:us-west-2:666666666666:cluster/prod`
 */
clusterArn;
/**
 * The endpoint URL for the Cluster
 *
 * This is the URL inside the kubeconfig file to use with kubectl
 *
 * For example, `https://5E1D0CEXAMPLEA591B746AFC5AB30262.yl4.us-west-2.eks.amazonaws.com`
 */
clusterEndpoint;
/**
 * The certificate-authority-data for your cluster.
 */
clusterCertificateAuthorityData;
/**
 * The id of the cluster security group that was created by Amazon EKS for the cluster.
 */
clusterSecurityGroupId;
/**
 * The cluster security group that was created by Amazon EKS for the cluster.
 */
clusterSecurityGroup;
/**
 * Amazon Resource Name (ARN) or alias of the customer master key (CMK).
 */
clusterEncryptionConfigKeyArn;
/**
 * Manages connection rules (Security Group Rules) for the cluster
 *
 * @type {ec2.Connections}
 * @memberof Cluster
 */
connections;
/**
 * IAM role assumed by the EKS Control Plane
 */
role;
/**
 * The auto scaling group that hosts the default capacity for this cluster.
 * This will be `undefined` if the `defaultCapacityType` is not `EC2` or
 * `defaultCapacityType` is `EC2` but default capacity is set to 0.
 */
defaultCapacity;
/**
 * The node group that hosts the default capacity for this cluster.
 * This will be `undefined` if the `defaultCapacityType` is `EC2` or
 * `defaultCapacityType` is `NODEGROUP` but default capacity is set to 0.
 */
defaultNodegroup;
/**
 * Specify which IP family is used to assign Kubernetes pod and service IP addresses.
 *
 * @default - IpFamily.IP_V4
 * @see https://docs.aws.amazon.com/eks/latest/APIReference/API_KubernetesNetworkConfigRequest.html#AmazonEKS-Type-KubernetesNetworkConfigRequest-ipFamily
 */
ipFamily;
/**
 * If the cluster has one (or more) FargateProfiles associated, this array
 * will hold a reference to each.
 */
_fargateProfiles = [];
/**
 * an Open ID Connect Provider instance
 */
_openIdConnectProvider;
/**
 * an EKS Pod Identity Agent instance
 */
_eksPodIdentityAgent;
/**
 * Determines if Kubernetes resources can be pruned automatically.
 */
prune;
/**
 * The ALB Controller construct defined for this cluster.
 * Will be undefined if `albController` wasn't configured.
 */
albController;
// The underlying AWS::EKS::Cluster (CfnCluster) resource.
_clusterResource;
// NOTE(review): presumably the Neuron device plugin manifest; assigned outside this view — confirm.
_neuronDevicePlugin;
// Resolved endpoint access configuration (props.endpointAccess ?? EndpointAccess.PUBLIC_AND_PRIVATE).
endpointAccess;
// Subnet selections registered with the control plane
// (props.vpcSubnets, or public + private-with-egress by default).
vpcSubnets;
// The KubernetesVersion supplied in props.
version;
// TODO: revisit logging format
// CloudFormation `logging` property derived from props.clusterLogging; undefined when not requested.
logging;
/**
 * A dummy CloudFormation resource that is used as a wait barrier which
 * represents that the cluster is ready to receive "kubectl" commands.
 *
 * Specifically, all fargate profiles are automatically added as a dependency
 * of this barrier, which means that it will only be "signaled" when all
 * fargate profiles have been successfully created.
 *
 * When kubectl resources call `_attachKubectlResourceScope()`, this resource
 * is added as their dependency which implies that they can only be deployed
 * after the cluster is ready.
 */
_kubectlReadyBarrier;
// Pass-through of props.kubectlProviderOptions.
_kubectlProviderOptions;
// KubectlProvider created when kubectlProviderOptions are supplied; otherwise undefined.
_kubectlProvider;
// Cluster-admin access granted to the kubectl provider's role.
_clusterAdminAccess;
/**
* Initiates an EKS Cluster with the supplied arguments
*
* @param scope a Construct, most likely a cdk.Stack created
* @param id the id of the Construct to create
* @param props properties in the IClusterProps interface
*/
constructor(scope, id, props) {
super(scope, id, {
physicalName: props.clusterName,
});
try {
jsiiDeprecationWarnings._aws_cdk_aws_eks_v2_alpha_ClusterProps(props);
}
catch (error) {
if (process.env.JSII_DEBUG !== "1" && error.name === "DeprecationError") {
Error.captureStackTrace(error, Cluster);
}
throw error;
}
// Enhanced CDK Analytics Telemetry
(0, metadata_resource_1.addConstructMetadata)(this, props);
this.prune = props.prune ?? true;
this.vpc = props.vpc || new ec2.Vpc(this, 'DefaultVpc');
this.version = props.version;
this._kubectlProviderOptions = props.kubectlProviderOptions;
this.tagSubnets();
// this is the role used by EKS when interacting with AWS resources
this.role = props.role || new iam.Role(this, 'Role', {
assumedBy: new iam.ServicePrincipal('eks.amazonaws.com'),
managedPolicies: [
iam.ManagedPolicy.fromAwsManagedPolicyName('AmazonEKSClusterPolicy'),
],
});
// validate all automode relevant configurations
const autoModeEnabled = this.isValidAutoModeConfig(props);
if (autoModeEnabled) {
// attach required managed policy for the cluster role in EKS Auto Mode
// see - https://docs.aws.amazon.com/eks/latest/userguide/auto-cluster-iam-role.html
['AmazonEKSComputePolicy',
'AmazonEKSBlockStoragePolicy',
'AmazonEKSLoadBalancingPolicy',
'AmazonEKSNetworkingPolicy'].forEach((policyName) => {
this.role.addManagedPolicy(iam.ManagedPolicy.fromAwsManagedPolicyName(policyName));
});
// sts:TagSession is required for EKS Auto Mode or when using EKS Pod Identity features.
// see https://docs.aws.amazon.com/eks/latest/userguide/pod-id-role.html
// https://docs.aws.amazon.com/eks/latest/userguide/automode-get-started-cli.html#_create_an_eks_auto_mode_cluster_iam_role
if (this.role instanceof iam.Role) {
this.role.assumeRolePolicy?.addStatements(new iam.PolicyStatement({
effect: iam.Effect.ALLOW,
principals: [new iam.ServicePrincipal('eks.amazonaws.com')],
actions: ['sts:TagSession'],
}));
}
}
const securityGroup = props.securityGroup || new ec2.SecurityGroup(this, 'ControlPlaneSecurityGroup', {
vpc: this.vpc,
description: 'EKS Control Plane Security Group',
});
this.vpcSubnets = props.vpcSubnets ?? [{ subnetType: ec2.SubnetType.PUBLIC }, { subnetType: ec2.SubnetType.PRIVATE_WITH_EGRESS }];
const selectedSubnetIdsPerGroup = this.vpcSubnets.map(s => this.vpc.selectSubnets(s).subnetIds);
if (selectedSubnetIdsPerGroup.some(core_1.Token.isUnresolved) && selectedSubnetIdsPerGroup.length > 1) {
throw new Error('eks.Cluster: cannot select multiple subnet groups from a VPC imported from list tokens with unknown length. Select only one subnet group, pass a length to Fn.split, or switch to Vpc.fromLookup.');
}
// Get subnetIds for all selected subnets
const subnetIds = Array.from(new Set(flatten(selectedSubnetIdsPerGroup)));
this.logging = props.clusterLogging ? {
clusterLogging: {
enabledTypes: props.clusterLogging.map((type) => ({ type })),
},
} : undefined;
this.endpointAccess = props.endpointAccess ?? EndpointAccess.PUBLIC_AND_PRIVATE;
this.ipFamily = props.ipFamily ?? IpFamily.IP_V4;
const privateSubnets = this.selectPrivateSubnets().slice(0, 16);
const publicAccessDisabled = !this.endpointAccess._config.publicAccess;
const publicAccessRestricted = !publicAccessDisabled
&& this.endpointAccess._config.publicCidrs
&& this.endpointAccess._config.publicCidrs.length !== 0;
// validate endpoint access configuration
if (privateSubnets.length === 0 && publicAccessDisabled) {
// no private subnets and no public access at all, no good.
throw new Error('Vpc must contain private subnets when public endpoint access is disabled');
}
if (privateSubnets.length === 0 && publicAccessRestricted) {
// no private subnets and public access is restricted, no good.
throw new Error('Vpc must contain private subnets when public endpoint access is restricted');
}
if (props.serviceIpv4Cidr && props.ipFamily == IpFamily.IP_V6) {
throw new Error('Cannot specify serviceIpv4Cidr with ipFamily equal to IpFamily.IP_V6');
}
const resource = this._clusterResource = new aws_eks_1.CfnCluster(this, 'Resource', {
name: this.physicalName,
roleArn: this.role.roleArn,
version: props.version.version,
accessConfig: {
authenticationMode: 'API',
bootstrapClusterCreatorAdminPermissions: props.bootstrapClusterCreatorAdminPermissions,
},
computeConfig: {
enabled: autoModeEnabled,
// If the computeConfig enabled flag is set to false when creating a cluster with Auto Mode,
// the request must not include values for the nodeRoleArn or nodePools fields.
// Also, if nodePools is empty, nodeRoleArn should not be included to prevent deployment failures
nodePools: !autoModeEnabled ? undefined : props.compute?.nodePools ?? ['system', 'general-purpose'],
nodeRoleArn: !autoModeEnabled || (props.compute?.nodePools && props.compute.nodePools.length === 0) ?
undefined :
props.compute?.nodeRole?.roleArn ?? this.addNodePoolRole(`${id}nodePoolRole`).roleArn,
},
storageConfig: {
blockStorage: {
enabled: autoModeEnabled,
},
},
kubernetesNetworkConfig: {
ipFamily: this.ipFamily,
serviceIpv4Cidr: props.serviceIpv4Cidr,
elasticLoadBalancing: {
enabled: autoModeEnabled,
},
},
resourcesVpcConfig: {
securityGroupIds: [securityGroup.securityGroupId],
subnetIds,
endpointPrivateAccess: this.endpointAccess._config.privateAccess,
endpointPublicAccess: this.endpointAccess._config.publicAccess,
publicAccessCidrs: this.endpointAccess._config.publicCidrs,
},
...(props.secretsEncryptionKey ? {
encryptionConfig: [{
provider: {
keyArn: props.secretsEncryptionKey.keyRef.keyArn,
},
resources: ['secrets'],
}],
} : {}),
tags: Object.keys(props.tags ?? {}).map(k => ({ key: k, value: props.tags[k] })),
logging: this.logging,
});
let kubectlSubnets = this._kubectlProviderOptions?.privateSubnets;
if (this.endpointAccess._config.privateAccess && privateSubnets.length !== 0) {
// when private access is enabled and the vpc has private subnets, lets connect
// the provider to the vpc so that it will work even when restricting public access.
// validate VPC properties according to: https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html
if (this.vpc instanceof ec2.Vpc && !(this.vpc.dnsHostnamesEnabled && this.vpc.dnsSupportEnabled)) {
throw new Error('Private endpoint access requires the VPC to have DNS support and DNS hostnames enabled. Use `enableDnsHostnames: true` and `enableDnsSupport: true` when creating the VPC.');
}
kubectlSubnets = privateSubnets;
// the vpc must exist in order to properly delete the cluster (since we run `kubectl delete`).
// this ensures that.
this._clusterResource.node.addDependency(this.vpc);
}
// we use an SSM parameter as a barrier because it's free and fast.
this._kubectlReadyBarrier = new core_1.CfnResource(this, 'KubectlReadyBarrier', {
type: 'AWS::SSM::Parameter',
properties: {
Type: 'String',
Value: 'aws:cdk:eks:kubectl-ready',
},
});
// add the cluster resource itself as a dependency of the barrier
this._kubectlReadyBarrier.node.addDependency(this._clusterResource);
this.clusterName = this.getResourceNameAttribute(resource.ref);
this.clusterArn = this.getResourceArnAttribute(resource.attrArn, clusterArnComponents(this.physicalName));
this.clusterEndpoint = resource.attrEndpoint;
this.clusterCertificateAuthorityData = resource.attrCertificateAuthorityData;
this.clusterSecurityGroupId = resource.attrClusterSecurityGroupId;
this.clusterEncryptionConfigKeyArn = resource.attrEncryptionConfigKeyArn;
this.clusterSecurityGroup = ec2.SecurityGroup.fromSecurityGroupId(this, 'ClusterSecurityGroup', this.clusterSecurityGroupId);
this.connections = new ec2.Connections({
securityGroups: [this.clusterSecurityGroup, securityGroup],
defaultPort: ec2.Port.tcp(443), // Control Plane has an HTTPS API
});
const stack = core_1.Stack.of(this);
const updateConfigCommandPrefix = `aws eks update-kubeconfig --name ${this.clusterName}`;
const getTokenCommandPrefix = `aws eks get-token --cluster-name ${this.clusterName}`;
const commonCommandOptions = [`--region ${stack.region}`];
if (props.kubectlProviderOptions) {
this._kubectlProvider = new kubectl_provider_1.KubectlProvider(this, 'KubectlProvider', {
cluster: this,
role: this._kubectlProviderOptions?.role,
awscliLayer: this._kubectlProviderOptions?.awscliLayer,
kubectlLayer: this._kubectlProviderOptions.kubectlLayer,
environment: this._kubectlProviderOptions?.environment,
memory: this._kubectlProviderOptions?.memory,
privateSubnets: kubectlSubnets,
});
// give the handler role admin access to the cluster
// so it can deploy/query any resource.
this._clusterAdminAccess = this.grantClusterAdmin('ClusterAdminRoleAccess', this._kubectlProvider?.role.roleArn);
}
// do not create a masters role if one is not provided. Trusting the accountRootPrincipal() is too permissive.
if (props.mastersRole) {
const mastersRole = props.mastersRole;
this.grantAccess('mastersRoleAccess', props.mastersRole.roleArn, [
access_entry_1.AccessPolicy.fromAccessPolicyName('AmazonEKSClusterAdminPolicy', {
accessScopeType: access_entry_1.AccessScopeType.CLUSTER,
}),
]);
commonCommandOptions.push(`--role-arn ${mastersRole.roleArn}`);
}
if (props.albController) {
this.albController = alb_controller_1.AlbController.create(this, { ...props.albController, cluster: this });
}
// if any of defaultCapacity* properties are set, we need a default capacity(nodegroup)
if (props.defaultCapacity !== undefined ||
props.defaultCapacityType !== undefined ||
props.defaultCapacityInstance !== undefined) {
const minCapacity = props.defaultCapacity ?? DEFAULT_CAPACITY_COUNT;
if (minCapacity > 0) {
const instanceType = props.defaultCapacityInstance || DEFAULT_CAPACITY_TYPE;
// If defaultCapacityType is undefined, use AUTOMODE as the default
const capacityType = props.defaultCapacityType ?? DefaultCapacityType.AUTOMODE;
// Only create EC2 or Nodegroup capacity if not using AUTOMODE
if (capacityType === DefaultCapacityType.EC2) {
this.defaultCapacity = this.addAutoScalingGroupCapacity('DefaultCapacity', { instanceType, minCapacity });
}
else if (capacityType === DefaultCapacityType.NODEGROUP) {
this.defaultNodegroup = this.addNodegroupCapacity('DefaultCapacity', { instanceTypes: [instanceType], minSize: minCapacity });
}
// For AUTOMODE, we don't create any explicit capacity as it's managed by EKS
}
}
// ensure FARGATE still applies here
if (props.coreDnsComputeType === CoreDnsComputeType.FARGATE) {
this.defineCoreDnsComputeType(CoreDnsComputeType.FARGATE);
}
const outputConfigCommand = (props.outputConfigCommand ?? true) && props.mastersRole;
if (outputConfigCommand) {
const postfix = commonCommandOptions.join(' ');
new core_1.CfnOutput(this, 'ConfigCommand', { value: `${updateConfigCommandPrefix} ${postfix}` });
new core_1.CfnOutput(this, 'GetTokenCommand', { value: `${getTokenCommandPrefix} ${postfix}` });
}
}
/**
 * Grants the specified IAM principal access to the EKS cluster based on the provided access policies.
 *
 * This method creates an `AccessEntry` construct that grants the specified IAM principal the access permissions
 * defined by the provided `IAccessPolicy` array. This allows the IAM principal to perform the actions permitted
 * by the access policies within the EKS cluster.
 *
 * @param id - The ID of the `AccessEntry` construct to be created.
 * @param principal - The ARN of the IAM principal (role or user) to be granted access to the EKS cluster.
 *   Callers in this file pass a role ARN string (e.g. `mastersRole.roleArn`), not an IRole object.
 * @param accessPolicies - An array of `IAccessPolicy` objects that define the access permissions to be granted to the IAM principal.
 */
grantAccess(id, principal, accessPolicies) {
// Thin delegation: the actual AccessEntry creation/merging lives in addToAccessEntry.
this.addToAccessEntry(id, principal, accessPolicies);
}
/**
* Grants the specified IAM principal cluster admin access to the EKS cluster.
*
* This method creates an `AccessEntry` construct that grants the specified IAM principal the cluster admin
* access permissions. This allows the IAM principal to perform the actions permitted
* by the cluster admin acces.
*
* @param id - The ID of the `AccessEntry` construct to be created.
* @param principal - The IAM principal (role or user) to be granted access to the EKS cluster.
* @returns the access entry construct
*/
grantClusterAdmin(id, principal) {
const newEntry = new access_entry_1.AccessEntry(this, id, {
principal,
cluster: this,
accessPolicies: [
access_entry_1.AccessPolicy.fromAccessPolicyName('AmazonEKSClusterAdminPolicy', {
accessScopeType: access_entry_1.AccessScopeType.CLUSTER,
}),
],
});
this.accessEntries.set(principal, newEntry);
return newEntry;
}
/**
* Fetch the load balancer address of a service of type 'LoadBalancer'.
*
* @param serviceName The name of the service.
* @param options Additional operation options.
*/
getServiceLoadBalancerAddress(serviceName, options = {}) {
try {
jsiiDeprecationWarnings._aws_cdk_aws_eks_v2_alpha_ServiceLoadBalancerAddressOptions(options);
}
catch (error) {
if (process.env.JSII_DEBUG !== "1" && error.name === "DeprecationError") {
Error.captureStackTrace(error, this.getServiceLoadBalancerAddress);
}
throw error;
}
const loadBalancerAddress = new k8s_object_value_1.KubernetesObjectValue(this, `${serviceName}LoadBalancerAddress`, {
cluster: this,
objectType: 'service',
objectName: serviceName,
objectNamespace: options.namespace,
jsonPath: '.status.loadBalancer.ingress[0].hostname',
timeout: options.timeout,
});
return loadBalancerAddress.value;
}
/**
* Fetch the load balancer address of an ingress backed by a load balancer.
*
* @param ingressName The name of the ingress.
* @param options Additional operation options.
*/
getIngressLoadBalancerAddress(ingressName, options = {}) {
try {
jsiiDeprecationWarnings._aws_cdk_aws_eks_v2_alpha_IngressLoadBalancerAddressOptions(options);
}
catch (error) {
if (process.env.JSII_DEBUG !== "1" && error.name === "DeprecationError") {
Error.captureStackTrace(error, this.getIngressLoadBalancerAddress);
}
throw error;
}
const loadBalancerAddress = new k8s_object_value_1.KubernetesObjectValue(this, `${ingressName}LoadBalancerAddress`, {
cluster: this,
objectType: 'ingress',
objectName: ingressName,
objectNamespace: options.namespace,
jsonPath: '.status.loadBalancer.ingress[0].hostname',
timeout: options.timeout,
});
return loadBalancerAddress.value;
}
/**
* Add nodes to this EKS cluster
*
* The nodes will automatically be configured with the right VPC and AMI
* for the instance type and Kubernetes version.
*
* Note that if you specify `updateType: RollingUpdate` or `updateType: ReplacingUpdate`, your nodes might be replaced at deploy
* time without notice in case the recommended AMI for your machine image type has been updated by AWS.
* The default behavior for `updateType` is `None`, which means only new instances will be launched using the new AMI.
*
*/
addAutoScalingGroupCapacity(id, options) {
try {
jsiiDeprecationWarnings._aws_cdk_aws_eks_v2_alpha_AutoScalingGroupCapacityOptions(options);
}
catch (error) {
if (process.env.JSII_DEBUG !== "1" && error.name === "DeprecationError") {
Error.captureStackTrace(error, this.addAutoScalingGroupCapacity);
}
throw error;
}
if (options.machineImageType === MachineImageType.BOTTLEROCKET && options.bootstrapOptions !== undefined) {
throw new Error('bootstrapOptions is not supported for Bottlerocket');
}
const asg = new autoscaling.AutoScalingGroup(this, id, {
...options,
vpc: this.vpc,
machineImage: options.machineImageType === MachineImageType.BOTTLEROCKET ?
new bottlerocket_1.BottleRocketImage({
kubernetesVersion: this.version.version,
}) :
new EksOptimizedImage({
nodeType: nodeTypeForInstanceType(options.instanceType),
cpuArch: cpuArchForInstanceType(options.instanceType),
kubernetesVersion: this.version.version,
}),
});
this.connectAutoScalingGroupCapacity(asg, {
bootstrapOptions: options.bootstrapOptions,
bootstrapEnabled: options.bootstrapEnabled,
machineImageType: options.machineImageType,
});
if (nodeTypeForInstanceType(options.instanceType) === NodeType.INFERENTIA ||
nodeTypeForInstanceType(options.instanceType) === NodeType.TRAINIUM) {
this.addNeuronDevicePlugin();
}
return asg;
}
/**
* Add managed nodegroup to this Amazon EKS cluster
*
* This method will create a new managed nodegroup and add into the capacity.
*
* @see https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html
* @param id The ID of the nodegroup
* @param options options for creating a new nodegroup
*/
addNodegroupCapacity(id, options) {
try {
jsiiDeprecationWarnings._aws_cdk_aws_eks_v2_alpha_NodegroupOptions(options);
}
catch (error) {
if (process.env.JSII_DEBUG !== "1" && error.name === "DeprecationError") {
Error.captureStackTrace(error, this.addNodegroupCapacity);
}
throw error;
}
const hasInferentiaOrTrainiumInstanceType = [
options?.instanceType,
...options?.instanceTypes ?? [],
].some(i => i && (nodeTypeForInstanceType(i) === NodeType.INFERENTIA ||
nodeTypeForInstanceType(i) === NodeType.TRAINIUM));
if (hasInferentiaOrTrainiumInstanceType) {
this.addNeuronDevicePlugin();
}
return new managed_nodegroup_1.Nodegroup(this, `Nodegroup${id}`, {
cluster: this,
...options,
});
}
/**
 * The OpenID Connect issuer URL of this cluster.
 *
 * If this cluster is kubectl-enabled, returns the OpenID Connect issuer url.
 * If this cluster is not kubectl-enabled (i.e. uses the
 * stock `CfnCluster`), this is `undefined`.
 * NOTE(review): the "not kubectl-enabled" case is not visible from this chunk;
 * this getter simply forwards the cluster resource's attribute — confirm the
 * undefined case still exists in this v2 construct.
 * @attribute
 */
get clusterOpenIdConnectIssuerUrl() {
return this._clusterResource.attrOpenIdConnectIssuerUrl;
}
/**
* An `OpenIdConnectProvider` resource associated with this cluster, and which can be used
* to link this cluster to AWS IAM.
*
* A provider will only be defined if this property is accessed (lazy initialization).
*/
get openIdConnec