create-elysiajs
Version:
Scaffold your Elysia project environment with ease!
233 lines (216 loc) • 8.2 kB
JavaScript
"use strict";
// TypeScript-emitted interop helper: reuses an existing __importDefault if one
// is already on `this`, otherwise wraps a plain CommonJS export in
// `{ default: mod }` so it can be consumed like an ES module default import.
var __importDefault = (this && this.__importDefault) || function (mod) {
return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.getDockerfile = getDockerfile;
exports.getDockerCompose = getDockerCompose;
exports.getDevelopmentDockerCompose = getDevelopmentDockerCompose;
const ts_dedent_1 = __importDefault(require("ts-dedent"));
const utils_js_1 = require("../utils.js");
// Dockerfile COPY instructions for each ORM's build artifacts, keyed by the
// ORM choice name. Interpolated into the release stage of the generated
// Dockerfile so the schema/migration files ship with the final image.
// Prisma needs its `prisma/` directory; Drizzle needs `drizzle/` plus its
// config file. The "None" choice is handled at the call sites.
const ormDockerCopy = {
Prisma: "COPY --from=prerelease /usr/src/app/prisma ./prisma",
Drizzle: (0, ts_dedent_1.default) `
COPY --from=prerelease /usr/src/app/drizzle ./drizzle
COPY --from=prerelease /usr/src/app/drizzle.config.ts .`,
};
/**
 * Generate the Dockerfile contents for the scaffolded project.
 *
 * Emits a multi-stage build: a dev-dependency install stage, a production
 * install stage, a type-check (prerelease) stage, and a slim release stage.
 *
 * @param {{ packageManager: string, orm: string }} options
 *   - packageManager: "bun" gets the oven/bun image; anything else gets the
 *     Node image (assumed to be a key of the pm* maps in utils — TODO confirm)
 *   - orm: "Prisma", "Drizzle" or "None" (selects extra COPY lines)
 * @returns {string} dedented Dockerfile text
 */
function getDockerfile({ packageManager, orm }) {
    if (packageManager === "bun")
        return (0, ts_dedent_1.default /* Dockerfile */) `
# use the official Bun image
# see all versions at https://hub.docker.com/r/oven/bun/tags
FROM oven/bun:${process.versions.bun ?? "1.3.2"} AS base
WORKDIR /usr/src/app
# install dependencies into temp directory
# this will cache them and speed up future builds
FROM base AS install
RUN mkdir -p /temp/dev
COPY package.json bun.lock /temp/dev/
RUN cd /temp/dev && bun install --frozen-lockfile
# install with --production (exclude devDependencies)
RUN mkdir -p /temp/prod
COPY package.json bun.lock /temp/prod/
RUN cd /temp/prod && bun install --frozen-lockfile --production
# copy node_modules from temp directory
# then copy all (non-ignored) project files into the image
FROM base AS prerelease
COPY --from=install /temp/dev/node_modules node_modules
COPY . .
ENV NODE_ENV=production
RUN ${utils_js_1.pmExecuteMap[packageManager]} tsc --noEmit
# copy production dependencies and source code into final image
FROM base AS release
COPY --from=install /temp/prod/node_modules node_modules
COPY --from=prerelease /usr/src/app/${utils_js_1.pmLockFilesMap[packageManager]} .
RUN mkdir -p /usr/src/app/src
COPY --from=prerelease /usr/src/app/src ./src
COPY --from=prerelease /usr/src/app/package.json .
COPY --from=prerelease /usr/src/app/tsconfig.json .
${orm !== "None" ? ormDockerCopy[orm] : ""}
ENTRYPOINT [ "bun", "start" ]`;
    // Non-bun flow: base Node image plus the chosen package manager.
    // FIX: this line previously emitted the literal text
    // "npm install ${packageManager} -g" (double-quoted string, so no
    // interpolation) and was missing the Dockerfile RUN instruction,
    // producing an invalid Dockerfile for pnpm/yarn projects.
    return (0, ts_dedent_1.default /* Dockerfile */) `
# Use the official Node.js 22 image.
# See https://hub.docker.com/_/node for more information.
FROM node:${process?.versions?.node ?? "22.12"} AS base
# Create app directory
WORKDIR /usr/src/app
${packageManager !== "npm" ? `RUN npm install ${packageManager} -g` : ""}
# Install dependencies into temp directory
# This will cache them and speed up future builds
FROM base AS install
RUN mkdir -p /temp/dev
COPY package.json ${utils_js_1.pmLockFilesMap[packageManager]} /temp/dev/
RUN cd /temp/dev && ${utils_js_1.pmInstallFrozenLockfile[packageManager]}
# Install with --production (exclude devDependencies)
RUN mkdir -p /temp/prod
COPY package.json ${utils_js_1.pmLockFilesMap[packageManager]} /temp/prod/
RUN cd /temp/prod && ${utils_js_1.pmInstallFrozenLockfileProduction[packageManager]}
# Copy node_modules from temp directory
# Then copy all (non-ignored) project files into the image
FROM base AS prerelease
COPY --from=install /temp/dev/node_modules node_modules
COPY . .
ENV NODE_ENV=production
RUN ${utils_js_1.pmExecuteMap[packageManager]} tsc --noEmit
# Copy production dependencies and source code into final image
FROM base AS release
COPY --from=install /temp/prod/node_modules node_modules
RUN mkdir -p /usr/src/app/src
COPY --from=prerelease /usr/src/app/src ./src
COPY --from=prerelease /usr/src/app/${utils_js_1.pmLockFilesMap[packageManager]} .
COPY --from=prerelease /usr/src/app/package.json .
COPY --from=prerelease /usr/src/app/tsconfig.json .
${orm !== "None" ? ormDockerCopy[orm] : ""}
# TODO:// should be downloaded not at ENTRYPOINT
ENTRYPOINT [ "${utils_js_1.pmRunMap[packageManager]}", "start" ]`;
}
// TODO: generate redis+postgres
/**
 * Generate the production docker-compose.yml contents.
 *
 * Always includes the bot service; conditionally adds postgres, redis and
 * minio services (with matching named volumes) based on the chosen options.
 *
 * @param {{ database: string, redis: boolean, projectName: string,
 *           meta: { databasePassword: string }, others: string[] }} options
 * @returns {string} dedented YAML text
 */
// TODO: generate redis+postgres
function getDockerCompose({ database, redis, projectName, meta, others, }) {
    const namedVolumes = [];
    const serviceBlocks = [];
    // The bot service is always present.
    serviceBlocks.push(/* yaml */ `bot:
container_name: ${projectName}-bot
restart: unless-stopped
build:
context: .
dockerfile: Dockerfile
environment:
- NODE_ENV=production`);
    if (database === "PostgreSQL") {
        namedVolumes.push("postgres_data:");
        serviceBlocks.push(/* yaml */ `postgres:
container_name: ${projectName}-postgres
image: postgres:latest
restart: unless-stopped
environment:
- POSTGRES_USER=${projectName}
- POSTGRES_PASSWORD=${meta.databasePassword}
- POSTGRES_DB=${projectName}
volumes:
- postgres_data:/var/lib/postgresql/data`);
    }
    if (redis) {
        namedVolumes.push("redis_data:");
        serviceBlocks.push(/* yaml */ `redis:
container_name: ${projectName}-redis
image: redis:latest
command: [ "redis-server", "--maxmemory-policy", "noeviction" ]
restart: unless-stopped
volumes:
- redis_data:/data`);
    }
    if (others.includes("S3")) {
        namedVolumes.push("minio_data:");
        serviceBlocks.push(/* yaml */ `minio:
container_name: ${projectName}-minio
image: minio/minio:latest
command: [ "minio", "server", "/data", "--console-address", ":9001" ]
restart: unless-stopped
environment:
- MINIO_ACCESS_KEY=${projectName}
- MINIO_SECRET_KEY=${meta.databasePassword}
ports:
- 9000:9000
- 9001:9001
volumes:
- minio_data:/data
healthcheck:
test: ["CMD", "mc", "ready", "local"]
interval: 5s
timeout: 5s
retries: 5`);
    }
    return (0, ts_dedent_1.default /* yaml */) `
services:
${serviceBlocks.join("\n")}
volumes:
${namedVolumes.join("\n")}
networks:
default: {}
`;
}
/**
 * Generate the development docker-compose.yml contents.
 *
 * Unlike the production compose file, there is no bot service, and the
 * postgres/redis services expose their default ports to the host so the
 * locally-run app can reach them.
 *
 * @param {{ database: string, redis: boolean, projectName: string,
 *           meta: { databasePassword: string }, others: string[] }} options
 * @returns {string} dedented YAML text
 */
function getDevelopmentDockerCompose({ database, redis, projectName, meta, others, }) {
    const withPostgres = database === "PostgreSQL";
    const withS3 = others.includes("S3");
    const volumeNames = [
        ...(withPostgres ? ["postgres_data:"] : []),
        ...(redis ? ["redis_data:"] : []),
        ...(withS3 ? ["minio_data:"] : []),
    ];
    const serviceBlocks = [
        ...(withPostgres
            ? [/* yaml */ `postgres:
container_name: ${projectName}-postgres
image: postgres:latest
restart: unless-stopped
environment:
- POSTGRES_USER=${projectName}
- POSTGRES_PASSWORD=${meta.databasePassword}
- POSTGRES_DB=${projectName}
ports:
- 5432:5432
volumes:
- postgres_data:/var/lib/postgresql/data`]
            : []),
        ...(redis
            ? [/* yaml */ `redis:
container_name: ${projectName}-redis
image: redis:latest
command: [ "redis-server", "--maxmemory-policy", "noeviction" ]
restart: unless-stopped
ports:
- 6379:6379
volumes:
- redis_data:/data`]
            : []),
        ...(withS3
            ? [/* yaml */ `minio:
container_name: ${projectName}-minio
image: minio/minio:latest
command: [ "minio", "server", "/data", "--console-address", ":9001" ]
restart: unless-stopped
environment:
- MINIO_ACCESS_KEY=${projectName}
- MINIO_SECRET_KEY=${meta.databasePassword}
volumes:
- minio_data:/data
healthcheck:
test: ["CMD", "mc", "ready", "local"]
interval: 5s
timeout: 5s
retries: 5`]
            : []),
    ];
    return (0, ts_dedent_1.default /* yaml */) `
services:
${serviceBlocks.join("\n")}
volumes:
${volumeNames.join("\n")}
networks:
default: {}
`;
}