@applitools/execution-grid-tunnel
Allows users to run tests with execution-grid and navigate to private hosts and IPs
143 lines (120 loc) • 3.64 kB
JavaScript
const {
tunnelId,
stringifyConfig,
heartbeatTimeout = 10000,
startTunnelTimeoutThreshold = 10000,
stopProcessTimeout = 5000,
stringifyloggerOptions,
} = process.env
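// Everything read from process.env arrives as a string (or undefined), so the numeric
// defaults above only apply when a variable is unset; values that are set are coerced
// to numbers where they are used below.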
const loggerOptions = stringifyloggerOptions
? JSON.parse(stringifyloggerOptions)
: {mode: 'development', level: 'info'}
// TODO: fix logger
const {createLogger} = require('../../src/utils')
const logger = createLogger({...loggerOptions, filename: tunnelId})
logger.debug({
action: 'process-was-started',
tunnelId,
stringifyConfig,
stringifyloggerOptions,
heartbeatTimeout,
startTunnelTimeoutThreshold,
stopProcessTimeout,
})
const {promises: fs} = require('fs')
const nodeCleanup = require('node-cleanup')
const {TunnelConnectionPool, POOL_STATUS} = require('./tunnel-connection-pool.js')
const TUNNEL_STATUS = require('./tunnel-status.js')
const tls = require('tls')
let selfKillTimeoutId
let initTimeoutId
const _startTunnel = async () => {
const config = JSON.parse(stringifyConfig)
const pool = new TunnelConnectionPool({...config, logger})
pool.onSetStatus((status) => {
if (status === POOL_STATUS.RECONNECT) process.send({status: TUNNEL_STATUS.RECONNECT})
})
// Close frpc if no heartbeat arrives from the parent process
const startSelfKillTimeout = (timeout) => {
selfKillTimeoutId = setTimeout(async () => {
logger.error({
action: 'self-kill-timeout',
error: `TunnelId ${tunnelId}: self-kill is called`,
heartbeatTimeout,
})
console.log(`TunnelId ${tunnelId}: self-kill is called`)
try {
await pool.end()
} catch (error) {
console.log(error)
pool.destroy()
} finally {
process.exit(1)
}
}, timeout)
}
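// Heartbeat handling: the parent periodically sends {status: 'ok'} over IPC; every
// heartbeat resets the self-kill timer, so the worker only stays alive while the
// controller keeps checking in.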
process.on('message', ({status}) => {
if (status !== 'ok') {
return
}
if (selfKillTimeoutId !== undefined) {
clearTimeout(selfKillTimeoutId)
}
startSelfKillTimeout(parseInt(heartbeatTimeout, 10))
})
// In some cases, when the frpc connection fails it tries to reconnect forever.
// The init timeout set below reports an error to the frpc controller once startTunnelTimeoutThreshold elapses.
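// On process exit (signal or normal termination): notify the parent that the tunnel
// stopped, cancel pending timers, and drain the connection pool before exiting.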
nodeCleanup((exitCode, signal) => {
logger.info({
action: 'tunnel-is-closing',
exitCode,
signal,
})
process.send({status: TUNNEL_STATUS.STOPPED})
selfKillTimeoutId && clearTimeout(selfKillTimeoutId)
initTimeoutId && clearTimeout(initTimeoutId)
// TODO: should we check proc.isRunning before calling proc.stop?
pool
.end(Number(stopProcessTimeout))
.catch((error) => {
logger.warn({
action: 'tunnel-pool-timeout',
error: error.message,
})
pool.destroy()
})
.finally(() => {
logger.info({
action: 'tunnel-process-was-closed',
})
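// wait briefly so pending log writes can complete, then close the logger and exit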
setTimeout(() => {
logger.close()
process.exit()
}, 1000)
})
})
try {
initTimeoutId = setTimeout(() => {
process.send({status: TUNNEL_STATUS.INIT_TIMEOUT_ERROR})
}, startTunnelTimeoutThreshold)
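// note: the timeout above only reports INIT_TIMEOUT_ERROR to the parent;
// it does not abort pool.start() by itself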
await pool.start()
clearTimeout(initTimeoutId)
initTimeoutId = undefined
startSelfKillTimeout(parseInt(heartbeatTimeout, 10))
process.send({status: TUNNEL_STATUS.RUNNING})
logger.info({
action: 'tunnel-started',
tunnelId,
success: true,
})
// set self-kill in case execution-grid-tunnel crashes before init
} catch (err) {
logger.error({
action: 'starting-tunnel-pool-failed',
error: err.message,
})
initTimeoutId && clearTimeout(initTimeoutId)
process.send({status: TUNNEL_STATUS.INIT_ERROR})
}
}
// Catch errors thrown before the try block above (e.g. an invalid stringifyConfig)
// so they do not surface as an unhandled promise rejection.
_startTunnel().catch((error) => {
logger.error({action: 'start-tunnel-failed', error: error.message})
process.exit(1)
})
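The worker above is configured entirely through process.env and talks to its parent over Node's IPC channel: the parent is expected to send a periodic {status: 'ok'} heartbeat, and the worker reports its own state back with the statuses defined in tunnel-status.js (RUNNING, RECONNECT, STOPPED, INIT_ERROR, INIT_TIMEOUT_ERROR). The snippet below is a minimal, hypothetical sketch of a controller forking this worker; the file path, tunnel id and config fields are illustrative assumptions, not a documented API of this package.

const {fork} = require('child_process')

// The path is a placeholder for wherever this worker file lives inside
// @applitools/execution-grid-tunnel.
const child = fork('./path/to/tunnel-worker.js', [], {
env: {
...process.env,
tunnelId: 'tunnel-1', // illustrative id
stringifyConfig: JSON.stringify({}), // parsed and passed to TunnelConnectionPool
heartbeatTimeout: '10000',
startTunnelTimeoutThreshold: '10000',
stopProcessTimeout: '5000',
stringifyloggerOptions: JSON.stringify({mode: 'development', level: 'info'}),
},
})

// The worker reports status changes over IPC as {status: ...} messages.
child.on('message', ({status}) => console.log('tunnel status:', status))

// Keep the {status: 'ok'} heartbeat going; if it stops, the worker shuts itself
// down after heartbeatTimeout milliseconds.
const heartbeat = setInterval(() => child.send({status: 'ok'}), 5000)
child.on('exit', () => clearInterval(heartbeat))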