const Settings = require('@overleaf/settings')
const logger = require('logger-sharelatex')
const Docker = require('dockerode')
const dockerode = new Docker()
const crypto = require('crypto')
const async = require('async')
const LockManager = require('./DockerLockManager')
const fs = require('fs')
const Path = require('path')
const _ = require('lodash')

const ONE_HOUR_IN_MS = 60 * 60 * 1000

logger.info('using docker runner')

function usingSiblingContainers() {
  return (
    Settings != null &&
    Settings.path != null &&
    Settings.path.sandboxedCompilesHostDir != null
  )
}

let containerMonitorTimeout
let containerMonitorInterval

const DockerRunner = {
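  // Run `command` for `projectId` in a Docker container, with `directory`
  // bind-mounted at /compile. Calls back with the captured stdout/stderr;
  // on a Docker 500 error the container is destroyed and the run retried
  // once. Returns the container name so the caller can kill it early.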
  run(
    projectId,
    command,
    directory,
    image,
    timeout,
    environment,
    compileGroup,
    callback
  ) {
    if (usingSiblingContainers()) {
      const _newPath = Settings.path.sandboxedCompilesHostDir
      logger.log(
        { path: _newPath },
        'altering bind path for sibling containers'
      )
      // Server Pro, example:
      // '/var/lib/sharelatex/data/compiles/<project-id>'
      // ... becomes ...
      // '/opt/sharelatex_data/data/compiles/<project-id>'
      directory = Path.join(
        Settings.path.sandboxedCompilesHostDir,
        Path.basename(directory)
      )
    }

    const volumes = { [directory]: '/compile' }

    command = command.map(arg =>
      arg.toString().replace('$COMPILE_DIR', '/compile')
    )
    if (image == null) {
      image = Settings.clsi.docker.image
    }

    if (
      Settings.clsi.docker.allowedImages &&
      !Settings.clsi.docker.allowedImages.includes(image)
    ) {
      return callback(new Error('image not allowed'))
    }

    if (Settings.texliveImageNameOveride != null) {
      const img = image.split('/')
      image = `${Settings.texliveImageNameOveride}/${img[2]}`
    }

    const options = DockerRunner._getContainerOptions(
      command,
      image,
      volumes,
      timeout,
      environment,
      compileGroup
    )
    const fingerprint = DockerRunner._fingerprintContainer(options)
    const name = `project-${projectId}-${fingerprint}`
    options.name = name

    // logOptions = _.clone(options)
    // logOptions?.HostConfig?.SecurityOpt = "seccomp used, removed in logging"
    logger.log({ projectId }, 'running docker container')
    DockerRunner._runAndWaitForContainer(
      options,
      volumes,
      timeout,
      (error, output) => {
        if (error && error.statusCode === 500) {
          logger.log(
            { err: error, projectId },
            'error running container so destroying and retrying'
          )
          DockerRunner.destroyContainer(name, null, true, error => {
            if (error != null) {
              return callback(error)
            }
            DockerRunner._runAndWaitForContainer(
              options,
              volumes,
              timeout,
              callback
            )
          })
        } else {
          callback(error, output)
        }
      }
    )

    // pass back the container name to allow it to be killed
    return name
  },

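  // Send a kill signal to a running compile container. A "not running"
  // error from Docker is ignored.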
  kill(containerId, callback) {
    logger.log({ containerId }, 'sending kill signal to container')
    const container = dockerode.getContainer(containerId)
    container.kill(error => {
      if (
        error != null &&
        error.message != null &&
        error.message.match(/Cannot kill container .* is not running/)
      ) {
        logger.warn(
          { err: error, containerId },
          'container not running, continuing'
        )
        error = null
      }
      if (error != null) {
        logger.error({ err: error, containerId }, 'error killing container')
        callback(error)
      } else {
        callback()
      }
    })
  },

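  // Start the container and wait for both its output stream to end and the
  // container itself to exit, then call back exactly once with the collected
  // output (exit codes 137 and 1 are turned into errors).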
  _runAndWaitForContainer(options, volumes, timeout, _callback) {
    const callback = _.once(_callback)
    const { name } = options

    let streamEnded = false
    let containerReturned = false
    let output = {}

    function callbackIfFinished() {
      if (streamEnded && containerReturned) {
        callback(null, output)
      }
    }

    function attachStreamHandler(error, _output) {
      if (error != null) {
        return callback(error)
      }
      output = _output
      streamEnded = true
      callbackIfFinished()
    }

    DockerRunner.startContainer(
      options,
      volumes,
      attachStreamHandler,
      (error, containerId) => {
        if (error != null) {
          return callback(error)
        }

        DockerRunner.waitForContainer(name, timeout, (error, exitCode) => {
          if (error != null) {
            return callback(error)
          }
          if (exitCode === 137) {
            // exit status from kill -9
            const err = new Error('terminated')
            err.terminated = true
            return callback(err)
          }
          if (exitCode === 1) {
            // exit status from chktex
            const err = new Error('exited')
            err.code = exitCode
            return callback(err)
          }
          containerReturned = true
          if (options != null && options.HostConfig != null) {
            options.HostConfig.SecurityOpt = null
          }
          logger.log({ exitCode, options }, 'docker container has exited')
          callbackIfFinished()
        })
      }
    )
  },

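  // Build the dockerode createContainer options: command, image, bind
  // mounts, CPU ulimits derived from the compile timeout, environment
  // (with PATH set from the TeX Live image year) and security settings.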
  _getContainerOptions(
    command,
    image,
    volumes,
    timeout,
    environment,
    compileGroup
  ) {
    const timeoutInSeconds = timeout / 1000

    const dockerVolumes = {}
    for (const hostVol in volumes) {
      const dockerVol = volumes[hostVol]
      dockerVolumes[dockerVol] = {}

      if (volumes[hostVol].slice(-3).indexOf(':r') === -1) {
        volumes[hostVol] = `${dockerVol}:rw`
      }
    }

    // merge settings and environment parameter
    const env = {}
    for (const src of [Settings.clsi.docker.env, environment || {}]) {
      for (const key in src) {
        const value = src[key]
        env[key] = value
      }
    }
    // set the path based on the image year
    const match = image.match(/:([0-9]+)\.[0-9]+/)
    const year = match ? match[1] : '2014'
    env.PATH = `/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/texlive/${year}/bin/x86_64-linux/`
    const options = {
      Cmd: command,
      Image: image,
      Volumes: dockerVolumes,
      WorkingDir: '/compile',
      NetworkDisabled: true,
      Memory: 1024 * 1024 * 1024 * 1024, // 1 TiB
      User: Settings.clsi.docker.user,
      Env: Object.entries(env).map(([key, value]) => `${key}=${value}`),
      HostConfig: {
        Binds: Object.entries(volumes).map(
          ([hostVol, dockerVol]) => `${hostVol}:${dockerVol}`
        ),
        LogConfig: { Type: 'none', Config: {} },
        Ulimits: [
          {
            Name: 'cpu',
            Soft: timeoutInSeconds + 5,
            Hard: timeoutInSeconds + 10,
          },
        ],
        CapDrop: 'ALL',
        SecurityOpt: ['no-new-privileges'],
      },
    }

    if (Settings.path != null && Settings.path.synctexBinHostPath != null) {
      options.HostConfig.Binds.push(
        `${Settings.path.synctexBinHostPath}:/opt/synctex:ro`
      )
    }

    if (Settings.clsi.docker.seccomp_profile != null) {
      options.HostConfig.SecurityOpt.push(
        `seccomp=${Settings.clsi.docker.seccomp_profile}`
      )
    }

    if (Settings.clsi.docker.apparmor_profile != null) {
      options.HostConfig.SecurityOpt.push(
        `apparmor=${Settings.clsi.docker.apparmor_profile}`
      )
    }

    if (Settings.clsi.docker.runtime) {
      options.HostConfig.Runtime = Settings.clsi.docker.runtime
    }

    if (Settings.clsi.docker.Readonly) {
      options.HostConfig.ReadonlyRootfs = true
      options.HostConfig.Tmpfs = { '/tmp': 'rw,noexec,nosuid,size=65536k' }
      options.Volumes['/home/tex'] = {}
    }

    // Allow per-compile group overriding of individual settings
    if (
      Settings.clsi.docker.compileGroupConfig &&
      Settings.clsi.docker.compileGroupConfig[compileGroup]
    ) {
      const override = Settings.clsi.docker.compileGroupConfig[compileGroup]
      for (const key in override) {
        _.set(options, key, override[key])
      }
    }

    return options
  },

  _fingerprintContainer(containerOptions) {
    // Yay, Hashing!
    const json = JSON.stringify(containerOptions)
    return crypto.createHash('md5').update(json).digest('hex')
  },

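  // Create and start the compile container under a per-container lock.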
  startContainer(options, volumes, attachStreamHandler, callback) {
    LockManager.runWithLock(
      options.name,
      releaseLock =>
        // Check that volumes exist before starting the container.
        // When a container is started with volume pointing to a
        // non-existent directory then docker creates the directory but
        // with root ownership.
        DockerRunner._checkVolumes(options, volumes, err => {
          if (err != null) {
            return releaseLock(err)
          }
          DockerRunner._startContainer(
            options,
            volumes,
            attachStreamHandler,
            releaseLock
          )
        }),
      callback
    )
  },

  // Check that volumes exist and are directories
  _checkVolumes(options, volumes, callback) {
    if (usingSiblingContainers()) {
      // Server Pro, with sibling-containers active, skip checks
      return callback(null)
    }

    const checkVolume = (path, cb) =>
      fs.stat(path, (err, stats) => {
        if (err != null) {
          return cb(err)
        }
        if (!stats.isDirectory()) {
          return cb(new Error('not a directory'))
        }
        cb()
      })
    const jobs = []
    for (const vol in volumes) {
      jobs.push(cb => checkVolume(vol, cb))
    }
    async.series(jobs, callback)
  },

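  // Inspect the container by name: create it if missing (404), otherwise
  // attach to its output and start it (a 304 means it is already running).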
  _startContainer(options, volumes, attachStreamHandler, callback) {
    callback = _.once(callback)
    const { name } = options

    logger.log({ container_name: name }, 'starting container')
    const container = dockerode.getContainer(name)

    function createAndStartContainer() {
      dockerode.createContainer(options, (error, container) => {
        if (error != null) {
          return callback(error)
        }
        startExistingContainer()
      })
    }

    function startExistingContainer() {
      DockerRunner.attachToContainer(
        options.name,
        attachStreamHandler,
        error => {
          if (error != null) {
            return callback(error)
          }
          container.start(error => {
            if (error != null && error.statusCode !== 304) {
              callback(error)
            } else {
              // already running
              callback()
            }
          })
        }
      )
    }

    container.inspect((error, stats) => {
      if (error != null && error.statusCode === 404) {
        createAndStartContainer()
      } else if (error != null) {
        logger.err(
          { container_name: name, error },
          'unable to inspect container to start'
        )
        callback(error)
      } else {
        startExistingContainer()
      }
    })
  },

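  // Attach to the container's stdout/stderr, buffering each stream up to
  // MAX_OUTPUT characters, and pass the collected output to
  // attachStreamHandler when the stream ends.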
  attachToContainer(containerId, attachStreamHandler, attachStartCallback) {
    const container = dockerode.getContainer(containerId)
    container.attach({ stdout: 1, stderr: 1, stream: 1 }, (error, stream) => {
      if (error != null) {
        logger.error(
          { err: error, containerId },
          'error attaching to container'
        )
        return attachStartCallback(error)
      } else {
        attachStartCallback()
      }

      logger.log({ containerId }, 'attached to container')

      const MAX_OUTPUT = 1024 * 1024 // limit output to 1MB
      function createStringOutputStream(name) {
        return {
          data: '',
          overflowed: false,
          write(data) {
            if (this.overflowed) {
              return
            }
            if (this.data.length < MAX_OUTPUT) {
              this.data += data
            } else {
              logger.error(
                {
                  containerId,
                  length: this.data.length,
                  maxLen: MAX_OUTPUT,
                },
                `${name} exceeds max size`
              )
              this.data += `(...truncated at ${MAX_OUTPUT} chars...)`
              this.overflowed = true
            }
          },
          // kill container if too much output
          // docker.containers.kill(containerId, () ->)
        }
      }

      const stdout = createStringOutputStream('stdout')
      const stderr = createStringOutputStream('stderr')

      container.modem.demuxStream(stream, stdout, stderr)

      stream.on('error', err =>
        logger.error(
          { err, containerId },
          'error reading from container stream'
        )
      )

      stream.on('end', () =>
        attachStreamHandler(null, { stdout: stdout.data, stderr: stderr.data })
      )
    })
  },

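  // Wait for the container to exit, killing it once `timeout` ms have
  // passed, and call back with its exit code (or a timeout error).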
  waitForContainer(containerId, timeout, _callback) {
    const callback = _.once(_callback)

    const container = dockerode.getContainer(containerId)

    let timedOut = false
    const timeoutId = setTimeout(() => {
      timedOut = true
      logger.log({ containerId }, 'timeout reached, killing container')
      container.kill(err => {
        logger.warn({ err, containerId }, 'failed to kill container')
      })
    }, timeout)

    logger.log({ containerId }, 'waiting for docker container')
    container.wait((error, res) => {
      if (error != null) {
        clearTimeout(timeoutId)
        logger.error({ err: error, containerId }, 'error waiting for container')
        return callback(error)
      }
      if (timedOut) {
        logger.log({ containerId }, 'docker container timed out')
        error = new Error('container timed out')
        error.timedout = true
        callback(error)
      } else {
        clearTimeout(timeoutId)
        logger.log(
          { containerId, exitCode: res.StatusCode },
          'docker container returned'
        )
        callback(null, res.StatusCode)
      }
    })
  },

  destroyContainer(containerName, containerId, shouldForce, callback) {
    // We want the containerName for the lock and, ideally, the
    // containerId to delete. There is a bug in the docker.io module
    // where if you delete by name and there is an error, it throws an
    // async exception, but if you delete by id it just does a normal
    // error callback. We fall back to deleting by name if no id is
    // supplied.
    LockManager.runWithLock(
      containerName,
      releaseLock =>
        DockerRunner._destroyContainer(
          containerId || containerName,
          shouldForce,
          releaseLock
        ),
      callback
    )
  },

  _destroyContainer(containerId, shouldForce, callback) {
    logger.log({ containerId }, 'destroying docker container')
    const container = dockerode.getContainer(containerId)
    container.remove({ force: shouldForce === true, v: true }, error => {
      if (error != null && error.statusCode === 404) {
        logger.warn(
          { err: error, containerId },
          'container not found, continuing'
        )
        error = null
      }
      if (error != null) {
        logger.error({ err: error, containerId }, 'error destroying container')
      } else {
        logger.log({ containerId }, 'destroyed container')
      }
      callback(error)
    })
  },

  // handle expiry of docker containers

  MAX_CONTAINER_AGE: Settings.clsi.docker.maxContainerAge || ONE_HOUR_IN_MS,

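  // Work out how much longer a container may live, based on its creation
  // time and MAX_CONTAINER_AGE.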
  examineOldContainer(container, callback) {
    const name = container.Name || (container.Names && container.Names[0])
    const created = container.Created * 1000 // creation time is returned in seconds
    const now = Date.now()
    const age = now - created
    const maxAge = DockerRunner.MAX_CONTAINER_AGE
    const ttl = maxAge - age
    logger.log(
      { containerName: name, created, now, age, maxAge, ttl },
      'checking whether to destroy container'
    )
    return { name, id: container.Id, ttl }
  },

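  // List all containers and destroy any project-* container that has
  // outlived MAX_CONTAINER_AGE.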
  destroyOldContainers(callback) {
    dockerode.listContainers({ all: true }, (error, containers) => {
      if (error != null) {
        return callback(error)
      }
      const jobs = []
      for (const container of containers) {
        const { name, id, ttl } = DockerRunner.examineOldContainer(container)
        if (name.slice(0, 9) === '/project-' && ttl <= 0) {
          // strip the / prefix
          // the LockManager uses the plain container name
          const plainName = name.slice(1)
          jobs.push(cb =>
            DockerRunner.destroyContainer(plainName, id, false, () => cb())
          )
        }
      }
      // Ignore errors because some containers get stuck but
      // will be destroyed next time
      async.series(jobs, callback)
    })
  },

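  // Run destroyOldContainers hourly, after a random initial delay of up
  // to five minutes.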
  startContainerMonitor() {
    logger.log(
      { maxAge: DockerRunner.MAX_CONTAINER_AGE },
      'starting container expiry'
    )

    // guarantee only one monitor is running
    DockerRunner.stopContainerMonitor()

    // randomise the start time
    const randomDelay = Math.floor(Math.random() * 5 * 60 * 1000)
    containerMonitorTimeout = setTimeout(() => {
      containerMonitorInterval = setInterval(
        () =>
          DockerRunner.destroyOldContainers(err => {
            if (err) {
              logger.error({ err }, 'failed to destroy old containers')
            }
          }),
        ONE_HOUR_IN_MS
      )
    }, randomDelay)
  },

  stopContainerMonitor() {
    if (containerMonitorTimeout) {
      clearTimeout(containerMonitorTimeout)
      containerMonitorTimeout = undefined
    }
    if (containerMonitorInterval) {
      clearInterval(containerMonitorInterval)
      containerMonitorInterval = undefined
    }
  },
}

DockerRunner.startContainerMonitor()

module.exports = DockerRunner