Mirror of https://github.com/overleaf/overleaf.git (synced 2025-04-05 07:10:55 +00:00)
prettier: convert individual decaffeinated files to Prettier format
This commit is contained in:
parent bdfca5f155
commit 92dede867f

2 changed files with 322 additions and 241 deletions
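The formatted side of the diff below uses single quotes, drops semicolons, and puts spaces inside object braces, which is consistent with Prettier run with its `singleQuote` and `semi` options overridden. A minimal sketch of such a configuration follows; the actual options file is not part of this commit, so the snippet is an assumption inferred purely from the output style.

// Hypothetical .prettierrc.js, inferred from the formatting in this diff (not taken from the repository)
module.exports = {
  semi: false, // no statement-terminating semicolons
  singleQuote: true // 'single' quotes instead of "double" quotes
}

Files would then be reformatted in place with a command along the lines of `npx prettier --write <file>`; the diff itself does not show the command or the configuration used.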
@@ -5,214 +5,249 @@
* DS207: Consider shorter variations of null checks
* Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
*/
const Metrics = require("metrics-sharelatex");
const Settings = require("settings-sharelatex");
Metrics.initialize(Settings.appName || "real-time");
const async = require("async");
const _ = require("underscore");
const Metrics = require('metrics-sharelatex')
const Settings = require('settings-sharelatex')
Metrics.initialize(Settings.appName || 'real-time')
const async = require('async')
const _ = require('underscore')

const logger = require("logger-sharelatex");
logger.initialize("real-time");
Metrics.event_loop.monitor(logger);
const logger = require('logger-sharelatex')
logger.initialize('real-time')
Metrics.event_loop.monitor(logger)

const express = require("express");
const session = require("express-session");
const redis = require("redis-sharelatex");
const express = require('express')
const session = require('express-session')
const redis = require('redis-sharelatex')
if ((Settings.sentry != null ? Settings.sentry.dsn : undefined) != null) {
logger.initializeErrorReporting(Settings.sentry.dsn);
logger.initializeErrorReporting(Settings.sentry.dsn)
}

const sessionRedisClient = redis.createClient(Settings.redis.websessions);
const sessionRedisClient = redis.createClient(Settings.redis.websessions)

const RedisStore = require('connect-redis')(session);
const SessionSockets = require('./app/js/SessionSockets');
const CookieParser = require("cookie-parser");
const RedisStore = require('connect-redis')(session)
const SessionSockets = require('./app/js/SessionSockets')
const CookieParser = require('cookie-parser')

const DrainManager = require("./app/js/DrainManager");
const HealthCheckManager = require("./app/js/HealthCheckManager");
const DrainManager = require('./app/js/DrainManager')
const HealthCheckManager = require('./app/js/HealthCheckManager')
// work around frame handler bug in socket.io v0.9.16
require("./socket.io.patch.js");
require('./socket.io.patch.js')
// Set up socket.io server
const app = express();
const app = express()

const server = require('http').createServer(app);
const io = require('socket.io').listen(server);
const server = require('http').createServer(app)
const io = require('socket.io').listen(server)

// Bind to sessions
const sessionStore = new RedisStore({client: sessionRedisClient});
const cookieParser = CookieParser(Settings.security.sessionSecret);
const sessionStore = new RedisStore({ client: sessionRedisClient })
const cookieParser = CookieParser(Settings.security.sessionSecret)

const sessionSockets = new SessionSockets(io, sessionStore, cookieParser, Settings.cookieName);
const sessionSockets = new SessionSockets(
io,
sessionStore,
cookieParser,
Settings.cookieName
)

Metrics.injectMetricsRoute(app);
app.use(Metrics.http.monitor(logger));
Metrics.injectMetricsRoute(app)
app.use(Metrics.http.monitor(logger))

io.configure(function() {
io.enable('browser client minification');
io.enable('browser client etag');
io.configure(function () {
io.enable('browser client minification')
io.enable('browser client etag')

// Fix for Safari 5 error of "Error during WebSocket handshake: location mismatch"
// See http://answers.dotcloud.com/question/578/problem-with-websocket-over-ssl-in-safari-with
io.set('match origin protocol', true);
// Fix for Safari 5 error of "Error during WebSocket handshake: location mismatch"
// See http://answers.dotcloud.com/question/578/problem-with-websocket-over-ssl-in-safari-with
io.set('match origin protocol', true)

// gzip uses a Node 0.8.x method of calling the gzip program which
// doesn't work with 0.6.x
//io.enable('browser client gzip')
io.set('transports', ['websocket', 'flashsocket', 'htmlfile', 'xhr-polling', 'jsonp-polling']);
return io.set('log level', 1);
});
// gzip uses a Node 0.8.x method of calling the gzip program which
// doesn't work with 0.6.x
// io.enable('browser client gzip')
io.set('transports', [
'websocket',
'flashsocket',
'htmlfile',
'xhr-polling',
'jsonp-polling'
])
return io.set('log level', 1)
})
app.get("/", (req, res, next) => res.send("real-time-sharelatex is alive"));
|
||||
app.get('/', (req, res, next) => res.send('real-time-sharelatex is alive'))
|
||||
|
||||
app.get("/status", function(req, res, next) {
|
||||
if (Settings.shutDownInProgress) {
|
||||
return res.send(503); // Service unavailable
|
||||
} else {
|
||||
return res.send("real-time-sharelatex is alive");
|
||||
}
|
||||
});
|
||||
app.get('/status', function (req, res, next) {
|
||||
if (Settings.shutDownInProgress) {
|
||||
return res.send(503) // Service unavailable
|
||||
} else {
|
||||
return res.send('real-time-sharelatex is alive')
|
||||
}
|
||||
})
|
||||
|
||||
app.get("/debug/events", function(req, res, next) {
|
||||
Settings.debugEvents = parseInt(req.query != null ? req.query.count : undefined,10) || 20;
|
||||
logger.log({count: Settings.debugEvents}, "starting debug mode");
|
||||
return res.send(`debug mode will log next ${Settings.debugEvents} events`);
|
||||
});
|
||||
app.get('/debug/events', function (req, res, next) {
|
||||
Settings.debugEvents =
|
||||
parseInt(req.query != null ? req.query.count : undefined, 10) || 20
|
||||
logger.log({ count: Settings.debugEvents }, 'starting debug mode')
|
||||
return res.send(`debug mode will log next ${Settings.debugEvents} events`)
|
||||
})
|
||||
|
||||
const rclient = require("redis-sharelatex").createClient(Settings.redis.realtime);
|
||||
const rclient = require('redis-sharelatex').createClient(
|
||||
Settings.redis.realtime
|
||||
)
|
||||
|
||||
const healthCheck = (req, res, next) => rclient.healthCheck(function(error) {
|
||||
const healthCheck = (req, res, next) =>
|
||||
rclient.healthCheck(function (error) {
|
||||
if (error != null) {
|
||||
logger.err({err: error}, "failed redis health check");
|
||||
return res.sendStatus(500);
|
||||
logger.err({ err: error }, 'failed redis health check')
|
||||
return res.sendStatus(500)
|
||||
} else if (HealthCheckManager.isFailing()) {
|
||||
const status = HealthCheckManager.status();
|
||||
logger.err({pubSubErrors: status}, "failed pubsub health check");
|
||||
return res.sendStatus(500);
|
||||
const status = HealthCheckManager.status()
|
||||
logger.err({ pubSubErrors: status }, 'failed pubsub health check')
|
||||
return res.sendStatus(500)
|
||||
} else {
|
||||
return res.sendStatus(200);
|
||||
return res.sendStatus(200)
|
||||
}
|
||||
});
|
||||
})
|
||||
|
||||
app.get("/health_check", healthCheck);
|
||||
app.get('/health_check', healthCheck)
|
||||
|
||||
app.get("/health_check/redis", healthCheck);
|
||||
app.get('/health_check/redis', healthCheck)
|
||||
|
||||
const Router = require('./app/js/Router')
|
||||
Router.configure(app, io, sessionSockets)
|
||||
|
||||
const WebsocketLoadBalancer = require('./app/js/WebsocketLoadBalancer')
|
||||
WebsocketLoadBalancer.listenForEditorEvents(io)
|
||||
|
||||
const Router = require("./app/js/Router");
|
||||
Router.configure(app, io, sessionSockets);
|
||||
const DocumentUpdaterController = require('./app/js/DocumentUpdaterController')
|
||||
DocumentUpdaterController.listenForUpdatesFromDocumentUpdater(io)
|
||||
|
||||
const WebsocketLoadBalancer = require("./app/js/WebsocketLoadBalancer");
|
||||
WebsocketLoadBalancer.listenForEditorEvents(io);
|
||||
const { port } = Settings.internal.realTime
|
||||
const { host } = Settings.internal.realTime
|
||||
|
||||
const DocumentUpdaterController = require("./app/js/DocumentUpdaterController");
|
||||
DocumentUpdaterController.listenForUpdatesFromDocumentUpdater(io);
|
||||
|
||||
const {
|
||||
port
|
||||
} = Settings.internal.realTime;
|
||||
const {
|
||||
host
|
||||
} = Settings.internal.realTime;
|
||||
|
||||
server.listen(port, host, function(error) {
|
||||
if (error != null) { throw error; }
|
||||
return logger.info(`realtime starting up, listening on ${host}:${port}`);
|
||||
});
|
||||
server.listen(port, host, function (error) {
|
||||
if (error != null) {
|
||||
throw error
|
||||
}
|
||||
return logger.info(`realtime starting up, listening on ${host}:${port}`)
|
||||
})
|
||||
|
||||
// Stop huge stack traces in logs from all the socket.io parsing steps.
|
||||
Error.stackTraceLimit = 10;
|
||||
Error.stackTraceLimit = 10
|
||||
|
||||
var shutdownCleanly = function (signal) {
|
||||
const connectedClients = __guard__(io.sockets.clients(), (x) => x.length)
|
||||
if (connectedClients === 0) {
|
||||
logger.warn('no clients connected, exiting')
|
||||
return process.exit()
|
||||
} else {
|
||||
logger.warn(
|
||||
{ connectedClients },
|
||||
'clients still connected, not shutting down yet'
|
||||
)
|
||||
return setTimeout(() => shutdownCleanly(signal), 30 * 1000)
|
||||
}
|
||||
}
|
||||
|
||||
var shutdownCleanly = function(signal) {
|
||||
const connectedClients = __guard__(io.sockets.clients(), x => x.length);
|
||||
if (connectedClients === 0) {
|
||||
logger.warn("no clients connected, exiting");
|
||||
return process.exit();
|
||||
} else {
|
||||
logger.warn({connectedClients}, "clients still connected, not shutting down yet");
|
||||
return setTimeout(() => shutdownCleanly(signal)
|
||||
, 30 * 1000);
|
||||
}
|
||||
};
|
||||
const drainAndShutdown = function (signal) {
if (Settings.shutDownInProgress) {
logger.warn({ signal }, 'shutdown already in progress, ignoring signal')
} else {
Settings.shutDownInProgress = true
const { statusCheckInterval } = Settings
if (statusCheckInterval) {
logger.warn(
{ signal },
`received interrupt, delay drain by ${statusCheckInterval}ms`
)
}
return setTimeout(function () {
logger.warn(
{ signal },
`received interrupt, starting drain over ${shutdownDrainTimeWindow} mins`
)
DrainManager.startDrainTimeWindow(io, shutdownDrainTimeWindow)
return shutdownCleanly(signal)
}, statusCheckInterval)
}
}

const drainAndShutdown = function(signal) {
if (Settings.shutDownInProgress) {
logger.warn({signal}, "shutdown already in progress, ignoring signal");
return;
} else {
Settings.shutDownInProgress = true;
const {
statusCheckInterval
} = Settings;
if (statusCheckInterval) {
logger.warn({signal}, `received interrupt, delay drain by ${statusCheckInterval}ms`);
}
return setTimeout(function() {
logger.warn({signal}, `received interrupt, starting drain over ${shutdownDrainTimeWindow} mins`);
DrainManager.startDrainTimeWindow(io, shutdownDrainTimeWindow);
return shutdownCleanly(signal);
}
, statusCheckInterval);
}
};

Settings.shutDownInProgress = false;
Settings.shutDownInProgress = false
if (Settings.shutdownDrainTimeWindow != null) {
var shutdownDrainTimeWindow = parseInt(Settings.shutdownDrainTimeWindow, 10);
logger.log({shutdownDrainTimeWindow},"shutdownDrainTimeWindow enabled");
for (let signal of ['SIGINT', 'SIGHUP', 'SIGQUIT', 'SIGUSR1', 'SIGUSR2', 'SIGTERM', 'SIGABRT']) {
process.on(signal, drainAndShutdown);
} // signal is passed as argument to event handler
var shutdownDrainTimeWindow = parseInt(Settings.shutdownDrainTimeWindow, 10)
logger.log({ shutdownDrainTimeWindow }, 'shutdownDrainTimeWindow enabled')
for (const signal of [
'SIGINT',
'SIGHUP',
'SIGQUIT',
'SIGUSR1',
'SIGUSR2',
'SIGTERM',
'SIGABRT'
]) {
process.on(signal, drainAndShutdown)
} // signal is passed as argument to event handler
// global exception handler
if (Settings.errors != null ? Settings.errors.catchUncaughtErrors : undefined) {
process.removeAllListeners('uncaughtException');
process.on('uncaughtException', function(error) {
if (['EPIPE', 'ECONNRESET'].includes(error.code)) {
Metrics.inc('disconnected_write', 1, {status: error.code});
return logger.warn({err: error}, 'attempted to write to disconnected client');
}
logger.error({err: error}, 'uncaught exception');
if (Settings.errors != null ? Settings.errors.shutdownOnUncaughtError : undefined) {
return drainAndShutdown('SIGABRT');
}
});
}
// global exception handler
if (
Settings.errors != null ? Settings.errors.catchUncaughtErrors : undefined
) {
process.removeAllListeners('uncaughtException')
process.on('uncaughtException', function (error) {
if (['EPIPE', 'ECONNRESET'].includes(error.code)) {
Metrics.inc('disconnected_write', 1, { status: error.code })
return logger.warn(
{ err: error },
'attempted to write to disconnected client'
)
}
logger.error({ err: error }, 'uncaught exception')
if (
Settings.errors != null
? Settings.errors.shutdownOnUncaughtError
: undefined
) {
return drainAndShutdown('SIGABRT')
}
})
}
}

if (Settings.continualPubsubTraffic) {
console.log("continualPubsubTraffic enabled");
console.log('continualPubsubTraffic enabled')

const pubsubClient = redis.createClient(Settings.redis.pubsub);
const clusterClient = redis.createClient(Settings.redis.websessions);
const pubsubClient = redis.createClient(Settings.redis.pubsub)
const clusterClient = redis.createClient(Settings.redis.websessions)

const publishJob = function(channel, callback){
const checker = new HealthCheckManager(channel);
logger.debug({channel}, "sending pub to keep connection alive");
const json = JSON.stringify({health_check:true, key: checker.id, date: new Date().toString()});
Metrics.summary(`redis.publish.${channel}`, json.length);
return pubsubClient.publish(channel, json, function(err){
if (err != null) {
logger.err({err, channel}, "error publishing pubsub traffic to redis");
}
const blob = JSON.stringify({keep: "alive"});
Metrics.summary("redis.publish.cluster-continual-traffic", blob.length);
return clusterClient.publish("cluster-continual-traffic", blob, callback);
});
};
const publishJob = function (channel, callback) {
const checker = new HealthCheckManager(channel)
logger.debug({ channel }, 'sending pub to keep connection alive')
const json = JSON.stringify({
health_check: true,
key: checker.id,
date: new Date().toString()
})
Metrics.summary(`redis.publish.${channel}`, json.length)
return pubsubClient.publish(channel, json, function (err) {
if (err != null) {
logger.err({ err, channel }, 'error publishing pubsub traffic to redis')
}
const blob = JSON.stringify({ keep: 'alive' })
Metrics.summary('redis.publish.cluster-continual-traffic', blob.length)
return clusterClient.publish('cluster-continual-traffic', blob, callback)
})
}

var runPubSubTraffic = () =>
async.map(['applied-ops', 'editor-events'], publishJob, (err) =>
setTimeout(runPubSubTraffic, 1000 * 20)
)

var runPubSubTraffic = () => async.map(["applied-ops", "editor-events"], publishJob, err => setTimeout(runPubSubTraffic, 1000 * 20));

runPubSubTraffic();
runPubSubTraffic()
}

function __guard__(value, transform) {
return (typeof value !== 'undefined' && value !== null) ? transform(value) : undefined;
}
return typeof value !== 'undefined' && value !== null
? transform(value)
: undefined
}
@@ -1,95 +1,141 @@
const settings = {
redis: {
redis: {
pubsub: {
host:
process.env.PUBSUB_REDIS_HOST || process.env.REDIS_HOST || 'localhost',
port: process.env.PUBSUB_REDIS_PORT || process.env.REDIS_PORT || '6379',
password:
process.env.PUBSUB_REDIS_PASSWORD || process.env.REDIS_PASSWORD || '',
maxRetriesPerRequest: parseInt(
process.env.PUBSUB_REDIS_MAX_RETRIES_PER_REQUEST ||
process.env.REDIS_MAX_RETRIES_PER_REQUEST ||
'20'
)
},

pubsub: {
host: process.env['PUBSUB_REDIS_HOST'] || process.env['REDIS_HOST'] || "localhost",
port: process.env['PUBSUB_REDIS_PORT'] || process.env['REDIS_PORT'] || "6379",
password: process.env["PUBSUB_REDIS_PASSWORD"] || process.env["REDIS_PASSWORD"] || "",
maxRetriesPerRequest: parseInt(process.env["PUBSUB_REDIS_MAX_RETRIES_PER_REQUEST"] || process.env["REDIS_MAX_RETRIES_PER_REQUEST"] || "20")
},
realtime: {
host:
process.env.REAL_TIME_REDIS_HOST ||
process.env.REDIS_HOST ||
'localhost',
port:
process.env.REAL_TIME_REDIS_PORT || process.env.REDIS_PORT || '6379',
password:
process.env.REAL_TIME_REDIS_PASSWORD ||
process.env.REDIS_PASSWORD ||
'',
key_schema: {
clientsInProject({ project_id }) {
return `clients_in_project:{${project_id}}`
},
connectedUser({ project_id, client_id }) {
return `connected_user:{${project_id}}:${client_id}`
}
},
maxRetriesPerRequest: parseInt(
process.env.REAL_TIME_REDIS_MAX_RETRIES_PER_REQUEST ||
process.env.REDIS_MAX_RETRIES_PER_REQUEST ||
'20'
)
},
realtime: {
host: process.env['REAL_TIME_REDIS_HOST'] || process.env['REDIS_HOST'] || "localhost",
port: process.env['REAL_TIME_REDIS_PORT'] || process.env['REDIS_PORT'] || "6379",
password: process.env["REAL_TIME_REDIS_PASSWORD"] || process.env["REDIS_PASSWORD"] || "",
key_schema: {
clientsInProject({project_id}) { return `clients_in_project:{${project_id}}`; },
connectedUser({project_id, client_id}){ return `connected_user:{${project_id}}:${client_id}`; }
},
maxRetriesPerRequest: parseInt(process.env["REAL_TIME_REDIS_MAX_RETRIES_PER_REQUEST"] || process.env["REDIS_MAX_RETRIES_PER_REQUEST"] || "20")
},
documentupdater: {
host:
process.env.DOC_UPDATER_REDIS_HOST ||
process.env.REDIS_HOST ||
'localhost',
port:
process.env.DOC_UPDATER_REDIS_PORT || process.env.REDIS_PORT || '6379',
password:
process.env.DOC_UPDATER_REDIS_PASSWORD ||
process.env.REDIS_PASSWORD ||
'',
key_schema: {
pendingUpdates({ doc_id }) {
return `PendingUpdates:{${doc_id}}`
}
},
maxRetriesPerRequest: parseInt(
process.env.DOC_UPDATER_REDIS_MAX_RETRIES_PER_REQUEST ||
process.env.REDIS_MAX_RETRIES_PER_REQUEST ||
'20'
)
},

documentupdater: {
host: process.env['DOC_UPDATER_REDIS_HOST'] || process.env['REDIS_HOST'] || "localhost",
port: process.env['DOC_UPDATER_REDIS_PORT'] || process.env['REDIS_PORT'] || "6379",
password: process.env["DOC_UPDATER_REDIS_PASSWORD"] || process.env["REDIS_PASSWORD"] || "",
key_schema: {
pendingUpdates({doc_id}) { return `PendingUpdates:{${doc_id}}`; }
},
maxRetriesPerRequest: parseInt(process.env["DOC_UPDATER_REDIS_MAX_RETRIES_PER_REQUEST"] || process.env["REDIS_MAX_RETRIES_PER_REQUEST"] || "20")
},
websessions: {
host: process.env.WEB_REDIS_HOST || process.env.REDIS_HOST || 'localhost',
port: process.env.WEB_REDIS_PORT || process.env.REDIS_PORT || '6379',
password:
process.env.WEB_REDIS_PASSWORD || process.env.REDIS_PASSWORD || '',
maxRetriesPerRequest: parseInt(
process.env.WEB_REDIS_MAX_RETRIES_PER_REQUEST ||
process.env.REDIS_MAX_RETRIES_PER_REQUEST ||
'20'
)
}
},
websessions: {
host: process.env['WEB_REDIS_HOST'] || process.env['REDIS_HOST'] || "localhost",
port: process.env['WEB_REDIS_PORT'] || process.env['REDIS_PORT'] || "6379",
password: process.env["WEB_REDIS_PASSWORD"] || process.env["REDIS_PASSWORD"] || "",
maxRetriesPerRequest: parseInt(process.env["WEB_REDIS_MAX_RETRIES_PER_REQUEST"] || process.env["REDIS_MAX_RETRIES_PER_REQUEST"] || "20")
}
},
internal: {
realTime: {
port: 3026,
host: process.env.LISTEN_ADDRESS || 'localhost',
user: 'sharelatex',
pass: 'password'
}
},

internal: {
realTime: {
port: 3026,
host: process.env['LISTEN_ADDRESS'] || "localhost",
user: "sharelatex",
pass: "password"
}
},

apis: {
web: {
url: `http://${process.env['WEB_API_HOST'] || process.env['WEB_HOST'] || "localhost"}:${process.env['WEB_API_PORT'] || process.env['WEB_PORT'] || 3000}`,
user: process.env['WEB_API_USER'] || "sharelatex",
pass: process.env['WEB_API_PASSWORD'] || "password"
},
documentupdater: {
url: `http://${process.env['DOCUMENT_UPDATER_HOST'] || process.env['DOCUPDATER_HOST'] || "localhost"}:3003`
}
},

security: {
sessionSecret: process.env['SESSION_SECRET'] || "secret-please-change"
},

cookieName: process.env['COOKIE_NAME'] || "sharelatex.sid",

max_doc_length: 2 * 1024 * 1024, // 2mb
apis: {
web: {
url: `http://${
process.env.WEB_API_HOST || process.env.WEB_HOST || 'localhost'
}:${process.env.WEB_API_PORT || process.env.WEB_PORT || 3000}`,
user: process.env.WEB_API_USER || 'sharelatex',
pass: process.env.WEB_API_PASSWORD || 'password'
},
documentupdater: {
url: `http://${
process.env.DOCUMENT_UPDATER_HOST ||
process.env.DOCUPDATER_HOST ||
'localhost'
}:3003`
}
},

// combine
// max_doc_length (2mb see above) * 2 (delete + insert)
// max_ranges_size (3mb see MAX_RANGES_SIZE in document-updater)
// overhead for JSON serialization
maxUpdateSize: parseInt(process.env['MAX_UPDATE_SIZE']) || ((7 * 1024 * 1024) + (64 * 1024)),
security: {
sessionSecret: process.env.SESSION_SECRET || 'secret-please-change'
},

shutdownDrainTimeWindow: process.env['SHUTDOWN_DRAIN_TIME_WINDOW'] || 9,
cookieName: process.env.COOKIE_NAME || 'sharelatex.sid',

continualPubsubTraffic: process.env['CONTINUAL_PUBSUB_TRAFFIC'] || false,
max_doc_length: 2 * 1024 * 1024, // 2mb

checkEventOrder: process.env['CHECK_EVENT_ORDER'] || false,

publishOnIndividualChannels: process.env['PUBLISH_ON_INDIVIDUAL_CHANNELS'] || false,
// combine
// max_doc_length (2mb see above) * 2 (delete + insert)
// max_ranges_size (3mb see MAX_RANGES_SIZE in document-updater)
// overhead for JSON serialization
maxUpdateSize:
parseInt(process.env.MAX_UPDATE_SIZE) || 7 * 1024 * 1024 + 64 * 1024,
statusCheckInterval: parseInt(process.env['STATUS_CHECK_INTERVAL'] || '0'),
shutdownDrainTimeWindow: process.env.SHUTDOWN_DRAIN_TIME_WINDOW || 9,

sentry: {
dsn: process.env.SENTRY_DSN
},
continualPubsubTraffic: process.env.CONTINUAL_PUBSUB_TRAFFIC || false,

checkEventOrder: process.env.CHECK_EVENT_ORDER || false,

publishOnIndividualChannels:
process.env.PUBLISH_ON_INDIVIDUAL_CHANNELS || false,

statusCheckInterval: parseInt(process.env.STATUS_CHECK_INTERVAL || '0'),

sentry: {
dsn: process.env.SENTRY_DSN
},

errors: {
catchUncaughtErrors: true,
shutdownOnUncaughtError: true
}
}

errors: {
catchUncaughtErrors: true,
shutdownOnUncaughtError: true
}
};

// console.log settings.redis
module.exports = settings;
module.exports = settings