decaffeinate: convert individual files to js

This commit is contained in:
decaffeinate 2020-06-23 18:30:48 +01:00 committed by Jakob Ackermann
parent 42f55c4651
commit bdfca5f155
2 changed files with 249 additions and 197 deletions

View file

@@ -1,182 +1,218 @@
Metrics = require("metrics-sharelatex")
Settings = require "settings-sharelatex"
Metrics.initialize(Settings.appName or "real-time")
async = require("async")
_ = require "underscore"
/*
* decaffeinate suggestions:
* DS102: Remove unnecessary code created because of implicit returns
* DS103: Rewrite code to no longer use __guard__
* DS207: Consider shorter variations of null checks
* Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
*/
const Metrics = require("metrics-sharelatex");
const Settings = require("settings-sharelatex");
Metrics.initialize(Settings.appName || "real-time");
const async = require("async");
const _ = require("underscore");
logger = require "logger-sharelatex"
logger.initialize("real-time")
Metrics.event_loop.monitor(logger)
const logger = require("logger-sharelatex");
logger.initialize("real-time");
Metrics.event_loop.monitor(logger);
express = require("express")
session = require("express-session")
redis = require("redis-sharelatex")
if Settings.sentry?.dsn?
logger.initializeErrorReporting(Settings.sentry.dsn)
const express = require("express");
const session = require("express-session");
const redis = require("redis-sharelatex");
if ((Settings.sentry != null ? Settings.sentry.dsn : undefined) != null) {
logger.initializeErrorReporting(Settings.sentry.dsn);
}
sessionRedisClient = redis.createClient(Settings.redis.websessions)
const sessionRedisClient = redis.createClient(Settings.redis.websessions);
RedisStore = require('connect-redis')(session)
SessionSockets = require('./app/js/SessionSockets')
CookieParser = require("cookie-parser")
const RedisStore = require('connect-redis')(session);
const SessionSockets = require('./app/js/SessionSockets');
const CookieParser = require("cookie-parser");
DrainManager = require("./app/js/DrainManager")
HealthCheckManager = require("./app/js/HealthCheckManager")
const DrainManager = require("./app/js/DrainManager");
const HealthCheckManager = require("./app/js/HealthCheckManager");
# work around frame handler bug in socket.io v0.9.16
require("./socket.io.patch.js")
# Set up socket.io server
app = express()
// work around frame handler bug in socket.io v0.9.16
require("./socket.io.patch.js");
// Set up socket.io server
const app = express();
server = require('http').createServer(app)
io = require('socket.io').listen(server)
const server = require('http').createServer(app);
const io = require('socket.io').listen(server);
# Bind to sessions
sessionStore = new RedisStore(client: sessionRedisClient)
cookieParser = CookieParser(Settings.security.sessionSecret)
// Bind to sessions
const sessionStore = new RedisStore({client: sessionRedisClient});
const cookieParser = CookieParser(Settings.security.sessionSecret);
sessionSockets = new SessionSockets(io, sessionStore, cookieParser, Settings.cookieName)
const sessionSockets = new SessionSockets(io, sessionStore, cookieParser, Settings.cookieName);
Metrics.injectMetricsRoute(app)
app.use(Metrics.http.monitor(logger))
Metrics.injectMetricsRoute(app);
app.use(Metrics.http.monitor(logger));
io.configure ->
io.enable('browser client minification')
io.enable('browser client etag')
io.configure(function() {
io.enable('browser client minification');
io.enable('browser client etag');
# Fix for Safari 5 error of "Error during WebSocket handshake: location mismatch"
# See http://answers.dotcloud.com/question/578/problem-with-websocket-over-ssl-in-safari-with
io.set('match origin protocol', true)
// Fix for Safari 5 error of "Error during WebSocket handshake: location mismatch"
// See http://answers.dotcloud.com/question/578/problem-with-websocket-over-ssl-in-safari-with
io.set('match origin protocol', true);
# gzip uses a Node 0.8.x method of calling the gzip program which
# doesn't work with 0.6.x
#io.enable('browser client gzip')
io.set('transports', ['websocket', 'flashsocket', 'htmlfile', 'xhr-polling', 'jsonp-polling'])
io.set('log level', 1)
// gzip uses a Node 0.8.x method of calling the gzip program which
// doesn't work with 0.6.x
//io.enable('browser client gzip')
io.set('transports', ['websocket', 'flashsocket', 'htmlfile', 'xhr-polling', 'jsonp-polling']);
return io.set('log level', 1);
});
app.get "/", (req, res, next) ->
res.send "real-time-sharelatex is alive"
app.get("/", (req, res, next) => res.send("real-time-sharelatex is alive"));
app.get "/status", (req, res, next) ->
if Settings.shutDownInProgress
res.send 503 # Service unavailable
else
res.send "real-time-sharelatex is alive"
app.get("/status", function(req, res, next) {
if (Settings.shutDownInProgress) {
return res.send(503); // Service unavailable
} else {
return res.send("real-time-sharelatex is alive");
}
});
app.get "/debug/events", (req, res, next) ->
Settings.debugEvents = parseInt(req.query?.count,10) || 20
logger.log {count: Settings.debugEvents}, "starting debug mode"
res.send "debug mode will log next #{Settings.debugEvents} events"
app.get("/debug/events", function(req, res, next) {
Settings.debugEvents = parseInt(req.query != null ? req.query.count : undefined,10) || 20;
logger.log({count: Settings.debugEvents}, "starting debug mode");
return res.send(`debug mode will log next ${Settings.debugEvents} events`);
});
rclient = require("redis-sharelatex").createClient(Settings.redis.realtime)
const rclient = require("redis-sharelatex").createClient(Settings.redis.realtime);
healthCheck = (req, res, next)->
rclient.healthCheck (error) ->
if error?
logger.err {err: error}, "failed redis health check"
res.sendStatus 500
else if HealthCheckManager.isFailing()
status = HealthCheckManager.status()
logger.err {pubSubErrors: status}, "failed pubsub health check"
res.sendStatus 500
else
res.sendStatus 200
const healthCheck = (req, res, next) => rclient.healthCheck(function(error) {
if (error != null) {
logger.err({err: error}, "failed redis health check");
return res.sendStatus(500);
} else if (HealthCheckManager.isFailing()) {
const status = HealthCheckManager.status();
logger.err({pubSubErrors: status}, "failed pubsub health check");
return res.sendStatus(500);
} else {
return res.sendStatus(200);
}
});
app.get "/health_check", healthCheck
app.get("/health_check", healthCheck);
app.get "/health_check/redis", healthCheck
app.get("/health_check/redis", healthCheck);
Router = require "./app/js/Router"
Router.configure(app, io, sessionSockets)
const Router = require("./app/js/Router");
Router.configure(app, io, sessionSockets);
WebsocketLoadBalancer = require "./app/js/WebsocketLoadBalancer"
WebsocketLoadBalancer.listenForEditorEvents(io)
const WebsocketLoadBalancer = require("./app/js/WebsocketLoadBalancer");
WebsocketLoadBalancer.listenForEditorEvents(io);
DocumentUpdaterController = require "./app/js/DocumentUpdaterController"
DocumentUpdaterController.listenForUpdatesFromDocumentUpdater(io)
const DocumentUpdaterController = require("./app/js/DocumentUpdaterController");
DocumentUpdaterController.listenForUpdatesFromDocumentUpdater(io);
port = Settings.internal.realTime.port
host = Settings.internal.realTime.host
const {
port
} = Settings.internal.realTime;
const {
host
} = Settings.internal.realTime;
server.listen port, host, (error) ->
throw error if error?
logger.info "realtime starting up, listening on #{host}:#{port}"
server.listen(port, host, function(error) {
if (error != null) { throw error; }
return logger.info(`realtime starting up, listening on ${host}:${port}`);
});
# Stop huge stack traces in logs from all the socket.io parsing steps.
Error.stackTraceLimit = 10
// Stop huge stack traces in logs from all the socket.io parsing steps.
Error.stackTraceLimit = 10;
shutdownCleanly = (signal) ->
connectedClients = io.sockets.clients()?.length
if connectedClients == 0
logger.warn("no clients connected, exiting")
process.exit()
else
logger.warn {connectedClients}, "clients still connected, not shutting down yet"
setTimeout () ->
shutdownCleanly(signal)
, 30 * 1000
var shutdownCleanly = function(signal) {
const connectedClients = __guard__(io.sockets.clients(), x => x.length);
if (connectedClients === 0) {
logger.warn("no clients connected, exiting");
return process.exit();
} else {
logger.warn({connectedClients}, "clients still connected, not shutting down yet");
return setTimeout(() => shutdownCleanly(signal)
, 30 * 1000);
}
};
drainAndShutdown = (signal) ->
if Settings.shutDownInProgress
logger.warn signal: signal, "shutdown already in progress, ignoring signal"
return
else
Settings.shutDownInProgress = true
statusCheckInterval = Settings.statusCheckInterval
if statusCheckInterval
logger.warn signal: signal, "received interrupt, delay drain by #{statusCheckInterval}ms"
setTimeout () ->
logger.warn signal: signal, "received interrupt, starting drain over #{shutdownDrainTimeWindow} mins"
DrainManager.startDrainTimeWindow(io, shutdownDrainTimeWindow)
shutdownCleanly(signal)
, statusCheckInterval
const drainAndShutdown = function(signal) {
if (Settings.shutDownInProgress) {
logger.warn({signal}, "shutdown already in progress, ignoring signal");
return;
} else {
Settings.shutDownInProgress = true;
const {
statusCheckInterval
} = Settings;
if (statusCheckInterval) {
logger.warn({signal}, `received interrupt, delay drain by ${statusCheckInterval}ms`);
}
return setTimeout(function() {
logger.warn({signal}, `received interrupt, starting drain over ${shutdownDrainTimeWindow} mins`);
DrainManager.startDrainTimeWindow(io, shutdownDrainTimeWindow);
return shutdownCleanly(signal);
}
, statusCheckInterval);
}
};
Settings.shutDownInProgress = false
if Settings.shutdownDrainTimeWindow?
shutdownDrainTimeWindow = parseInt(Settings.shutdownDrainTimeWindow, 10)
logger.log shutdownDrainTimeWindow: shutdownDrainTimeWindow,"shutdownDrainTimeWindow enabled"
for signal in ['SIGINT', 'SIGHUP', 'SIGQUIT', 'SIGUSR1', 'SIGUSR2', 'SIGTERM', 'SIGABRT']
process.on signal, drainAndShutdown # signal is passed as argument to event handler
Settings.shutDownInProgress = false;
if (Settings.shutdownDrainTimeWindow != null) {
var shutdownDrainTimeWindow = parseInt(Settings.shutdownDrainTimeWindow, 10);
logger.log({shutdownDrainTimeWindow},"shutdownDrainTimeWindow enabled");
for (let signal of ['SIGINT', 'SIGHUP', 'SIGQUIT', 'SIGUSR1', 'SIGUSR2', 'SIGTERM', 'SIGABRT']) {
process.on(signal, drainAndShutdown);
} // signal is passed as argument to event handler
# global exception handler
if Settings.errors?.catchUncaughtErrors
process.removeAllListeners('uncaughtException')
process.on 'uncaughtException', (error) ->
if ['EPIPE', 'ECONNRESET'].includes(error.code)
Metrics.inc('disconnected_write', 1, {status: error.code})
return logger.warn err: error, 'attempted to write to disconnected client'
logger.error err: error, 'uncaught exception'
if Settings.errors?.shutdownOnUncaughtError
drainAndShutdown('SIGABRT')
// global exception handler
if (Settings.errors != null ? Settings.errors.catchUncaughtErrors : undefined) {
process.removeAllListeners('uncaughtException');
process.on('uncaughtException', function(error) {
if (['EPIPE', 'ECONNRESET'].includes(error.code)) {
Metrics.inc('disconnected_write', 1, {status: error.code});
return logger.warn({err: error}, 'attempted to write to disconnected client');
}
logger.error({err: error}, 'uncaught exception');
if (Settings.errors != null ? Settings.errors.shutdownOnUncaughtError : undefined) {
return drainAndShutdown('SIGABRT');
}
});
}
}
if Settings.continualPubsubTraffic
console.log "continualPubsubTraffic enabled"
if (Settings.continualPubsubTraffic) {
console.log("continualPubsubTraffic enabled");
pubsubClient = redis.createClient(Settings.redis.pubsub)
clusterClient = redis.createClient(Settings.redis.websessions)
const pubsubClient = redis.createClient(Settings.redis.pubsub);
const clusterClient = redis.createClient(Settings.redis.websessions);
publishJob = (channel, callback)->
checker = new HealthCheckManager(channel)
logger.debug {channel:channel}, "sending pub to keep connection alive"
json = JSON.stringify({health_check:true, key: checker.id, date: new Date().toString()})
Metrics.summary "redis.publish.#{channel}", json.length
pubsubClient.publish channel, json, (err)->
if err?
logger.err {err, channel}, "error publishing pubsub traffic to redis"
blob = JSON.stringify({keep: "alive"})
Metrics.summary "redis.publish.cluster-continual-traffic", blob.length
clusterClient.publish "cluster-continual-traffic", blob, callback
const publishJob = function(channel, callback){
const checker = new HealthCheckManager(channel);
logger.debug({channel}, "sending pub to keep connection alive");
const json = JSON.stringify({health_check:true, key: checker.id, date: new Date().toString()});
Metrics.summary(`redis.publish.${channel}`, json.length);
return pubsubClient.publish(channel, json, function(err){
if (err != null) {
logger.err({err, channel}, "error publishing pubsub traffic to redis");
}
const blob = JSON.stringify({keep: "alive"});
Metrics.summary("redis.publish.cluster-continual-traffic", blob.length);
return clusterClient.publish("cluster-continual-traffic", blob, callback);
});
};
runPubSubTraffic = ->
async.map ["applied-ops", "editor-events"], publishJob, (err)->
setTimeout(runPubSubTraffic, 1000 * 20)
var runPubSubTraffic = () => async.map(["applied-ops", "editor-events"], publishJob, err => setTimeout(runPubSubTraffic, 1000 * 20));
runPubSubTraffic()
runPubSubTraffic();
}
function __guard__(value, transform) {
return (typeof value !== 'undefined' && value !== null) ? transform(value) : undefined;
}

View file

@@ -1,79 +1,95 @@
settings =
redis:
const settings = {
redis: {
pubsub:
host: process.env['PUBSUB_REDIS_HOST'] or process.env['REDIS_HOST'] or "localhost"
port: process.env['PUBSUB_REDIS_PORT'] or process.env['REDIS_PORT'] or "6379"
password: process.env["PUBSUB_REDIS_PASSWORD"] or process.env["REDIS_PASSWORD"] or ""
maxRetriesPerRequest: parseInt(process.env["PUBSUB_REDIS_MAX_RETRIES_PER_REQUEST"] or process.env["REDIS_MAX_RETRIES_PER_REQUEST"] or "20")
pubsub: {
host: process.env['PUBSUB_REDIS_HOST'] || process.env['REDIS_HOST'] || "localhost",
port: process.env['PUBSUB_REDIS_PORT'] || process.env['REDIS_PORT'] || "6379",
password: process.env["PUBSUB_REDIS_PASSWORD"] || process.env["REDIS_PASSWORD"] || "",
maxRetriesPerRequest: parseInt(process.env["PUBSUB_REDIS_MAX_RETRIES_PER_REQUEST"] || process.env["REDIS_MAX_RETRIES_PER_REQUEST"] || "20")
},
realtime:
host: process.env['REAL_TIME_REDIS_HOST'] or process.env['REDIS_HOST'] or "localhost"
port: process.env['REAL_TIME_REDIS_PORT'] or process.env['REDIS_PORT'] or "6379"
password: process.env["REAL_TIME_REDIS_PASSWORD"] or process.env["REDIS_PASSWORD"] or ""
key_schema:
clientsInProject: ({project_id}) -> "clients_in_project:{#{project_id}}"
connectedUser: ({project_id, client_id})-> "connected_user:{#{project_id}}:#{client_id}"
maxRetriesPerRequest: parseInt(process.env["REAL_TIME_REDIS_MAX_RETRIES_PER_REQUEST"] or process.env["REDIS_MAX_RETRIES_PER_REQUEST"] or "20")
realtime: {
host: process.env['REAL_TIME_REDIS_HOST'] || process.env['REDIS_HOST'] || "localhost",
port: process.env['REAL_TIME_REDIS_PORT'] || process.env['REDIS_PORT'] || "6379",
password: process.env["REAL_TIME_REDIS_PASSWORD"] || process.env["REDIS_PASSWORD"] || "",
key_schema: {
clientsInProject({project_id}) { return `clients_in_project:{${project_id}}`; },
connectedUser({project_id, client_id}){ return `connected_user:{${project_id}}:${client_id}`; }
},
maxRetriesPerRequest: parseInt(process.env["REAL_TIME_REDIS_MAX_RETRIES_PER_REQUEST"] || process.env["REDIS_MAX_RETRIES_PER_REQUEST"] || "20")
},
documentupdater:
host: process.env['DOC_UPDATER_REDIS_HOST'] or process.env['REDIS_HOST'] or "localhost"
port: process.env['DOC_UPDATER_REDIS_PORT'] or process.env['REDIS_PORT'] or "6379"
password: process.env["DOC_UPDATER_REDIS_PASSWORD"] or process.env["REDIS_PASSWORD"] or ""
key_schema:
pendingUpdates: ({doc_id}) -> "PendingUpdates:{#{doc_id}}"
maxRetriesPerRequest: parseInt(process.env["DOC_UPDATER_REDIS_MAX_RETRIES_PER_REQUEST"] or process.env["REDIS_MAX_RETRIES_PER_REQUEST"] or "20")
documentupdater: {
host: process.env['DOC_UPDATER_REDIS_HOST'] || process.env['REDIS_HOST'] || "localhost",
port: process.env['DOC_UPDATER_REDIS_PORT'] || process.env['REDIS_PORT'] || "6379",
password: process.env["DOC_UPDATER_REDIS_PASSWORD"] || process.env["REDIS_PASSWORD"] || "",
key_schema: {
pendingUpdates({doc_id}) { return `PendingUpdates:{${doc_id}}`; }
},
maxRetriesPerRequest: parseInt(process.env["DOC_UPDATER_REDIS_MAX_RETRIES_PER_REQUEST"] || process.env["REDIS_MAX_RETRIES_PER_REQUEST"] || "20")
},
websessions:
host: process.env['WEB_REDIS_HOST'] or process.env['REDIS_HOST'] or "localhost"
port: process.env['WEB_REDIS_PORT'] or process.env['REDIS_PORT'] or "6379"
password: process.env["WEB_REDIS_PASSWORD"] or process.env["REDIS_PASSWORD"] or ""
maxRetriesPerRequest: parseInt(process.env["WEB_REDIS_MAX_RETRIES_PER_REQUEST"] or process.env["REDIS_MAX_RETRIES_PER_REQUEST"] or "20")
websessions: {
host: process.env['WEB_REDIS_HOST'] || process.env['REDIS_HOST'] || "localhost",
port: process.env['WEB_REDIS_PORT'] || process.env['REDIS_PORT'] || "6379",
password: process.env["WEB_REDIS_PASSWORD"] || process.env["REDIS_PASSWORD"] || "",
maxRetriesPerRequest: parseInt(process.env["WEB_REDIS_MAX_RETRIES_PER_REQUEST"] || process.env["REDIS_MAX_RETRIES_PER_REQUEST"] || "20")
}
},
internal:
realTime:
port: 3026
host: process.env['LISTEN_ADDRESS'] or "localhost"
user: "sharelatex"
internal: {
realTime: {
port: 3026,
host: process.env['LISTEN_ADDRESS'] || "localhost",
user: "sharelatex",
pass: "password"
}
},
apis:
web:
url: "http://#{process.env['WEB_API_HOST'] or process.env['WEB_HOST'] or "localhost"}:#{process.env['WEB_API_PORT'] or process.env['WEB_PORT'] or 3000}"
user: process.env['WEB_API_USER'] or "sharelatex"
pass: process.env['WEB_API_PASSWORD'] or "password"
documentupdater:
url: "http://#{process.env['DOCUMENT_UPDATER_HOST'] or process.env['DOCUPDATER_HOST'] or "localhost"}:3003"
apis: {
web: {
url: `http://${process.env['WEB_API_HOST'] || process.env['WEB_HOST'] || "localhost"}:${process.env['WEB_API_PORT'] || process.env['WEB_PORT'] || 3000}`,
user: process.env['WEB_API_USER'] || "sharelatex",
pass: process.env['WEB_API_PASSWORD'] || "password"
},
documentupdater: {
url: `http://${process.env['DOCUMENT_UPDATER_HOST'] || process.env['DOCUPDATER_HOST'] || "localhost"}:3003`
}
},
security:
sessionSecret: process.env['SESSION_SECRET'] or "secret-please-change"
security: {
sessionSecret: process.env['SESSION_SECRET'] || "secret-please-change"
},
cookieName: process.env['COOKIE_NAME'] or "sharelatex.sid"
cookieName: process.env['COOKIE_NAME'] || "sharelatex.sid",
max_doc_length: 2 * 1024 * 1024 # 2mb
max_doc_length: 2 * 1024 * 1024, // 2mb
# combine
# max_doc_length (2mb see above) * 2 (delete + insert)
# max_ranges_size (3mb see MAX_RANGES_SIZE in document-updater)
# overhead for JSON serialization
maxUpdateSize: parseInt(process.env['MAX_UPDATE_SIZE']) or 7 * 1024 * 1024 + 64 * 1024
// combine
// max_doc_length (2mb see above) * 2 (delete + insert)
// max_ranges_size (3mb see MAX_RANGES_SIZE in document-updater)
// overhead for JSON serialization
maxUpdateSize: parseInt(process.env['MAX_UPDATE_SIZE']) || ((7 * 1024 * 1024) + (64 * 1024)),
shutdownDrainTimeWindow: process.env['SHUTDOWN_DRAIN_TIME_WINDOW'] or 9
shutdownDrainTimeWindow: process.env['SHUTDOWN_DRAIN_TIME_WINDOW'] || 9,
continualPubsubTraffic: process.env['CONTINUAL_PUBSUB_TRAFFIC'] or false
continualPubsubTraffic: process.env['CONTINUAL_PUBSUB_TRAFFIC'] || false,
checkEventOrder: process.env['CHECK_EVENT_ORDER'] or false
checkEventOrder: process.env['CHECK_EVENT_ORDER'] || false,
publishOnIndividualChannels: process.env['PUBLISH_ON_INDIVIDUAL_CHANNELS'] or false
publishOnIndividualChannels: process.env['PUBLISH_ON_INDIVIDUAL_CHANNELS'] || false,
statusCheckInterval: parseInt(process.env['STATUS_CHECK_INTERVAL'] or '0')
statusCheckInterval: parseInt(process.env['STATUS_CHECK_INTERVAL'] || '0'),
sentry:
sentry: {
dsn: process.env.SENTRY_DSN
},
errors:
catchUncaughtErrors: true
errors: {
catchUncaughtErrors: true,
shutdownOnUncaughtError: true
}
};
# console.log settings.redis
module.exports = settings
// console.log settings.redis
module.exports = settings;