Mirror of https://github.com/overleaf/overleaf.git, synced 2024-11-21 20:47:08 -05:00
decaffeinate: Convert settings.defaults.coffee to JS
parent 8786542ad6
commit 3acf183240
1 changed file with 92 additions and 69 deletions
@@ -1,95 +1,118 @@
-Path = require "path"
+/*
+ * decaffeinate suggestions:
+ * DS207: Consider shorter variations of null checks
+ * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
+ */
+const Path = require("path");
 
-# environment variables renamed for consistency
-# use AWS_ACCESS_KEY_ID-style going forward
-if process.env['AWS_KEY'] && !process.env['AWS_ACCESS_KEY_ID']
-	process.env['AWS_ACCESS_KEY_ID'] = process.env['AWS_KEY']
-if process.env['AWS_SECRET'] && !process.env['AWS_SECRET_ACCESS_KEY']
-	process.env['AWS_SECRET_ACCESS_KEY'] = process.env['AWS_SECRET']
+// environment variables renamed for consistency
+// use AWS_ACCESS_KEY_ID-style going forward
+if (process.env['AWS_KEY'] && !process.env['AWS_ACCESS_KEY_ID']) {
+	process.env['AWS_ACCESS_KEY_ID'] = process.env['AWS_KEY'];
+}
+if (process.env['AWS_SECRET'] && !process.env['AWS_SECRET_ACCESS_KEY']) {
+	process.env['AWS_SECRET_ACCESS_KEY'] = process.env['AWS_SECRET'];
+}
 
-# pre-backend setting, fall back to old behaviour
-unless process.env['BACKEND']?
-	if process.env['AWS_ACCESS_KEY_ID']? or process.env['S3_BUCKET_CREDENTIALS']?
-		process.env['BACKEND'] = "s3"
-		process.env['USER_FILES_BUCKET_NAME'] = process.env['AWS_S3_USER_FILES_BUCKET_NAME']
-		process.env['TEMPLATE_FILES_BUCKET_NAME'] = process.env['AWS_S3_TEMPLATE_FILES_BUCKET_NAME']
-		process.env['PUBLIC_FILES_BUCKET_NAME'] = process.env['AWS_S3_PUBLIC_FILES_BUCKET_NAME']
-	else
-		process.env['BACKEND'] = "fs"
-		process.env['USER_FILES_BUCKET_NAME'] = Path.resolve(__dirname + "/../user_files")
-		process.env['TEMPLATE_FILES_BUCKET_NAME'] = Path.resolve(__dirname + "/../template_files")
-		process.env['PUBLIC_FILES_BUCKET_NAME'] = Path.resolve(__dirname + "/../public_files")
+// pre-backend setting, fall back to old behaviour
+if (process.env['BACKEND'] == null) {
+	if ((process.env['AWS_ACCESS_KEY_ID'] != null) || (process.env['S3_BUCKET_CREDENTIALS'] != null)) {
+		process.env['BACKEND'] = "s3";
+		process.env['USER_FILES_BUCKET_NAME'] = process.env['AWS_S3_USER_FILES_BUCKET_NAME'];
+		process.env['TEMPLATE_FILES_BUCKET_NAME'] = process.env['AWS_S3_TEMPLATE_FILES_BUCKET_NAME'];
+		process.env['PUBLIC_FILES_BUCKET_NAME'] = process.env['AWS_S3_PUBLIC_FILES_BUCKET_NAME'];
+	} else {
+		process.env['BACKEND'] = "fs";
+		process.env['USER_FILES_BUCKET_NAME'] = Path.resolve(__dirname + "/../user_files");
+		process.env['TEMPLATE_FILES_BUCKET_NAME'] = Path.resolve(__dirname + "/../template_files");
+		process.env['PUBLIC_FILES_BUCKET_NAME'] = Path.resolve(__dirname + "/../public_files");
+	}
+}
 
-settings =
-	internal:
-		filestore:
-			port: 3009
-			host: process.env['LISTEN_ADDRESS'] or "localhost"
+const settings = {
+	internal: {
+		filestore: {
+			port: 3009,
+			host: process.env['LISTEN_ADDRESS'] || "localhost"
+		}
+	},
 
-	filestore:
-		# Which backend persistor to use.
-		# Choices are
-		# s3 - Amazon S3
-		# fs - local filesystem
-		# gcs - Google Cloud Storage
-		backend: process.env['BACKEND']
+	filestore: {
+		// Which backend persistor to use.
+		// Choices are
+		// s3 - Amazon S3
+		// fs - local filesystem
+		// gcs - Google Cloud Storage
+		backend: process.env['BACKEND'],
 
-		gcs:
+		gcs: {
 			endpoint:
-				if process.env['GCS_API_ENDPOINT']
-					apiEndpoint: process.env['GCS_API_ENDPOINT']
-					apiScheme: process.env['GCS_API_SCHEME']
+				process.env['GCS_API_ENDPOINT'] ? {
+					apiEndpoint: process.env['GCS_API_ENDPOINT'],
+					apiScheme: process.env['GCS_API_SCHEME'],
 					projectId: process.env['GCS_PROJECT_ID']
-			unlockBeforeDelete: process.env['GCS_UNLOCK_BEFORE_DELETE'] == "true" # unlock an event-based hold before deleting. default false
-			deletedBucketSuffix: process.env['GCS_DELETED_BUCKET_SUFFIX'] # if present, copy file to another bucket on delete. default null
-			deleteConcurrency: parseInt(process.env['GCS_DELETE_CONCURRENCY']) || 50
+				} : undefined,
+			unlockBeforeDelete: process.env['GCS_UNLOCK_BEFORE_DELETE'] === "true", // unlock an event-based hold before deleting. default false
+			deletedBucketSuffix: process.env['GCS_DELETED_BUCKET_SUFFIX'], // if present, copy file to another bucket on delete. default null
+			deleteConcurrency: parseInt(process.env['GCS_DELETE_CONCURRENCY']) || 50,
 			signedUrlExpiryInMs: parseInt(process.env['LINK_EXPIRY_TIMEOUT'] || 60000)
+		},
 
 		s3:
-			if process.env['AWS_ACCESS_KEY_ID']? or process.env['S3_BUCKET_CREDENTIALS']?
-				key: process.env['AWS_ACCESS_KEY_ID']
-				secret: process.env['AWS_SECRET_ACCESS_KEY']
-				endpoint: process.env['AWS_S3_ENDPOINT']
-				pathStyle: process.env['AWS_S3_PATH_STYLE']
-				partSize: process.env['AWS_S3_PARTSIZE'] or (100 * 1024 * 1024)
-				bucketCreds: JSON.parse process.env['S3_BUCKET_CREDENTIALS'] if process.env['S3_BUCKET_CREDENTIALS']?
+			(process.env['AWS_ACCESS_KEY_ID'] != null) || (process.env['S3_BUCKET_CREDENTIALS'] != null) ? {
+				key: process.env['AWS_ACCESS_KEY_ID'],
+				secret: process.env['AWS_SECRET_ACCESS_KEY'],
+				endpoint: process.env['AWS_S3_ENDPOINT'],
+				pathStyle: process.env['AWS_S3_PATH_STYLE'],
+				partSize: process.env['AWS_S3_PARTSIZE'] || (100 * 1024 * 1024),
+				bucketCreds: ((process.env['S3_BUCKET_CREDENTIALS'] != null) ? JSON.parse(process.env['S3_BUCKET_CREDENTIALS']) : undefined)
+			} : undefined,
 
-		# GCS should be configured by the service account on the kubernetes pod. See GOOGLE_APPLICATION_CREDENTIALS,
-		# which will be picked up automatically.
+		// GCS should be configured by the service account on the kubernetes pod. See GOOGLE_APPLICATION_CREDENTIALS,
+		// which will be picked up automatically.
 
-		stores:
-			user_files: process.env['USER_FILES_BUCKET_NAME']
-			template_files: process.env['TEMPLATE_FILES_BUCKET_NAME']
+		stores: {
+			user_files: process.env['USER_FILES_BUCKET_NAME'],
+			template_files: process.env['TEMPLATE_FILES_BUCKET_NAME'],
 			public_files: process.env['PUBLIC_FILES_BUCKET_NAME']
+		},
 
 		fallback:
-			if process.env['FALLBACK_BACKEND']?
-				backend: process.env['FALLBACK_BACKEND']
-				# mapping of bucket names on the fallback, to bucket names on the primary.
-				# e.g. { myS3UserFilesBucketName: 'myGoogleUserFilesBucketName' }
-				buckets: JSON.parse(process.env['FALLBACK_BUCKET_MAPPING'] || '{}')
-				copyOnMiss: process.env['COPY_ON_MISS'] == 'true'
+			(process.env['FALLBACK_BACKEND'] != null) ? {
+				backend: process.env['FALLBACK_BACKEND'],
+				// mapping of bucket names on the fallback, to bucket names on the primary.
+				// e.g. { myS3UserFilesBucketName: 'myGoogleUserFilesBucketName' }
+				buckets: JSON.parse(process.env['FALLBACK_BUCKET_MAPPING'] || '{}'),
+				copyOnMiss: process.env['COPY_ON_MISS'] === 'true'
+			} : undefined,
 
-		allowRedirects: if process.env['ALLOW_REDIRECTS'] == 'true' then true else false
+		allowRedirects: process.env['ALLOW_REDIRECTS'] === 'true' ? true : false
+	},
 
-	path:
+	path: {
 		uploadFolder: Path.resolve(__dirname + "/../uploads")
+	},
 
-	commands:
-		# Any commands to wrap the convert utility in, for example ["nice"], or ["firejail", "--profile=/etc/firejail/convert.profile"]
+	commands: {
+		// Any commands to wrap the convert utility in, for example ["nice"], or ["firejail", "--profile=/etc/firejail/convert.profile"]
 		convertCommandPrefix: []
+	},
 
-	enableConversions: if process.env['ENABLE_CONVERSIONS'] == 'true' then true else false
+	enableConversions: process.env['ENABLE_CONVERSIONS'] === 'true' ? true : false,
 
-	sentry:
+	sentry: {
 		dsn: process.env.SENTRY_DSN
+	}
+};
 
-# Filestore health check
-# ----------------------
-# Project and file details to check in persistor when calling /health_check
-if process.env['HEALTH_CHECK_PROJECT_ID']? and process.env['HEALTH_CHECK_FILE_ID']?
-	settings.health_check =
-		project_id: process.env['HEALTH_CHECK_PROJECT_ID']
+// Filestore health check
+// ----------------------
+// Project and file details to check in persistor when calling /health_check
+if ((process.env['HEALTH_CHECK_PROJECT_ID'] != null) && (process.env['HEALTH_CHECK_FILE_ID'] != null)) {
+	settings.health_check = {
+		project_id: process.env['HEALTH_CHECK_PROJECT_ID'],
 		file_id: process.env['HEALTH_CHECK_FILE_ID']
+	};
+}
 
-module.exports = settings
+module.exports = settings;
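Note: the DS207 suggestion in the decaffeinate banner refers to the `== null` / `!= null` checks that replace CoffeeScript's existential operator (`x?`) throughout this diff. A minimal sketch of why loose equality against null is the equivalent (the variable name is illustrative, not from the commit):

// CoffeeScript `process.env['BACKEND']?` is true unless the value is
// null OR undefined. Loose equality against null captures exactly that,
// because `undefined == null` is true while `undefined === null` is false.
const backend = process.env['BACKEND'];

if (backend == null) {
	// reached when BACKEND is unset (undefined) or explicitly null
}

// A plain truthiness check would NOT be equivalent: `if (!backend)` also
// fires for the empty string "", which `backend == null` does not.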
Loading…
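The other recurring conversion is the optional config section: a CoffeeScript `if` used as an expression (as in `s3` and `fallback` above) becomes a ternary whose else branch is `undefined`, so the whole section is missing when its environment variables are unset. A standalone sketch of the same pattern, reusing the `fallback` names from the diff:

// Optional section: yields undefined when FALLBACK_BACKEND is not configured.
const fallback =
	process.env['FALLBACK_BACKEND'] != null ? {
		backend: process.env['FALLBACK_BACKEND'],
		buckets: JSON.parse(process.env['FALLBACK_BUCKET_MAPPING'] || '{}'),
		copyOnMiss: process.env['COPY_ON_MISS'] === 'true'
	} : undefined;

// Consumers can then gate on the presence of the whole section:
if (fallback) {
	// wire up the fallback persistor using fallback.backend, fallback.buckets, ...
}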
Reference in a new issue
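Since the file still ends with `module.exports = settings`, consumers are unaffected by the conversion. A hypothetical consumer, assuming a require path that depends on where the file lives:

// Hypothetical consumer; the relative path is illustrative, not from the commit.
const settings = require('./settings.defaults');

// "s3", "fs" or "gcs", derived from the BACKEND handling above
console.log(settings.filestore.backend);

// bind address and port for the internal filestore HTTP service
console.log(settings.internal.filestore.host, settings.internal.filestore.port);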