decaffeinate: Run post-processing cleanups on settings.defaults.coffee

This commit is contained in:
decaffeinate 2021-05-19 17:24:23 +01:00 committed by Alf Eaton
parent 3acf183240
commit 0b62828041

View file

@@ -1,3 +1,8 @@
/* eslint-disable
no-path-concat,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
* decaffeinate suggestions:
* DS207: Consider shorter variations of null checks
@@ -7,25 +12,25 @@ const Path = require("path");
// environment variables renamed for consistency
// use AWS_ACCESS_KEY_ID-style going forward
if (process.env['AWS_KEY'] && !process.env['AWS_ACCESS_KEY_ID']) {
process.env['AWS_ACCESS_KEY_ID'] = process.env['AWS_KEY'];
if (process.env.AWS_KEY && !process.env.AWS_ACCESS_KEY_ID) {
process.env.AWS_ACCESS_KEY_ID = process.env.AWS_KEY;
}
if (process.env['AWS_SECRET'] && !process.env['AWS_SECRET_ACCESS_KEY']) {
process.env['AWS_SECRET_ACCESS_KEY'] = process.env['AWS_SECRET'];
if (process.env.AWS_SECRET && !process.env.AWS_SECRET_ACCESS_KEY) {
process.env.AWS_SECRET_ACCESS_KEY = process.env.AWS_SECRET;
}
// pre-backend setting, fall back to old behaviour
if (process.env['BACKEND'] == null) {
if ((process.env['AWS_ACCESS_KEY_ID'] != null) || (process.env['S3_BUCKET_CREDENTIALS'] != null)) {
process.env['BACKEND'] = "s3";
process.env['USER_FILES_BUCKET_NAME'] = process.env['AWS_S3_USER_FILES_BUCKET_NAME'];
process.env['TEMPLATE_FILES_BUCKET_NAME'] = process.env['AWS_S3_TEMPLATE_FILES_BUCKET_NAME'];
process.env['PUBLIC_FILES_BUCKET_NAME'] = process.env['AWS_S3_PUBLIC_FILES_BUCKET_NAME'];
if (process.env.BACKEND == null) {
if ((process.env.AWS_ACCESS_KEY_ID != null) || (process.env.S3_BUCKET_CREDENTIALS != null)) {
process.env.BACKEND = "s3";
process.env.USER_FILES_BUCKET_NAME = process.env.AWS_S3_USER_FILES_BUCKET_NAME;
process.env.TEMPLATE_FILES_BUCKET_NAME = process.env.AWS_S3_TEMPLATE_FILES_BUCKET_NAME;
process.env.PUBLIC_FILES_BUCKET_NAME = process.env.AWS_S3_PUBLIC_FILES_BUCKET_NAME;
} else {
process.env['BACKEND'] = "fs";
process.env['USER_FILES_BUCKET_NAME'] = Path.resolve(__dirname + "/../user_files");
process.env['TEMPLATE_FILES_BUCKET_NAME'] = Path.resolve(__dirname + "/../template_files");
process.env['PUBLIC_FILES_BUCKET_NAME'] = Path.resolve(__dirname + "/../public_files");
process.env.BACKEND = "fs";
process.env.USER_FILES_BUCKET_NAME = Path.resolve(__dirname + "/../user_files");
process.env.TEMPLATE_FILES_BUCKET_NAME = Path.resolve(__dirname + "/../template_files");
process.env.PUBLIC_FILES_BUCKET_NAME = Path.resolve(__dirname + "/../public_files");
}
}
@@ -33,7 +38,7 @@ const settings = {
internal: {
filestore: {
port: 3009,
host: process.env['LISTEN_ADDRESS'] || "localhost"
host: process.env.LISTEN_ADDRESS || "localhost"
}
},
@@ -43,50 +48,50 @@ const settings = {
// s3 - Amazon S3
// fs - local filesystem
// gcs - Google Cloud Storage
backend: process.env['BACKEND'],
backend: process.env.BACKEND,
gcs: {
endpoint:
process.env['GCS_API_ENDPOINT'] ?{
apiEndpoint: process.env['GCS_API_ENDPOINT'],
apiScheme: process.env['GCS_API_SCHEME'],
projectId: process.env['GCS_PROJECT_ID']
process.env.GCS_API_ENDPOINT ?{
apiEndpoint: process.env.GCS_API_ENDPOINT,
apiScheme: process.env.GCS_API_SCHEME,
projectId: process.env.GCS_PROJECT_ID
} : undefined,
unlockBeforeDelete: process.env['GCS_UNLOCK_BEFORE_DELETE'] === "true", // unlock an event-based hold before deleting. default false
deletedBucketSuffix: process.env['GCS_DELETED_BUCKET_SUFFIX'], // if present, copy file to another bucket on delete. default null
deleteConcurrency: parseInt(process.env['GCS_DELETE_CONCURRENCY']) || 50,
signedUrlExpiryInMs: parseInt(process.env['LINK_EXPIRY_TIMEOUT'] || 60000)
unlockBeforeDelete: process.env.GCS_UNLOCK_BEFORE_DELETE === "true", // unlock an event-based hold before deleting. default false
deletedBucketSuffix: process.env.GCS_DELETED_BUCKET_SUFFIX, // if present, copy file to another bucket on delete. default null
deleteConcurrency: parseInt(process.env.GCS_DELETE_CONCURRENCY) || 50,
signedUrlExpiryInMs: parseInt(process.env.LINK_EXPIRY_TIMEOUT || 60000)
},
s3:
(process.env['AWS_ACCESS_KEY_ID'] != null) || (process.env['S3_BUCKET_CREDENTIALS'] != null) ?{
key: process.env['AWS_ACCESS_KEY_ID'],
secret: process.env['AWS_SECRET_ACCESS_KEY'],
endpoint: process.env['AWS_S3_ENDPOINT'],
pathStyle: process.env['AWS_S3_PATH_STYLE'],
partSize: process.env['AWS_S3_PARTSIZE'] || (100 * 1024 * 1024),
bucketCreds: ((process.env['S3_BUCKET_CREDENTIALS'] != null) ? JSON.parse(process.env['S3_BUCKET_CREDENTIALS']) : undefined)
(process.env.AWS_ACCESS_KEY_ID != null) || (process.env.S3_BUCKET_CREDENTIALS != null) ?{
key: process.env.AWS_ACCESS_KEY_ID,
secret: process.env.AWS_SECRET_ACCESS_KEY,
endpoint: process.env.AWS_S3_ENDPOINT,
pathStyle: process.env.AWS_S3_PATH_STYLE,
partSize: process.env.AWS_S3_PARTSIZE || (100 * 1024 * 1024),
bucketCreds: ((process.env.S3_BUCKET_CREDENTIALS != null) ? JSON.parse(process.env.S3_BUCKET_CREDENTIALS) : undefined)
} : undefined,
// GCS should be configured by the service account on the kubernetes pod. See GOOGLE_APPLICATION_CREDENTIALS,
// which will be picked up automatically.
stores: {
user_files: process.env['USER_FILES_BUCKET_NAME'],
template_files: process.env['TEMPLATE_FILES_BUCKET_NAME'],
public_files: process.env['PUBLIC_FILES_BUCKET_NAME']
user_files: process.env.USER_FILES_BUCKET_NAME,
template_files: process.env.TEMPLATE_FILES_BUCKET_NAME,
public_files: process.env.PUBLIC_FILES_BUCKET_NAME
},
fallback:
(process.env['FALLBACK_BACKEND'] != null) ?{
backend: process.env['FALLBACK_BACKEND'],
(process.env.FALLBACK_BACKEND != null) ?{
backend: process.env.FALLBACK_BACKEND,
// mapping of bucket names on the fallback, to bucket names on the primary.
// e.g. { myS3UserFilesBucketName: 'myGoogleUserFilesBucketName' }
buckets: JSON.parse(process.env['FALLBACK_BUCKET_MAPPING'] || '{}'),
copyOnMiss: process.env['COPY_ON_MISS'] === 'true'
buckets: JSON.parse(process.env.FALLBACK_BUCKET_MAPPING || '{}'),
copyOnMiss: process.env.COPY_ON_MISS === 'true'
} : undefined,
allowRedirects: process.env['ALLOW_REDIRECTS'] === 'true' ? true : false
allowRedirects: process.env.ALLOW_REDIRECTS === 'true'
},
path: {
@@ -98,7 +103,7 @@ const settings = {
convertCommandPrefix: []
},
enableConversions: process.env['ENABLE_CONVERSIONS'] === 'true' ? true : false,
enableConversions: process.env.ENABLE_CONVERSIONS === 'true',
sentry: {
dsn: process.env.SENTRY_DSN
@@ -108,10 +113,10 @@ const settings = {
// Filestore health check
// ----------------------
// Project and file details to check in persistor when calling /health_check
if ((process.env['HEALTH_CHECK_PROJECT_ID'] != null) && (process.env['HEALTH_CHECK_FILE_ID'] != null)) {
if ((process.env.HEALTH_CHECK_PROJECT_ID != null) && (process.env.HEALTH_CHECK_FILE_ID != null)) {
settings.health_check = {
project_id: process.env['HEALTH_CHECK_PROJECT_ID'],
file_id: process.env['HEALTH_CHECK_FILE_ID']
project_id: process.env.HEALTH_CHECK_PROJECT_ID,
file_id: process.env.HEALTH_CHECK_FILE_ID
};
}