// overleaf/services/docstore/config/settings.defaults.js

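// Allow up to 300 concurrent sockets per host on the default HTTP agent for outbound requests.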
const http = require('http')
http.globalAgent.maxSockets = 300

const Settings = {
  internal: {
    docstore: {
      port: 3016,
      host: process.env.LISTEN_ADDRESS || 'localhost',
    },
  },
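
  // MongoDB driver options; the connection URL itself is chosen below from the environment.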
  mongo: {
    options: {
      useUnifiedTopology:
        (process.env.MONGO_USE_UNIFIED_TOPOLOGY || 'true') === 'true',
    },
  },
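
  // Core docstore behaviour: soft-delete archiving, the object-storage backend and health checks.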
  docstore: {
    archiveOnSoftDelete: process.env.ARCHIVE_ON_SOFT_DELETE === 'true',
    keepSoftDeletedDocsArchived:
      process.env.KEEP_SOFT_DELETED_DOCS_ARCHIVED === 'true',
    backend: process.env.BACKEND || 's3',
    healthCheck: {
      project_id: process.env.HEALTH_CHECK_PROJECT_ID,
    },
    bucket: process.env.BUCKET_NAME || process.env.AWS_BUCKET || 'bucket',
    gcs: {
      unlockBeforeDelete: process.env.GCS_UNLOCK_BEFORE_DELETE === 'true',
      deletedBucketSuffix: process.env.GCS_DELETED_BUCKET_SUFFIX,
      deleteConcurrency: parseInt(process.env.GCS_DELETE_CONCURRENCY) || 50,
    },
  },
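
  // Limits and batch/concurrency sizes for archiving, unarchiving and destroying docs, all overridable via env vars.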
  max_deleted_docs: parseInt(process.env.MAX_DELETED_DOCS, 10) || 2000,
  max_doc_length: parseInt(process.env.MAX_DOC_LENGTH) || 2 * 1024 * 1024, // 2mb
  archiveBatchSize: parseInt(process.env.ARCHIVE_BATCH_SIZE, 10) || 50,
  unArchiveBatchSize: parseInt(process.env.UN_ARCHIVE_BATCH_SIZE, 10) || 50,
  destroyBatchSize: parseInt(process.env.DESTROY_BATCH_SIZE, 10) || 2000,
  destroyRetryCount: parseInt(process.env.DESTROY_RETRY_COUNT || '3', 10),
  parallelArchiveJobs: parseInt(process.env.PARALLEL_ARCHIVE_JOBS, 10) || 5,
}
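
// Pick the MongoDB connection URL: an explicit MONGO_CONNECTION_STRING wins, then MONGO_HOST, then a local default.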
if (process.env.MONGO_CONNECTION_STRING) {
  Settings.mongo.url = process.env.MONGO_CONNECTION_STRING
} else if (process.env.MONGO_HOST) {
  Settings.mongo.url = `mongodb://${process.env.MONGO_HOST}/sharelatex`
} else {
  Settings.mongo.url = 'mongodb://127.0.0.1/sharelatex'
}
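
// The S3 backend is only configured when a full set of AWS credentials and a bucket name are provided.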
if (
  process.env.AWS_ACCESS_KEY_ID &&
  process.env.AWS_SECRET_ACCESS_KEY &&
  process.env.AWS_BUCKET
) {
  Settings.docstore.s3 = {
    key: process.env.AWS_ACCESS_KEY_ID,
    secret: process.env.AWS_SECRET_ACCESS_KEY,
    bucket: process.env.AWS_BUCKET,
    endpoint: process.env.AWS_S3_ENDPOINT,
    pathStyle: process.env.AWS_S3_PATH_STYLE,
    partSize: parseInt(process.env.AWS_S3_PARTSIZE) || 100 * 1024 * 1024,
  }
}
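
// Optionally point the GCS client at a non-default API endpoint (for example a local emulator or test double).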
if (process.env.GCS_API_ENDPOINT) {
  Settings.docstore.gcs.endpoint = {
    apiEndpoint: process.env.GCS_API_ENDPOINT,
    apiScheme: process.env.GCS_API_SCHEME,
    projectId: process.env.GCS_PROJECT_ID,
  }
}
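
// Optional fallback object-storage backend, read when a doc is not found on the primary; COPY_ON_MISS controls whether such docs are then copied back to the primary.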
if (process.env.FALLBACK_BACKEND) {
  Settings.docstore.fallback = {
    backend: process.env.FALLBACK_BACKEND,
    // mapping of bucket names on the fallback, to bucket names on the primary.
    // e.g. { myS3UserFilesBucketName: 'myGoogleUserFilesBucketName' }
    buckets: JSON.parse(process.env.FALLBACK_BUCKET_MAPPING || '{}'),
    copyOnMiss: process.env.COPY_ON_MISS === 'true',
  }
}
module.exports = Settings
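
// Other docstore modules typically read these values through Overleaf's shared settings loader, e.g.
//   const Settings = require('@overleaf/settings')
//   const { backend, bucket } = Settings.docstore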