Mirror of https://github.com/overleaf/overleaf.git, synced 2024-11-14 20:40:17 -05:00

Commit 583b7d0030: merge multiple repositories into an existing monorepo

- merged using: 'monorepo_add.sh services-document-updater:services/document-updater' - see https://github.com/shopsys/monorepo-tools

136 changed files with 36213 additions and 0 deletions

services/document-updater/.dockerignore (Normal file, 7 lines added)
@@ -0,0 +1,7 @@
node_modules/*
gitrev
.git
.gitignore
.npm
.nvmrc
nodemon.json

services/document-updater/.eslintrc (Normal file, 86 lines added)
@@ -0,0 +1,86 @@
// this file was auto-generated, do not edit it directly.
// instead run bin/update_build_scripts from
// https://github.com/sharelatex/sharelatex-dev-environment
{
  "extends": [
    "eslint:recommended",
    "standard",
    "prettier"
  ],
  "parserOptions": {
    "ecmaVersion": 2018
  },
  "plugins": [
    "mocha",
    "chai-expect",
    "chai-friendly"
  ],
  "env": {
    "node": true,
    "mocha": true
  },
  "rules": {
    // TODO(das7pad): remove overrides after fixing all the violations manually (https://github.com/overleaf/issues/issues/3882#issuecomment-878999671)
    // START of temporary overrides
    "array-callback-return": "off",
    "no-dupe-else-if": "off",
    "no-var": "off",
    "no-empty": "off",
    "node/handle-callback-err": "off",
    "no-loss-of-precision": "off",
    "node/no-callback-literal": "off",
    "node/no-path-concat": "off",
    "prefer-regex-literals": "off",
    // END of temporary overrides

    // Swap the no-unused-expressions rule with a more chai-friendly one
    "no-unused-expressions": 0,
    "chai-friendly/no-unused-expressions": "error",

    // Do not allow importing of implicit dependencies.
    "import/no-extraneous-dependencies": "error"
  },
  "overrides": [
    {
      // Test specific rules
      "files": ["test/**/*.js"],
      "globals": {
        "expect": true
      },
      "rules": {
        // mocha-specific rules
        "mocha/handle-done-callback": "error",
        "mocha/no-exclusive-tests": "error",
        "mocha/no-global-tests": "error",
        "mocha/no-identical-title": "error",
        "mocha/no-nested-tests": "error",
        "mocha/no-pending-tests": "error",
        "mocha/no-skipped-tests": "error",
        "mocha/no-mocha-arrows": "error",

        // chai-specific rules
        "chai-expect/missing-assertion": "error",
        "chai-expect/terminating-properties": "error",

        // prefer-arrow-callback applies to all callbacks, not just ones in mocha tests.
        // we don't enforce this at the top-level - just in tests to manage `this` scope
        // based on mocha's context mechanism
        "mocha/prefer-arrow-callback": "error"
      }
    },
    {
      // Backend specific rules
      "files": ["app/**/*.js", "app.js", "index.js"],
      "rules": {
        // don't allow console.log in backend code
        "no-console": "error",

        // Do not allow importing of implicit dependencies.
        "import/no-extraneous-dependencies": ["error", {
          // Do not allow importing of devDependencies.
          "devDependencies": false
        }]
      }
    }
  ]
}
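
Aside (not part of the commit): the rule swap above exists because chai assertions often end in a bare property access, which the stock no-unused-expressions rule reports as dead code. A minimal sketch of a mocha/chai test, with hypothetical names, showing what chai-friendly/no-unused-expressions permits:

    // hypothetical test file; `expect` comes from chai
    const { expect } = require('chai')

    describe('UnusedExpressionsExample', function () {
      it('asserts via a property access', function () {
        const list = []
        // The next statement is a plain member expression, so the core
        // "no-unused-expressions" rule would flag it as dead code...
        expect(list).to.be.empty
        // ...while "chai-friendly/no-unused-expressions" recognises the chai
        // chain and allows it, still flagging genuinely unused expressions.
      })
    })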

services/document-updater/.github/ISSUE_TEMPLATE.md (vendored, Normal file, 38 lines added)
@@ -0,0 +1,38 @@
<!-- BUG REPORT TEMPLATE -->

## Steps to Reproduce
<!-- Describe the steps leading up to when / where you found the bug. -->
<!-- Screenshots may be helpful here. -->

1.
2.
3.

## Expected Behaviour
<!-- What should have happened when you completed the steps above? -->

## Observed Behaviour
<!-- What actually happened when you completed the steps above? -->
<!-- Screenshots may be helpful here. -->

## Context
<!-- How has this issue affected you? What were you trying to accomplish? -->

## Technical Info
<!-- Provide any technical details that may be applicable (or N/A if not applicable). -->

* URL:
* Browser Name and version:
* Operating System and version (desktop or mobile):
* Signed in as:
* Project and/or file:

## Analysis
<!--- Optionally, document investigation of / suggest a fix for the bug, e.g. 'comes from this line / commit' -->

## Who Needs to Know?
<!-- If you want to bring this to the attention of particular people, @-mention them below. -->
<!-- If a user reported this bug and should be notified when it is fixed, provide the Front conversation link. -->

-
-

services/document-updater/.github/PULL_REQUEST_TEMPLATE.md (vendored, Normal file, 48 lines added)
@@ -0,0 +1,48 @@

<!-- ** This is an Overleaf public repository ** -->

<!-- Please review https://github.com/overleaf/overleaf/blob/master/CONTRIBUTING.md for guidance on what is expected of a contribution. -->

### Description



#### Screenshots



#### Related Issues / PRs



### Review



#### Potential Impact



#### Manual Testing Performed

- [ ]
- [ ]

#### Accessibility



### Deployment



#### Deployment Checklist

- [ ] Update documentation not included in the PR (if any)
- [ ]

#### Metrics and Monitoring



#### Who Needs to Know?

services/document-updater/.github/dependabot.yml (vendored, Normal file, 23 lines added)
@@ -0,0 +1,23 @@
version: 2
updates:
  - package-ecosystem: "npm"
    directory: "/"
    schedule:
      interval: "daily"

    pull-request-branch-name:
      # Separate sections of the branch name with a hyphen
      # Docker images use the branch name and do not support slashes in tags
      # https://github.com/overleaf/google-ops/issues/822
      # https://docs.github.com/en/github/administering-a-repository/configuration-options-for-dependency-updates#pull-request-branch-nameseparator
      separator: "-"

    # Block informal upgrades -- security upgrades use a separate queue.
    # https://docs.github.com/en/github/administering-a-repository/configuration-options-for-dependency-updates#open-pull-requests-limit
    open-pull-requests-limit: 0

    # currently assign team-magma to all dependabot PRs - this may change in
    # future if we reorganise teams
    labels:
      - "dependencies"
      - "type:maintenance"

services/document-updater/.gitignore (vendored, Normal file, 52 lines added)
@@ -0,0 +1,52 @@
compileFolder

Compiled source #
###################
*.com
*.class
*.dll
*.exe
*.o
*.so

# Packages #
############
# it's better to unpack these files and commit the raw source
# git has its own built in compression methods
*.7z
*.dmg
*.gz
*.iso
*.jar
*.rar
*.tar
*.zip

# Logs and databases #
######################
*.log
*.sql
*.sqlite

# OS generated files #
######################
.DS_Store?
ehthumbs.db
Icon?
Thumbs.db

/node_modules/*



forever/

**.swp

# Redis cluster
**/appendonly.aof
**/dump.rdb
**/nodes.conf

# managed by dev-environment$ bin/update_build_scripts
.npmrc

services/document-updater/.mocharc.json (Normal file, 3 lines added)
@@ -0,0 +1,3 @@
{
  "require": "test/setup.js"
}
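
Aside (not part of the commit): .mocharc.json tells mocha to load test/setup.js before any test file. That file is not shown in this excerpt; a plausible sketch, assuming chai is a dev dependency, is a setup that exposes `expect` as a global, which would also explain the `"globals": { "expect": true }` override in the .eslintrc above:

    // hypothetical test/setup.js
    const chai = require('chai')

    // expose chai's expect globally so test files can use it without
    // requiring chai themselves (matching the ESLint test override)
    global.expect = chai.expect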

services/document-updater/.nvmrc (Normal file, 1 line added)
@@ -0,0 +1 @@
12.22.3

services/document-updater/.prettierrc (Normal file, 11 lines added)
@@ -0,0 +1,11 @@
# This file was auto-generated, do not edit it directly.
# Instead run bin/update_build_scripts from
# https://github.com/sharelatex/sharelatex-dev-environment
{
  "arrowParens": "avoid",
  "semi": false,
  "singleQuote": true,
  "trailingComma": "es5",
  "tabWidth": 2,
  "useTabs": false
}
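
Aside (not part of the commit): a before/after sketch of a hypothetical line of JavaScript under this config, showing "singleQuote", "semi": false, "tabWidth": 2 and "arrowParens": "avoid" in action:

    // before formatting (hypothetical input)
    const greet = (name) => { return "hello " + name; };

    // after prettier with this config: single quotes, no semicolons,
    // two-space indent, and parens dropped around the lone parameter
    const greet = name => {
      return 'hello ' + name
    }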

services/document-updater/Dockerfile (Normal file, 23 lines added)
@@ -0,0 +1,23 @@
# This file was auto-generated, do not edit it directly.
# Instead run bin/update_build_scripts from
# https://github.com/sharelatex/sharelatex-dev-environment

FROM node:12.22.3 as base

WORKDIR /app

FROM base as app

#wildcard as some files may not be in all repos
COPY package*.json npm-shrink*.json /app/

RUN npm ci --quiet

COPY . /app

FROM base

COPY --from=app /app /app
USER node

CMD ["node", "--expose-gc", "app.js"]

services/document-updater/LICENSE (Normal file, 662 lines added)
@@ -0,0 +1,662 @@

                    GNU AFFERO GENERAL PUBLIC LICENSE
                       Version 3, 19 November 2007

 Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.

                            Preamble

  The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.

  The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
our General Public Licenses are intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.

  When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.

  Developers that use our General Public Licenses protect your rights
with two steps: (1) assert copyright on the software, and (2) offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.

  A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate. Many developers of free software are heartened and
encouraged by the resulting cooperation. However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public.

  The GNU Affero General Public License is designed specifically to
ensure that, in such cases, the modified source code becomes available
to the community. It requires the operator of a network server to
provide the source code of the modified version running there to the
users of that server. Therefore, public use of a modified version, on
a publicly accessible server, gives the public access to the source
code of the modified version.

  An older license, called the Affero General Public License and
published by Affero, was designed to accomplish similar goals. This is
a different license, not a version of the Affero GPL, but Affero has
released a new version of the Affero GPL which permits relicensing under
this license.

  The precise terms and conditions for copying, distribution and
modification follow.

                       TERMS AND CONDITIONS

  0. Definitions.

  "This License" refers to version 3 of the GNU Affero General Public License.

  "Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.

  "The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.

  To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.

  A "covered work" means either the unmodified Program or a work based
on the Program.

  To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.

  To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.

  An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.

  1. Source Code.

  The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.

  A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.

  The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.

  The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.

  The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.

  The Corresponding Source for a work in source code form is that
same work.

  2. Basic Permissions.

  All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.

  You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.

  Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.

  3. Protecting Users' Legal Rights From Anti-Circumvention Law.

  No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.

  When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.

  4. Conveying Verbatim Copies.

  You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.

  You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.

  5. Conveying Modified Source Versions.

  You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:

    a) The work must carry prominent notices stating that you modified
    it, and giving a relevant date.

    b) The work must carry prominent notices stating that it is
    released under this License and any conditions added under section
    7. This requirement modifies the requirement in section 4 to
    "keep intact all notices".

    c) You must license the entire work, as a whole, under this
    License to anyone who comes into possession of a copy. This
    License will therefore apply, along with any applicable section 7
    additional terms, to the whole of the work, and all its parts,
    regardless of how they are packaged. This License gives no
    permission to license the work in any other way, but it does not
    invalidate such permission if you have separately received it.

    d) If the work has interactive user interfaces, each must display
    Appropriate Legal Notices; however, if the Program has interactive
    interfaces that do not display Appropriate Legal Notices, your
    work need not make them do so.

  A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.

  6. Conveying Non-Source Forms.

  You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:

    a) Convey the object code in, or embodied in, a physical product
    (including a physical distribution medium), accompanied by the
    Corresponding Source fixed on a durable physical medium
    customarily used for software interchange.

    b) Convey the object code in, or embodied in, a physical product
    (including a physical distribution medium), accompanied by a
    written offer, valid for at least three years and valid for as
    long as you offer spare parts or customer support for that product
    model, to give anyone who possesses the object code either (1) a
    copy of the Corresponding Source for all the software in the
    product that is covered by this License, on a durable physical
    medium customarily used for software interchange, for a price no
    more than your reasonable cost of physically performing this
    conveying of source, or (2) access to copy the
    Corresponding Source from a network server at no charge.

    c) Convey individual copies of the object code with a copy of the
    written offer to provide the Corresponding Source. This
    alternative is allowed only occasionally and noncommercially, and
    only if you received the object code with such an offer, in accord
    with subsection 6b.

    d) Convey the object code by offering access from a designated
    place (gratis or for a charge), and offer equivalent access to the
    Corresponding Source in the same way through the same place at no
    further charge. You need not require recipients to copy the
    Corresponding Source along with the object code. If the place to
    copy the object code is a network server, the Corresponding Source
    may be on a different server (operated by you or a third party)
    that supports equivalent copying facilities, provided you maintain
    clear directions next to the object code saying where to find the
    Corresponding Source. Regardless of what server hosts the
    Corresponding Source, you remain obligated to ensure that it is
    available for as long as needed to satisfy these requirements.

    e) Convey the object code using peer-to-peer transmission, provided
    you inform other peers where the object code and Corresponding
    Source of the work are being offered to the general public at no
    charge under subsection 6d.

  A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.

  A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.

  "Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.

  If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).

  The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.

  Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.

  7. Additional Terms.

  "Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.

  When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.

  Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:

    a) Disclaiming warranty or limiting liability differently from the
    terms of sections 15 and 16 of this License; or

    b) Requiring preservation of specified reasonable legal notices or
    author attributions in that material or in the Appropriate Legal
    Notices displayed by works containing it; or

    c) Prohibiting misrepresentation of the origin of that material, or
    requiring that modified versions of such material be marked in
    reasonable ways as different from the original version; or

    d) Limiting the use for publicity purposes of names of licensors or
    authors of the material; or

    e) Declining to grant rights under trademark law for use of some
    trade names, trademarks, or service marks; or

    f) Requiring indemnification of licensors and authors of that
    material by anyone who conveys the material (or modified versions of
    it) with contractual assumptions of liability to the recipient, for
    any liability that these contractual assumptions directly impose on
    those licensors and authors.

  All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.

  If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.

  Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.

  8. Termination.

  You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).

  However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.

  Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.

  Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.

  9. Acceptance Not Required for Having Copies.

  You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.

  10. Automatic Licensing of Downstream Recipients.

  Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.

  An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.

  You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.

  11. Patents.

  A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".

  A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.

  Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.

  In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.

  If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.

  If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.

  A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.

  Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.

  12. No Surrender of Others' Freedom.

  If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.

  13. Remote Network Interaction; Use with the GNU General Public License.

  Notwithstanding any other provision of this License, if you modify the
Program, your modified version must prominently offer all users
interacting with it remotely through a computer network (if your version
supports such interaction) an opportunity to receive the Corresponding
Source of your version by providing access to the Corresponding Source
from a network server at no charge, through some standard or customary
means of facilitating copying of software. This Corresponding Source
shall include the Corresponding Source for any work covered by version 3
of the GNU General Public License that is incorporated pursuant to the
following paragraph.

  Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the work with which it is combined will remain governed by version
3 of the GNU General Public License.

  14. Revised Versions of this License.

  The Free Software Foundation may publish revised and/or new versions of
the GNU Affero General Public License from time to time. Such new versions
will be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.

  Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU Affero General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU Affero General Public License, you may choose any version ever published
by the Free Software Foundation.

  If the Program specifies that a proxy can decide which future
versions of the GNU Affero General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.

  Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.

  15. Disclaimer of Warranty.

  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

  16. Limitation of Liability.

  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.

  17. Interpretation of Sections 15 and 16.

  If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.

                     END OF TERMS AND CONDITIONS

            How to Apply These Terms to Your New Programs

  If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.

  To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.

    <one line to give the program's name and a brief idea of what it does.>
    Copyright (C) <year> <name of author>

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU Affero General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU Affero General Public License for more details.

    You should have received a copy of the GNU Affero General Public License
    along with this program. If not, see <http://www.gnu.org/licenses/>.

  Also add information on how to contact you by electronic and paper mail.

  If your software can interact with users remotely through a computer
network, you should also make sure that it provides a way for users to
get its source. For example, if your program is a web application, its
interface could display a "Source" link that leads users to an archive
of the code. There are many ways you could offer source, and different
solutions will be better for different programs; see section 13 for the
specific requirements.

  You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU AGPL, see
<http://www.gnu.org/licenses/>.

services/document-updater/Makefile (Normal file, 90 lines added)
@@ -0,0 +1,90 @@
# This file was auto-generated, do not edit it directly.
# Instead run bin/update_build_scripts from
# https://github.com/sharelatex/sharelatex-dev-environment

BUILD_NUMBER ?= local
BRANCH_NAME ?= $(shell git rev-parse --abbrev-ref HEAD)
PROJECT_NAME = document-updater
BUILD_DIR_NAME = $(shell pwd | xargs basename | tr -cd '[a-zA-Z0-9_.\-]')

DOCKER_COMPOSE_FLAGS ?= -f docker-compose.yml
DOCKER_COMPOSE := BUILD_NUMBER=$(BUILD_NUMBER) \
	BRANCH_NAME=$(BRANCH_NAME) \
	PROJECT_NAME=$(PROJECT_NAME) \
	MOCHA_GREP=${MOCHA_GREP} \
	docker-compose ${DOCKER_COMPOSE_FLAGS}

DOCKER_COMPOSE_TEST_ACCEPTANCE = \
	COMPOSE_PROJECT_NAME=test_acceptance_$(BUILD_DIR_NAME) $(DOCKER_COMPOSE)

DOCKER_COMPOSE_TEST_UNIT = \
	COMPOSE_PROJECT_NAME=test_unit_$(BUILD_DIR_NAME) $(DOCKER_COMPOSE)

clean:
	-docker rmi ci/$(PROJECT_NAME):$(BRANCH_NAME)-$(BUILD_NUMBER)
	-docker rmi gcr.io/overleaf-ops/$(PROJECT_NAME):$(BRANCH_NAME)-$(BUILD_NUMBER)
	-$(DOCKER_COMPOSE_TEST_UNIT) down --rmi local
	-$(DOCKER_COMPOSE_TEST_ACCEPTANCE) down --rmi local

format:
	$(DOCKER_COMPOSE) run --rm test_unit npm run --silent format

format_fix:
	$(DOCKER_COMPOSE) run --rm test_unit npm run --silent format:fix

lint:
	$(DOCKER_COMPOSE) run --rm test_unit npm run --silent lint

test: format lint test_unit test_acceptance

test_unit:
ifneq (,$(wildcard test/unit))
	$(DOCKER_COMPOSE_TEST_UNIT) run --rm test_unit
	$(MAKE) test_unit_clean
endif

test_clean: test_unit_clean
test_unit_clean:
ifneq (,$(wildcard test/unit))
	$(DOCKER_COMPOSE_TEST_UNIT) down -v -t 0
endif

test_acceptance: test_acceptance_clean test_acceptance_pre_run test_acceptance_run
	$(MAKE) test_acceptance_clean

test_acceptance_debug: test_acceptance_clean test_acceptance_pre_run test_acceptance_run_debug
	$(MAKE) test_acceptance_clean

test_acceptance_run:
ifneq (,$(wildcard test/acceptance))
	$(DOCKER_COMPOSE_TEST_ACCEPTANCE) run --rm test_acceptance
endif

test_acceptance_run_debug:
ifneq (,$(wildcard test/acceptance))
	$(DOCKER_COMPOSE_TEST_ACCEPTANCE) run -p 127.0.0.9:19999:19999 --rm test_acceptance npm run test:acceptance -- --inspect=0.0.0.0:19999 --inspect-brk
endif

test_clean: test_acceptance_clean
test_acceptance_clean:
	$(DOCKER_COMPOSE_TEST_ACCEPTANCE) down -v -t 0

test_acceptance_pre_run:
ifneq (,$(wildcard test/acceptance/js/scripts/pre-run))
	$(DOCKER_COMPOSE_TEST_ACCEPTANCE) run --rm test_acceptance test/acceptance/js/scripts/pre-run
endif

build:
	docker build --pull --tag ci/$(PROJECT_NAME):$(BRANCH_NAME)-$(BUILD_NUMBER) \
		--tag gcr.io/overleaf-ops/$(PROJECT_NAME):$(BRANCH_NAME)-$(BUILD_NUMBER) \
		.

tar:
	$(DOCKER_COMPOSE) up tar

publish:

	docker push $(DOCKER_REPO)/$(PROJECT_NAME):$(BRANCH_NAME)-$(BUILD_NUMBER)


.PHONY: clean test test_unit test_acceptance test_clean build publish

services/document-updater/README.md (Normal file, 12 lines added)
@@ -0,0 +1,12 @@
overleaf/document-updater
===========================

An API for applying incoming updates to documents in real-time.

License
-------

The code in this repository is released under the GNU AFFERO GENERAL PUBLIC LICENSE, version 3. A copy can be found in the `LICENSE` file.

Copyright (c) Overleaf, 2014-2019.

services/document-updater/app.js (Normal file, 258 lines added)
@@ -0,0 +1,258 @@
const Metrics = require('@overleaf/metrics')
Metrics.initialize('doc-updater')

const express = require('express')
const Settings = require('@overleaf/settings')
const logger = require('logger-sharelatex')
logger.initialize('document-updater')

logger.logger.addSerializers(require('./app/js/LoggerSerializers'))

if (Settings.sentry != null && Settings.sentry.dsn != null) {
  logger.initializeErrorReporting(Settings.sentry.dsn)
}

const RedisManager = require('./app/js/RedisManager')
const DispatchManager = require('./app/js/DispatchManager')
const DeleteQueueManager = require('./app/js/DeleteQueueManager')
const Errors = require('./app/js/Errors')
const HttpController = require('./app/js/HttpController')
const mongodb = require('./app/js/mongodb')
const async = require('async')

const Path = require('path')
const bodyParser = require('body-parser')

Metrics.mongodb.monitor(
  Path.resolve(__dirname, '/node_modules/mongodb'),
  logger
)
Metrics.event_loop.monitor(logger, 100)

const app = express()
app.use(Metrics.http.monitor(logger))
app.use(bodyParser.json({ limit: Settings.maxJsonRequestSize }))
Metrics.injectMetricsRoute(app)

DispatchManager.createAndStartDispatchers(Settings.dispatcherCount)

app.param('project_id', (req, res, next, projectId) => {
  if (projectId != null && projectId.match(/^[0-9a-f]{24}$/)) {
    return next()
  } else {
    return next(new Error('invalid project id'))
  }
})

app.param('doc_id', (req, res, next, docId) => {
  if (docId != null && docId.match(/^[0-9a-f]{24}$/)) {
    return next()
  } else {
    return next(new Error('invalid doc id'))
  }
})
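
// --- aside: illustration only, not part of app.js ---------------------------
// Both param checks above accept only 24-character lowercase hex strings, the
// text form of a MongoDB ObjectId, so malformed ids are rejected before any
// controller code runs. With made-up values:
//
//   /^[0-9a-f]{24}$/.test('507f1f77bcf86cd799439011') // true
//   /^[0-9a-f]{24}$/.test('not-a-doc-id')             // false -> 'invalid doc id'
//   /^[0-9a-f]{24}$/.test('507F1F77BCF86CD799439011') // false, uppercase rejected
// -----------------------------------------------------------------------------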

app.get('/project/:project_id/doc/:doc_id', HttpController.getDoc)
app.get('/project/:project_id/doc/:doc_id/peek', HttpController.peekDoc)
// temporarily keep the GET method for backwards compatibility
app.get('/project/:project_id/doc', HttpController.getProjectDocsAndFlushIfOld)
// will migrate to the POST method of get_and_flush_if_old instead
app.post(
  '/project/:project_id/get_and_flush_if_old',
  HttpController.getProjectDocsAndFlushIfOld
)
app.post('/project/:project_id/clearState', HttpController.clearProjectState)
app.post('/project/:project_id/doc/:doc_id', HttpController.setDoc)
app.post(
  '/project/:project_id/doc/:doc_id/flush',
  HttpController.flushDocIfLoaded
)
app.delete('/project/:project_id/doc/:doc_id', HttpController.deleteDoc)
app.delete('/project/:project_id', HttpController.deleteProject)
app.delete('/project', HttpController.deleteMultipleProjects)
app.post('/project/:project_id', HttpController.updateProject)
app.post(
  '/project/:project_id/history/resync',
  HttpController.resyncProjectHistory
)
app.post('/project/:project_id/flush', HttpController.flushProject)
app.post(
  '/project/:project_id/doc/:doc_id/change/:change_id/accept',
  HttpController.acceptChanges
)
app.post(
  '/project/:project_id/doc/:doc_id/change/accept',
  HttpController.acceptChanges
)
app.delete(
  '/project/:project_id/doc/:doc_id/comment/:comment_id',
  HttpController.deleteComment
)

app.get('/flush_all_projects', HttpController.flushAllProjects)
app.get('/flush_queued_projects', HttpController.flushQueuedProjects)
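
// --- aside: illustration only, not part of app.js ---------------------------
// The routes above are the service's internal HTTP API; each delegates to
// HttpController. A rough sketch of a caller, using Node's built-in http
// client and made-up ids (the default port 3003 is set near the end of this
// file):
//
//   const http = require('http')
//   const projectId = '507f1f77bcf86cd799439011' // must pass the param checks
//   const docId = '507f191e810c19729de860ea'
//   http.get(`http://localhost:3003/project/${projectId}/doc/${docId}`, res => {
//     let body = ''
//     res.on('data', chunk => { body += chunk })
//     res.on('end', () => console.log(res.statusCode, body))
//   })
// -----------------------------------------------------------------------------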

app.get('/total', (req, res, next) => {
  const timer = new Metrics.Timer('http.allDocList')
  RedisManager.getCountOfDocsInMemory((err, count) => {
    if (err) {
      return next(err)
    }
    timer.done()
    res.send({ total: count })
  })
})

app.get('/status', (req, res) => {
  if (Settings.shuttingDown) {
    return res.sendStatus(503) // Service unavailable
  } else {
    return res.send('document updater is alive')
  }
})

const pubsubClient = require('@overleaf/redis-wrapper').createClient(
  Settings.redis.pubsub
)
app.get('/health_check/redis', (req, res, next) => {
  pubsubClient.healthCheck(error => {
    if (error) {
      logger.err({ err: error }, 'failed redis health check')
      return res.sendStatus(500)
    } else {
      return res.sendStatus(200)
    }
  })
})

const docUpdaterRedisClient = require('@overleaf/redis-wrapper').createClient(
  Settings.redis.documentupdater
)
app.get('/health_check/redis_cluster', (req, res, next) => {
  docUpdaterRedisClient.healthCheck(error => {
    if (error) {
      logger.err({ err: error }, 'failed redis cluster health check')
      return res.sendStatus(500)
    } else {
      return res.sendStatus(200)
    }
  })
})

app.get('/health_check', (req, res, next) => {
  async.series(
    [
      cb => {
        pubsubClient.healthCheck(error => {
          if (error) {
            logger.err({ err: error }, 'failed redis health check')
          }
          cb(error)
        })
      },
      cb => {
        docUpdaterRedisClient.healthCheck(error => {
          if (error) {
            logger.err({ err: error }, 'failed redis cluster health check')
          }
          cb(error)
        })
      },
      cb => {
        mongodb.healthCheck(error => {
          if (error) {
            logger.err({ err: error }, 'failed mongo health check')
          }
          cb(error)
        })
      },
    ],
    error => {
      if (error) {
        return res.sendStatus(500)
      } else {
        return res.sendStatus(200)
      }
    }
  )
})

app.use((error, req, res, next) => {
  if (error instanceof Errors.NotFoundError) {
    return res.sendStatus(404)
  } else if (error instanceof Errors.OpRangeNotAvailableError) {
    return res.sendStatus(422) // Unprocessable Entity
  } else if (error.statusCode === 413) {
    return res.status(413).send('request entity too large')
  } else {
    logger.error({ err: error, req }, 'request errored')
    return res.status(500).send('Oops, something went wrong')
  }
})
||||
|
||||
const shutdownCleanly = signal => () => {
|
||||
logger.log({ signal }, 'received interrupt, cleaning up')
|
||||
Settings.shuttingDown = true
|
||||
setTimeout(() => {
|
||||
logger.log({ signal }, 'shutting down')
|
||||
process.exit()
|
||||
}, 10000)
|
||||
}
|
||||
|
||||
const watchForEvent = eventName => {
|
||||
docUpdaterRedisClient.on(eventName, e => {
|
||||
console.log(`redis event: ${eventName} ${e}`) // eslint-disable-line no-console
|
||||
})
|
||||
}
|
||||
|
||||
const events = ['connect', 'ready', 'error', 'close', 'reconnecting', 'end']
|
||||
for (const eventName of events) {
|
||||
watchForEvent(eventName)
|
||||
}
|
||||
|
||||
const port =
|
||||
Settings.internal.documentupdater.port ||
|
||||
(Settings.api &&
|
||||
Settings.api.documentupdater &&
|
||||
Settings.api.documentupdater.port) ||
|
||||
3003
|
||||
const host = Settings.internal.documentupdater.host || 'localhost'
|
||||
|
||||
if (!module.parent) {
|
||||
// Called directly
|
||||
mongodb
|
||||
.waitForDb()
|
||||
.then(() => {
|
||||
app.listen(port, host, function (err) {
|
||||
if (err) {
|
||||
logger.fatal({ err }, `Cannot bind to ${host}:${port}. Exiting.`)
|
||||
process.exit(1)
|
||||
}
|
||||
logger.info(
|
||||
`Document-updater starting up, listening on ${host}:${port}`
|
||||
)
|
||||
if (Settings.continuousBackgroundFlush) {
|
||||
logger.info('Starting continuous background flush')
|
||||
DeleteQueueManager.startBackgroundFlush()
|
||||
}
|
||||
})
|
||||
})
|
||||
.catch(err => {
|
||||
logger.fatal({ err }, 'Cannot connect to mongo. Exiting.')
|
||||
process.exit(1)
|
||||
})
|
||||
}
|
||||
|
||||
module.exports = app
|
||||
|
||||
for (const signal of [
|
||||
'SIGINT',
|
||||
'SIGHUP',
|
||||
'SIGQUIT',
|
||||
'SIGUSR1',
|
||||
'SIGUSR2',
|
||||
'SIGTERM',
|
||||
'SIGABRT',
|
||||
]) {
|
||||
process.on(signal, shutdownCleanly(signal))
|
||||
}
|
143
services/document-updater/app/js/DeleteQueueManager.js
Normal file
@@ -0,0 +1,143 @@
/* eslint-disable
    camelcase,
    handle-callback-err,
    no-unused-vars,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
let DeleteQueueManager
const Settings = require('@overleaf/settings')
const RedisManager = require('./RedisManager')
const ProjectManager = require('./ProjectManager')
const logger = require('logger-sharelatex')
const metrics = require('./Metrics')
const async = require('async')

// Maintain a sorted set of project flushAndDelete requests, ordered by timestamp
// (ZADD), and process them from oldest to newest. A flushAndDelete request comes
// from real-time and is triggered when a user leaves a project.
//
// The aim is to remove the project from redis 5 minutes after the last request
// if there has been no activity (document updates) in that time. If there is
// activity we can expect a further flushAndDelete request when the editing user
// leaves the project.
//
// If a new flushAndDelete request comes in while an existing request is already
// in the queue we update the timestamp as we can postpone flushing further.
//
// Documents are processed by checking the queue, seeing if the first entry is
// older than 5 minutes, and popping it from the queue in that case.
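// A minimal sketch of the queue operations described above, assuming an
// ioredis-style client; the key name 'DeletedProjectsQueue' is illustrative
// (the real key comes from the redis key_schema used by RedisManager):
//
//   // enqueue, or postpone an already-queued project: ZADD keeps a single
//   // entry per project and simply refreshes its score (the timestamp)
//   rclient.zadd('DeletedProjectsQueue', Date.now(), project_id)
//
//   // dequeue: take the oldest entry, but only if it is older than the cutoff
//   rclient.zrangebyscore('DeletedProjectsQueue', 0, cutoffTime, 'LIMIT', 0, 1)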
module.exports = DeleteQueueManager = {
  flushAndDeleteOldProjects(options, callback) {
    const startTime = Date.now()
    const cutoffTime =
      startTime - options.min_delete_age + 100 * (Math.random() - 0.5)
    let count = 0

    const flushProjectIfNotModified = (project_id, flushTimestamp, cb) =>
      ProjectManager.getProjectDocsTimestamps(
        project_id,
        function (err, timestamps) {
          if (err != null) {
            return callback(err)
          }
          if (timestamps.length === 0) {
            logger.log(
              { project_id },
              'skipping flush of queued project - no timestamps'
            )
            return cb()
          }
          // are any of the timestamps newer than the time the project was flushed?
          for (const timestamp of Array.from(timestamps)) {
            if (timestamp > flushTimestamp) {
              metrics.inc('queued-delete-skipped')
              logger.debug(
                { project_id, timestamps, flushTimestamp },
                'found newer timestamp, will skip delete'
              )
              return cb()
            }
          }
          logger.log({ project_id, flushTimestamp }, 'flushing queued project')
          return ProjectManager.flushAndDeleteProjectWithLocks(
            project_id,
            { skip_history_flush: false },
            function (err) {
              if (err != null) {
                logger.err({ project_id, err }, 'error flushing queued project')
              }
              metrics.inc('queued-delete-completed')
              return cb(null, true)
            }
          )
        }
      )

    var flushNextProject = function () {
      const now = Date.now()
      if (now - startTime > options.timeout) {
        logger.log('hit time limit on flushing old projects')
        return callback(null, count)
      }
      if (count > options.limit) {
        logger.log('hit count limit on flushing old projects')
        return callback(null, count)
      }
      return RedisManager.getNextProjectToFlushAndDelete(
        cutoffTime,
        function (err, project_id, flushTimestamp, queueLength) {
          if (err != null) {
            return callback(err)
          }
          if (project_id == null) {
            return callback(null, count)
          }
          logger.log({ project_id, queueLength }, 'flushing queued project')
          metrics.globalGauge('queued-flush-backlog', queueLength)
          return flushProjectIfNotModified(
            project_id,
            flushTimestamp,
            function (err, flushed) {
              if (flushed) {
                count++
              }
              return flushNextProject()
            }
          )
        }
      )
    }

    return flushNextProject()
  },

  startBackgroundFlush() {
    const SHORT_DELAY = 10
    const LONG_DELAY = 1000
    var doFlush = function () {
      if (Settings.shuttingDown) {
        logger.warn('discontinuing background flush due to shutdown')
        return
      }
      return DeleteQueueManager.flushAndDeleteOldProjects(
        {
          timeout: 1000,
          min_delete_age: 3 * 60 * 1000,
          limit: 1000, // high value, to ensure we always flush enough projects
        },
        (err, flushed) =>
          setTimeout(doFlush, flushed > 10 ? SHORT_DELAY : LONG_DELAY)
      )
    }
    return doFlush()
  },
}
40
services/document-updater/app/js/DiffCodec.js
Normal file
@@ -0,0 +1,40 @@
const DMP = require('diff-match-patch')
const dmp = new DMP()

// Do not attempt to produce a diff for more than 100ms
dmp.Diff_Timeout = 0.1

module.exports = {
  ADDED: 1,
  REMOVED: -1,
  UNCHANGED: 0,

  diffAsShareJsOp(before, after, callback) {
    const diffs = dmp.diff_main(before.join('\n'), after.join('\n'))
    dmp.diff_cleanupSemantic(diffs)

    const ops = []
    let position = 0
    for (const diff of diffs) {
      const type = diff[0]
      const content = diff[1]
      if (type === this.ADDED) {
        ops.push({
          i: content,
          p: position,
        })
        position += content.length
      } else if (type === this.REMOVED) {
        ops.push({
          d: content,
          p: position,
        })
      } else if (type === this.UNCHANGED) {
        position += content.length
      } else {
        throw new Error('Unknown type')
      }
    }
    callback(null, ops)
  },
}
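// Usage sketch -- the exact segmentation depends on diff-match-patch's
// semantic cleanup, but the op shapes are fixed: inserts are { i, p } and
// deletes are { d, p }, with p a character offset into the lines joined
// by '\n' (which is why position also advances over UNCHANGED text):
//
//   const DiffCodec = require('./DiffCodec')
//   DiffCodec.diffAsShareJsOp(['hello world'], ['hello brave world'], (err, ops) => {
//     // ops ≈ [{ i: 'brave ', p: 6 }]
//   })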
114
services/document-updater/app/js/DispatchManager.js
Normal file
@@ -0,0 +1,114 @@
/* eslint-disable
    camelcase,
    handle-callback-err,
    no-unused-vars,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * DS202: Simplify dynamic range loops
 * DS205: Consider reworking code to avoid use of IIFEs
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
let DispatchManager
const Settings = require('@overleaf/settings')
const logger = require('logger-sharelatex')
const Keys = require('./UpdateKeys')
const redis = require('@overleaf/redis-wrapper')
const Errors = require('./Errors')
const _ = require('lodash')

const UpdateManager = require('./UpdateManager')
const Metrics = require('./Metrics')
const RateLimitManager = require('./RateLimitManager')

module.exports = DispatchManager = {
  createDispatcher(RateLimiter, queueShardNumber) {
    let pendingListKey
    if (queueShardNumber === 0) {
      pendingListKey = 'pending-updates-list'
    } else {
      pendingListKey = `pending-updates-list-${queueShardNumber}`
    }

    const client = redis.createClient(Settings.redis.documentupdater)
    var worker = {
      client,
      _waitForUpdateThenDispatchWorker(callback) {
        if (callback == null) {
          callback = function (error) {}
        }
        const timer = new Metrics.Timer('worker.waiting')
        return worker.client.blpop(pendingListKey, 0, function (error, result) {
          logger.log(`getting ${queueShardNumber}`, error, result)
          timer.done()
          if (error != null) {
            return callback(error)
          }
          if (result == null) {
            return callback()
          }
          const [list_name, doc_key] = Array.from(result)
          const [project_id, doc_id] = Array.from(
            Keys.splitProjectIdAndDocId(doc_key)
          )
          // Dispatch this in the background
          const backgroundTask = cb =>
            UpdateManager.processOutstandingUpdatesWithLock(
              project_id,
              doc_id,
              function (error) {
                // log everything except OpRangeNotAvailable errors, these are normal
                if (error != null) {
                  // downgrade OpRangeNotAvailable and "Delete component" errors so they are not sent to sentry
                  const logAsWarning =
                    error instanceof Errors.OpRangeNotAvailableError ||
                    error instanceof Errors.DeleteMismatchError
                  if (logAsWarning) {
                    logger.warn(
                      { err: error, project_id, doc_id },
                      'error processing update'
                    )
                  } else {
                    logger.error(
                      { err: error, project_id, doc_id },
                      'error processing update'
                    )
                  }
                }
                return cb()
              }
            )
          return RateLimiter.run(backgroundTask, callback)
        })
      },

      run() {
        if (Settings.shuttingDown) {
          return
        }
        return worker._waitForUpdateThenDispatchWorker(error => {
          if (error != null) {
            logger.error({ err: error }, 'Error in worker process')
            throw error
          } else {
            return worker.run()
          }
        })
      },
    }

    return worker
  },

  createAndStartDispatchers(number) {
    const RateLimiter = new RateLimitManager(number)
    _.times(number, function (shardNumber) {
      return DispatchManager.createDispatcher(RateLimiter, shardNumber).run()
    })
  },
}
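// For reference, the shard-to-key mapping this produces: shard 0 consumes
// 'pending-updates-list', shard 1 'pending-updates-list-1', shard 2
// 'pending-updates-list-2', and so on. The producer side (real-time) is
// presumed to push doc updates onto these same key names; that mapping
// lives outside this file.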
808
services/document-updater/app/js/DocumentManager.js
Normal file
@@ -0,0 +1,808 @@
/* eslint-disable
    camelcase,
    handle-callback-err,
    no-unused-vars,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
let DocumentManager
const RedisManager = require('./RedisManager')
const ProjectHistoryRedisManager = require('./ProjectHistoryRedisManager')
const PersistenceManager = require('./PersistenceManager')
const DiffCodec = require('./DiffCodec')
const logger = require('logger-sharelatex')
const Metrics = require('./Metrics')
const HistoryManager = require('./HistoryManager')
const RealTimeRedisManager = require('./RealTimeRedisManager')
const Errors = require('./Errors')
const RangesManager = require('./RangesManager')
const async = require('async')

const MAX_UNFLUSHED_AGE = 300 * 1000 // 5 mins; a document should be flushed to mongo within this time of a change

module.exports = DocumentManager = {
  getDoc(project_id, doc_id, _callback) {
    if (_callback == null) {
      _callback = function (
        error,
        lines,
        version,
        ranges,
        pathname,
        projectHistoryId,
        unflushedTime,
        alreadyLoaded
      ) {}
    }
    const timer = new Metrics.Timer('docManager.getDoc')
    const callback = function (...args) {
      timer.done()
      return _callback(...Array.from(args || []))
    }

    return RedisManager.getDoc(
      project_id,
      doc_id,
      function (
        error,
        lines,
        version,
        ranges,
        pathname,
        projectHistoryId,
        unflushedTime
      ) {
        if (error != null) {
          return callback(error)
        }
        if (lines == null || version == null) {
          logger.log(
            { project_id, doc_id },
            'doc not in redis so getting from persistence API'
          )
          return PersistenceManager.getDoc(
            project_id,
            doc_id,
            function (
              error,
              lines,
              version,
              ranges,
              pathname,
              projectHistoryId,
              projectHistoryType
            ) {
              if (error != null) {
                return callback(error)
              }
              logger.log(
                {
                  project_id,
                  doc_id,
                  lines,
                  version,
                  pathname,
                  projectHistoryId,
                  projectHistoryType,
                },
                'got doc from persistence API'
              )
              return RedisManager.putDocInMemory(
                project_id,
                doc_id,
                lines,
                version,
                ranges,
                pathname,
                projectHistoryId,
                function (error) {
                  if (error != null) {
                    return callback(error)
                  }
                  return RedisManager.setHistoryType(
                    doc_id,
                    projectHistoryType,
                    function (error) {
                      if (error != null) {
                        return callback(error)
                      }
                      return callback(
                        null,
                        lines,
                        version,
                        ranges || {},
                        pathname,
                        projectHistoryId,
                        null,
                        false
                      )
                    }
                  )
                }
              )
            }
          )
        } else {
          return callback(
            null,
            lines,
            version,
            ranges,
            pathname,
            projectHistoryId,
            unflushedTime,
            true
          )
        }
      }
    )
  },

  getDocAndRecentOps(project_id, doc_id, fromVersion, _callback) {
    if (_callback == null) {
      _callback = function (
        error,
        lines,
        version,
        ops,
        ranges,
        pathname,
        projectHistoryId
      ) {}
    }
    const timer = new Metrics.Timer('docManager.getDocAndRecentOps')
    const callback = function (...args) {
      timer.done()
      return _callback(...Array.from(args || []))
    }

    return DocumentManager.getDoc(
      project_id,
      doc_id,
      function (error, lines, version, ranges, pathname, projectHistoryId) {
        if (error != null) {
          return callback(error)
        }
        if (fromVersion === -1) {
          return callback(
            null,
            lines,
            version,
            [],
            ranges,
            pathname,
            projectHistoryId
          )
        } else {
          return RedisManager.getPreviousDocOps(
            doc_id,
            fromVersion,
            version,
            function (error, ops) {
              if (error != null) {
                return callback(error)
              }
              return callback(
                null,
                lines,
                version,
                ops,
                ranges,
                pathname,
                projectHistoryId
              )
            }
          )
        }
      }
    )
  },

  setDoc(project_id, doc_id, newLines, source, user_id, undoing, _callback) {
    if (_callback == null) {
      _callback = function (error) {}
    }
    const timer = new Metrics.Timer('docManager.setDoc')
    const callback = function (...args) {
      timer.done()
      return _callback(...Array.from(args || []))
    }

    if (newLines == null) {
      return callback(new Error('No lines were provided to setDoc'))
    }

    const UpdateManager = require('./UpdateManager')
    return DocumentManager.getDoc(
      project_id,
      doc_id,
      function (
        error,
        oldLines,
        version,
        ranges,
        pathname,
        projectHistoryId,
        unflushedTime,
        alreadyLoaded
      ) {
        if (error != null) {
          return callback(error)
        }

        if (
          oldLines != null &&
          oldLines.length > 0 &&
          oldLines[0].text != null
        ) {
          logger.log(
            { doc_id, project_id, oldLines, newLines },
            'document is JSON so not updating'
          )
          return callback(null)
        }

        logger.log(
          { doc_id, project_id, oldLines, newLines },
          'setting a document via http'
        )
        return DiffCodec.diffAsShareJsOp(
          oldLines,
          newLines,
          function (error, op) {
            if (error != null) {
              return callback(error)
            }
            if (undoing) {
              for (const o of Array.from(op || [])) {
                o.u = true
              } // Turn on undo flag for each op for track changes
            }
            const update = {
              doc: doc_id,
              op,
              v: version,
              meta: {
                type: 'external',
                source,
                user_id,
              },
            }
            return UpdateManager.applyUpdate(
              project_id,
              doc_id,
              update,
              function (error) {
                if (error != null) {
                  return callback(error)
                }
                // If the document was loaded already, then someone has it open
                // in a project, and the usual flushing mechanism will happen.
                // Otherwise we should remove it immediately since nothing else
                // is using it.
                if (alreadyLoaded) {
                  return DocumentManager.flushDocIfLoaded(
                    project_id,
                    doc_id,
                    function (error) {
                      if (error != null) {
                        return callback(error)
                      }
                      return callback(null)
                    }
                  )
                } else {
                  return DocumentManager.flushAndDeleteDoc(
                    project_id,
                    doc_id,
                    {},
                    function (error) {
                      // There is no harm in flushing project history if the previous
                      // call failed and sometimes it is required
                      HistoryManager.flushProjectChangesAsync(project_id)

                      if (error != null) {
                        return callback(error)
                      }
                      return callback(null)
                    }
                  )
                }
              }
            )
          }
        )
      }
    )
  },

  flushDocIfLoaded(project_id, doc_id, _callback) {
    if (_callback == null) {
      _callback = function (error) {}
    }
    const timer = new Metrics.Timer('docManager.flushDocIfLoaded')
    const callback = function (...args) {
      timer.done()
      return _callback(...Array.from(args || []))
    }
    return RedisManager.getDoc(
      project_id,
      doc_id,
      function (
        error,
        lines,
        version,
        ranges,
        pathname,
        projectHistoryId,
        unflushedTime,
        lastUpdatedAt,
        lastUpdatedBy
      ) {
        if (error != null) {
          return callback(error)
        }
        if (lines == null || version == null) {
          logger.log(
            { project_id, doc_id },
            'doc is not loaded so not flushing'
          )
          return callback(null) // TODO: return a flag to bail out, as we go on to remove doc from memory?
        } else {
          logger.log({ project_id, doc_id, version }, 'flushing doc')
          return PersistenceManager.setDoc(
            project_id,
            doc_id,
            lines,
            version,
            ranges,
            lastUpdatedAt,
            lastUpdatedBy,
            function (error) {
              if (error != null) {
                return callback(error)
              }
              return RedisManager.clearUnflushedTime(doc_id, callback)
            }
          )
        }
      }
    )
  },

  flushAndDeleteDoc(project_id, doc_id, options, _callback) {
    const timer = new Metrics.Timer('docManager.flushAndDeleteDoc')
    const callback = function (...args) {
      timer.done()
      return _callback(...Array.from(args || []))
    }

    return DocumentManager.flushDocIfLoaded(
      project_id,
      doc_id,
      function (error) {
        if (error != null) {
          if (options.ignoreFlushErrors) {
            logger.warn(
              { project_id, doc_id, err: error },
              'ignoring flush error while deleting document'
            )
          } else {
            return callback(error)
          }
        }

        // Flush in the background since it requires a http request
        HistoryManager.flushDocChangesAsync(project_id, doc_id)

        return RedisManager.removeDocFromMemory(
          project_id,
          doc_id,
          function (error) {
            if (error != null) {
              return callback(error)
            }
            return callback(null)
          }
        )
      }
    )
  },

  acceptChanges(project_id, doc_id, change_ids, _callback) {
    if (change_ids == null) {
      change_ids = []
    }
    if (_callback == null) {
      _callback = function (error) {}
    }
    const timer = new Metrics.Timer('docManager.acceptChanges')
    const callback = function (...args) {
      timer.done()
      return _callback(...Array.from(args || []))
    }

    return DocumentManager.getDoc(
      project_id,
      doc_id,
      function (error, lines, version, ranges) {
        if (error != null) {
          return callback(error)
        }
        if (lines == null || version == null) {
          return callback(
            new Errors.NotFoundError(`document not found: ${doc_id}`)
          )
        }
        return RangesManager.acceptChanges(
          change_ids,
          ranges,
          function (error, new_ranges) {
            if (error != null) {
              return callback(error)
            }
            return RedisManager.updateDocument(
              project_id,
              doc_id,
              lines,
              version,
              [],
              new_ranges,
              {},
              function (error) {
                if (error != null) {
                  return callback(error)
                }
                return callback()
              }
            )
          }
        )
      }
    )
  },

  deleteComment(project_id, doc_id, comment_id, _callback) {
    if (_callback == null) {
      _callback = function (error) {}
    }
    const timer = new Metrics.Timer('docManager.deleteComment')
    const callback = function (...args) {
      timer.done()
      return _callback(...Array.from(args || []))
    }

    return DocumentManager.getDoc(
      project_id,
      doc_id,
      function (error, lines, version, ranges) {
        if (error != null) {
          return callback(error)
        }
        if (lines == null || version == null) {
          return callback(
            new Errors.NotFoundError(`document not found: ${doc_id}`)
          )
        }
        return RangesManager.deleteComment(
          comment_id,
          ranges,
          function (error, new_ranges) {
            if (error != null) {
              return callback(error)
            }
            return RedisManager.updateDocument(
              project_id,
              doc_id,
              lines,
              version,
              [],
              new_ranges,
              {},
              function (error) {
                if (error != null) {
                  return callback(error)
                }
                return callback()
              }
            )
          }
        )
      }
    )
  },

  renameDoc(project_id, doc_id, user_id, update, projectHistoryId, _callback) {
    if (_callback == null) {
      _callback = function (error) {}
    }
    const timer = new Metrics.Timer('docManager.updateProject')
    const callback = function (...args) {
      timer.done()
      return _callback(...Array.from(args || []))
    }

    return RedisManager.renameDoc(
      project_id,
      doc_id,
      user_id,
      update,
      projectHistoryId,
      callback
    )
  },

  getDocAndFlushIfOld(project_id, doc_id, callback) {
    if (callback == null) {
      callback = function (error, doc) {}
    }
    return DocumentManager.getDoc(
      project_id,
      doc_id,
      function (
        error,
        lines,
        version,
        ranges,
        pathname,
        projectHistoryId,
        unflushedTime,
        alreadyLoaded
      ) {
        if (error != null) {
          return callback(error)
        }
        // if doc was already loaded see if it needs to be flushed
        if (
          alreadyLoaded &&
          unflushedTime != null &&
          Date.now() - unflushedTime > MAX_UNFLUSHED_AGE
        ) {
          return DocumentManager.flushDocIfLoaded(
            project_id,
            doc_id,
            function (error) {
              if (error != null) {
                return callback(error)
              }
              return callback(null, lines, version)
            }
          )
        } else {
          return callback(null, lines, version)
        }
      }
    )
  },

  resyncDocContents(project_id, doc_id, callback) {
    logger.log({ project_id, doc_id }, 'start resyncing doc contents')
    return RedisManager.getDoc(
      project_id,
      doc_id,
      function (error, lines, version, ranges, pathname, projectHistoryId) {
        if (error != null) {
          return callback(error)
        }

        if (lines == null || version == null) {
          logger.log(
            { project_id, doc_id },
            'resyncing doc contents - not found in redis - retrieving from web'
          )
          return PersistenceManager.getDoc(
            project_id,
            doc_id,
            function (
              error,
              lines,
              version,
              ranges,
              pathname,
              projectHistoryId
            ) {
              if (error != null) {
                logger.error(
                  { project_id, doc_id, getDocError: error },
                  'resyncing doc contents - error retrieving from web'
                )
                return callback(error)
              }
              return ProjectHistoryRedisManager.queueResyncDocContent(
                project_id,
                projectHistoryId,
                doc_id,
                lines,
                version,
                pathname,
                callback
              )
            }
          )
        } else {
          logger.log(
            { project_id, doc_id },
            'resyncing doc contents - doc in redis - will queue in redis'
          )
          return ProjectHistoryRedisManager.queueResyncDocContent(
            project_id,
            projectHistoryId,
            doc_id,
            lines,
            version,
            pathname,
            callback
          )
        }
      }
    )
  },

  getDocWithLock(project_id, doc_id, callback) {
    if (callback == null) {
      callback = function (error, lines, version) {}
    }
    const UpdateManager = require('./UpdateManager')
    return UpdateManager.lockUpdatesAndDo(
      DocumentManager.getDoc,
      project_id,
      doc_id,
      callback
    )
  },

  getDocAndRecentOpsWithLock(project_id, doc_id, fromVersion, callback) {
    if (callback == null) {
      callback = function (
        error,
        lines,
        version,
        ops,
        ranges,
        pathname,
        projectHistoryId
      ) {}
    }
    const UpdateManager = require('./UpdateManager')
    return UpdateManager.lockUpdatesAndDo(
      DocumentManager.getDocAndRecentOps,
      project_id,
      doc_id,
      fromVersion,
      callback
    )
  },

  getDocAndFlushIfOldWithLock(project_id, doc_id, callback) {
    if (callback == null) {
      callback = function (error, doc) {}
    }
    const UpdateManager = require('./UpdateManager')
    return UpdateManager.lockUpdatesAndDo(
      DocumentManager.getDocAndFlushIfOld,
      project_id,
      doc_id,
      callback
    )
  },

  setDocWithLock(
    project_id,
    doc_id,
    lines,
    source,
    user_id,
    undoing,
    callback
  ) {
    if (callback == null) {
      callback = function (error) {}
    }
    const UpdateManager = require('./UpdateManager')
    return UpdateManager.lockUpdatesAndDo(
      DocumentManager.setDoc,
      project_id,
      doc_id,
      lines,
      source,
      user_id,
      undoing,
      callback
    )
  },

  flushDocIfLoadedWithLock(project_id, doc_id, callback) {
    if (callback == null) {
      callback = function (error) {}
    }
    const UpdateManager = require('./UpdateManager')
    return UpdateManager.lockUpdatesAndDo(
      DocumentManager.flushDocIfLoaded,
      project_id,
      doc_id,
      callback
    )
  },

  flushAndDeleteDocWithLock(project_id, doc_id, options, callback) {
    const UpdateManager = require('./UpdateManager')
    return UpdateManager.lockUpdatesAndDo(
      DocumentManager.flushAndDeleteDoc,
      project_id,
      doc_id,
      options,
      callback
    )
  },

  acceptChangesWithLock(project_id, doc_id, change_ids, callback) {
    if (callback == null) {
      callback = function (error) {}
    }
    const UpdateManager = require('./UpdateManager')
    return UpdateManager.lockUpdatesAndDo(
      DocumentManager.acceptChanges,
      project_id,
      doc_id,
      change_ids,
      callback
    )
  },

  deleteCommentWithLock(project_id, doc_id, thread_id, callback) {
    if (callback == null) {
      callback = function (error) {}
    }
    const UpdateManager = require('./UpdateManager')
    return UpdateManager.lockUpdatesAndDo(
      DocumentManager.deleteComment,
      project_id,
      doc_id,
      thread_id,
      callback
    )
  },

  renameDocWithLock(
    project_id,
    doc_id,
    user_id,
    update,
    projectHistoryId,
    callback
  ) {
    if (callback == null) {
      callback = function (error) {}
    }
    const UpdateManager = require('./UpdateManager')
    return UpdateManager.lockUpdatesAndDo(
      DocumentManager.renameDoc,
      project_id,
      doc_id,
      user_id,
      update,
      projectHistoryId,
      callback
    )
  },

  resyncDocContentsWithLock(project_id, doc_id, callback) {
    if (callback == null) {
      callback = function (error) {}
    }
    const UpdateManager = require('./UpdateManager')
    return UpdateManager.lockUpdatesAndDo(
      DocumentManager.resyncDocContents,
      project_id,
      doc_id,
      callback
    )
  },
}
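// All of the *WithLock variants above funnel through
// UpdateManager.lockUpdatesAndDo. As a sketch of the contract (the real
// implementation lives in UpdateManager, which also drains any queued
// updates for the doc while it holds the lock):
//
//   lockUpdatesAndDo(method, project_id, doc_id, ...args, callback)
//     1. acquire the per-doc redis lock
//     2. run method(project_id, doc_id, ...args)
//     3. release the lock, then invoke callback with method's results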
45
services/document-updater/app/js/Errors.js
Normal file
@@ -0,0 +1,45 @@
/* eslint-disable
    no-proto,
    no-unused-vars,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
let Errors
var NotFoundError = function (message) {
  const error = new Error(message)
  error.name = 'NotFoundError'
  error.__proto__ = NotFoundError.prototype
  return error
}
NotFoundError.prototype.__proto__ = Error.prototype

var OpRangeNotAvailableError = function (message) {
  const error = new Error(message)
  error.name = 'OpRangeNotAvailableError'
  error.__proto__ = OpRangeNotAvailableError.prototype
  return error
}
OpRangeNotAvailableError.prototype.__proto__ = Error.prototype

var ProjectStateChangedError = function (message) {
  const error = new Error(message)
  error.name = 'ProjectStateChangedError'
  error.__proto__ = ProjectStateChangedError.prototype
  return error
}
ProjectStateChangedError.prototype.__proto__ = Error.prototype

var DeleteMismatchError = function (message) {
  const error = new Error(message)
  error.name = 'DeleteMismatchError'
  error.__proto__ = DeleteMismatchError.prototype
  return error
}
DeleteMismatchError.prototype.__proto__ = Error.prototype

module.exports = Errors = {
  NotFoundError,
  OpRangeNotAvailableError,
  ProjectStateChangedError,
  DeleteMismatchError,
}
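// Usage sketch: these constructors work with or without `new`, and both
// instanceof checks hold because of the prototype chain set up above:
//
//   const Errors = require('./Errors')
//   const err = new Errors.NotFoundError('document not found: abc123')
//   err instanceof Errors.NotFoundError // true
//   err instanceof Error                // true
//   err.name                            // 'NotFoundError'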
263
services/document-updater/app/js/HistoryManager.js
Normal file
@@ -0,0 +1,263 @@
/* eslint-disable
    camelcase,
    handle-callback-err,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS102: Remove unnecessary code created because of implicit returns
 * DS103: Rewrite code to no longer use __guard__
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
let HistoryManager
const async = require('async')
const logger = require('logger-sharelatex')
const request = require('request')
const Settings = require('@overleaf/settings')
const HistoryRedisManager = require('./HistoryRedisManager')
const ProjectHistoryRedisManager = require('./ProjectHistoryRedisManager')
const RedisManager = require('./RedisManager')
const metrics = require('./Metrics')

module.exports = HistoryManager = {
  flushDocChangesAsync(project_id, doc_id) {
    if (
      (Settings.apis != null ? Settings.apis.trackchanges : undefined) == null
    ) {
      logger.warn(
        { doc_id },
        'track changes API is not configured, so not flushing'
      )
      return
    }
    return RedisManager.getHistoryType(
      doc_id,
      function (err, projectHistoryType) {
        if (err != null) {
          logger.warn({ err, doc_id }, 'error getting history type')
        }
        // if there's an error continue and flush to track-changes for safety
        if (
          Settings.disableDoubleFlush &&
          projectHistoryType === 'project-history'
        ) {
          return logger.debug(
            { doc_id, projectHistoryType },
            'skipping track-changes flush'
          )
        } else {
          metrics.inc('history-flush', 1, { status: 'track-changes' })
          const url = `${Settings.apis.trackchanges.url}/project/${project_id}/doc/${doc_id}/flush`
          logger.log(
            { project_id, doc_id, url, projectHistoryType },
            'flushing doc in track changes api'
          )
          return request.post(url, function (error, res, body) {
            if (error != null) {
              return logger.error(
                { error, doc_id, project_id },
                'error flushing doc to track changes api'
              )
            } else if (res.statusCode < 200 || res.statusCode >= 300) {
              return logger.error(
                { doc_id, project_id },
                `track changes api returned a failure status code: ${res.statusCode}`
              )
            }
          })
        }
      }
    )
  },

  // flush changes in the background
  flushProjectChangesAsync(project_id) {
    if (
      !__guard__(
        Settings.apis != null ? Settings.apis.project_history : undefined,
        x => x.enabled
      )
    ) {
      return
    }
    return HistoryManager.flushProjectChanges(
      project_id,
      { background: true },
      function () {}
    )
  },

  // flush changes and callback (for when we need to know the queue is flushed)
  flushProjectChanges(project_id, options, callback) {
    if (callback == null) {
      callback = function (error) {}
    }
    if (
      !__guard__(
        Settings.apis != null ? Settings.apis.project_history : undefined,
        x => x.enabled
      )
    ) {
      return callback()
    }
    if (options.skip_history_flush) {
      logger.log({ project_id }, 'skipping flush of project history')
      return callback()
    }
    metrics.inc('history-flush', 1, { status: 'project-history' })
    const url = `${Settings.apis.project_history.url}/project/${project_id}/flush`
    const qs = {}
    if (options.background) {
      qs.background = true
    } // pass on the background flush option if present
    logger.log({ project_id, url, qs }, 'flushing project in project history api')
    return request.post({ url, qs }, function (error, res, body) {
      if (error != null) {
        logger.error(
          { error, project_id },
          'error flushing project to project history api'
        )
        return callback(error)
      } else if (res.statusCode < 200 || res.statusCode >= 300) {
        logger.error(
          { project_id },
          `project history api returned a failure status code: ${res.statusCode}`
        )
        return callback(
          new Error(
            `project history api returned a failure status code: ${res.statusCode}`
          )
        )
      } else {
        return callback()
      }
    })
  },

  FLUSH_DOC_EVERY_N_OPS: 100,
  FLUSH_PROJECT_EVERY_N_OPS: 500,

  recordAndFlushHistoryOps(
    project_id,
    doc_id,
    ops,
    doc_ops_length,
    project_ops_length,
    callback
  ) {
    if (ops == null) {
      ops = []
    }
    if (callback == null) {
      callback = function (error) {}
    }
    if (ops.length === 0) {
      return callback()
    }

    // record updates for project history
    if (
      __guard__(
        Settings.apis != null ? Settings.apis.project_history : undefined,
        x => x.enabled
      )
    ) {
      if (
        HistoryManager.shouldFlushHistoryOps(
          project_ops_length,
          ops.length,
          HistoryManager.FLUSH_PROJECT_EVERY_N_OPS
        )
      ) {
        // Do this in the background since it uses HTTP and so may be too
        // slow to wait for when processing a doc update.
        logger.log(
          { project_ops_length, project_id },
          'flushing project history api'
        )
        HistoryManager.flushProjectChangesAsync(project_id)
      }
    }

    // if the doc_ops_length is undefined it means the project is not using track-changes
    // so we can bail out here
    if (typeof doc_ops_length === 'undefined') {
      logger.debug(
        { project_id, doc_id },
        'skipping flush to track-changes, only using project-history'
      )
      return callback()
    }

    // record updates for track-changes
    return HistoryRedisManager.recordDocHasHistoryOps(
      project_id,
      doc_id,
      ops,
      function (error) {
        if (error != null) {
          return callback(error)
        }
        if (
          HistoryManager.shouldFlushHistoryOps(
            doc_ops_length,
            ops.length,
            HistoryManager.FLUSH_DOC_EVERY_N_OPS
          )
        ) {
          // Do this in the background since it uses HTTP and so may be too
          // slow to wait for when processing a doc update.
          logger.log(
            { doc_ops_length, doc_id, project_id },
            'flushing track changes api'
          )
          HistoryManager.flushDocChangesAsync(project_id, doc_id)
        }
        return callback()
      }
    )
  },

  shouldFlushHistoryOps(length, ops_length, threshold) {
    if (!length) {
      return false
    } // don't flush unless we know the length
    // We want to flush every 100 ops, i.e. 100, 200, 300, etc
    // Find out which 'block' (i.e. 0-99, 100-199) we were in before and after pushing these
    // ops. If we've changed, then we've gone over a multiple of 100 and should flush.
    // (Most of the time, we will only hit 100 and then flushing will put us back to 0)
    const previousLength = length - ops_length
    const prevBlock = Math.floor(previousLength / threshold)
    const newBlock = Math.floor(length / threshold)
    return newBlock !== prevBlock
  },
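  // Worked example with threshold 100: pushing 10 ops that take the length
  // from 95 to 105 crosses from block 0 (0-99) into block 1 (100-199), so we
  // flush; pushing 10 ops from 85 to 95 stays inside block 0, so we don't.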

  MAX_PARALLEL_REQUESTS: 4,

  resyncProjectHistory(project_id, projectHistoryId, docs, files, callback) {
    return ProjectHistoryRedisManager.queueResyncProjectStructure(
      project_id,
      projectHistoryId,
      docs,
      files,
      function (error) {
        if (error != null) {
          return callback(error)
        }
        const DocumentManager = require('./DocumentManager')
        const resyncDoc = (doc, cb) =>
          DocumentManager.resyncDocContentsWithLock(project_id, doc.doc, cb)
        return async.eachLimit(
          docs,
          HistoryManager.MAX_PARALLEL_REQUESTS,
          resyncDoc,
          callback
        )
      }
    )
  },
}

function __guard__(value, transform) {
  return typeof value !== 'undefined' && value !== null
    ? transform(value)
    : undefined
}
45
services/document-updater/app/js/HistoryRedisManager.js
Normal file
@@ -0,0 +1,45 @@
/* eslint-disable
    camelcase,
    handle-callback-err,
    no-unused-vars,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS102: Remove unnecessary code created because of implicit returns
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
let HistoryRedisManager
const Settings = require('@overleaf/settings')
const rclient = require('@overleaf/redis-wrapper').createClient(
  Settings.redis.history
)
const Keys = Settings.redis.history.key_schema
const logger = require('logger-sharelatex')

module.exports = HistoryRedisManager = {
  recordDocHasHistoryOps(project_id, doc_id, ops, callback) {
    if (ops == null) {
      ops = []
    }
    if (callback == null) {
      callback = function (error) {}
    }
    if (ops.length === 0) {
      return callback(new Error('cannot push no ops')) // This should never be called with no ops, but protect against a redis error if we sent an empty array to rpush
    }
    logger.log({ project_id, doc_id }, 'marking doc in project for history ops')
    return rclient.sadd(
      Keys.docsWithHistoryOps({ project_id }),
      doc_id,
      function (error) {
        if (error != null) {
          return callback(error)
        }
        return callback()
      }
    )
  },
}
407
services/document-updater/app/js/HttpController.js
Normal file
@@ -0,0 +1,407 @@
const DocumentManager = require('./DocumentManager')
const HistoryManager = require('./HistoryManager')
const ProjectManager = require('./ProjectManager')
const RedisManager = require('./RedisManager')
const Errors = require('./Errors')
const logger = require('logger-sharelatex')
const Settings = require('@overleaf/settings')
const Metrics = require('./Metrics')
const ProjectFlusher = require('./ProjectFlusher')
const DeleteQueueManager = require('./DeleteQueueManager')
const async = require('async')

module.exports = {
  getDoc,
  peekDoc,
  getProjectDocsAndFlushIfOld,
  clearProjectState,
  setDoc,
  flushDocIfLoaded,
  deleteDoc,
  flushProject,
  deleteProject,
  deleteMultipleProjects,
  acceptChanges,
  deleteComment,
  updateProject,
  resyncProjectHistory,
  flushAllProjects,
  flushQueuedProjects,
}

function getDoc(req, res, next) {
  let fromVersion
  const docId = req.params.doc_id
  const projectId = req.params.project_id
  logger.log({ projectId, docId }, 'getting doc via http')
  const timer = new Metrics.Timer('http.getDoc')

  if (req.query.fromVersion != null) {
    fromVersion = parseInt(req.query.fromVersion, 10)
  } else {
    fromVersion = -1
  }

  DocumentManager.getDocAndRecentOpsWithLock(
    projectId,
    docId,
    fromVersion,
    (error, lines, version, ops, ranges, pathname) => {
      timer.done()
      if (error) {
        return next(error)
      }
      logger.log({ projectId, docId }, 'got doc via http')
      if (lines == null || version == null) {
        return next(new Errors.NotFoundError('document not found'))
      }
      res.json({
        id: docId,
        lines,
        version,
        ops,
        ranges,
        pathname,
      })
    }
  )
}

// return the doc from redis if present, but don't load it from mongo
function peekDoc(req, res, next) {
  const docId = req.params.doc_id
  const projectId = req.params.project_id
  logger.log({ projectId, docId }, 'peeking at doc via http')
  RedisManager.getDoc(projectId, docId, function (error, lines, version) {
    if (error) {
      return next(error)
    }
    if (lines == null || version == null) {
      return next(new Errors.NotFoundError('document not found'))
    }
    res.json({ id: docId, lines, version })
  })
}

function _getTotalSizeOfLines(lines) {
  let size = 0
  for (const line of lines) {
    size += line.length + 1
  }
  return size
}
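// For example, _getTotalSizeOfLines(['ab', 'c']) returns 5: each line's
// length plus one per line for its newline. This is the size compared
// against Settings.max_doc_length in setDoc below.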
|
||||
function getProjectDocsAndFlushIfOld(req, res, next) {
|
||||
const projectId = req.params.project_id
|
||||
const projectStateHash = req.query.state
|
||||
// exclude is string of existing docs "id:version,id:version,..."
|
||||
const excludeItems =
|
||||
req.query.exclude != null ? req.query.exclude.split(',') : []
|
||||
logger.log({ projectId, exclude: excludeItems }, 'getting docs via http')
|
||||
const timer = new Metrics.Timer('http.getAllDocs')
|
||||
const excludeVersions = {}
|
||||
for (const item of excludeItems) {
|
||||
const [id, version] = item.split(':')
|
||||
excludeVersions[id] = version
|
||||
}
|
||||
logger.log(
|
||||
{ projectId, projectStateHash, excludeVersions },
|
||||
'excluding versions'
|
||||
)
|
||||
ProjectManager.getProjectDocsAndFlushIfOld(
|
||||
projectId,
|
||||
projectStateHash,
|
||||
excludeVersions,
|
||||
(error, result) => {
|
||||
timer.done()
|
||||
if (error instanceof Errors.ProjectStateChangedError) {
|
||||
res.sendStatus(409) // conflict
|
||||
} else if (error) {
|
||||
next(error)
|
||||
} else {
|
||||
logger.log(
|
||||
{
|
||||
projectId,
|
||||
result: result.map(doc => `${doc._id}:${doc.v}`),
|
||||
},
|
||||
'got docs via http'
|
||||
)
|
||||
res.send(result)
|
||||
}
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
function clearProjectState(req, res, next) {
|
||||
const projectId = req.params.project_id
|
||||
const timer = new Metrics.Timer('http.clearProjectState')
|
||||
logger.log({ projectId }, 'clearing project state via http')
|
||||
ProjectManager.clearProjectState(projectId, error => {
|
||||
timer.done()
|
||||
if (error) {
|
||||
next(error)
|
||||
} else {
|
||||
res.sendStatus(200)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
function setDoc(req, res, next) {
|
||||
const docId = req.params.doc_id
|
||||
const projectId = req.params.project_id
|
||||
const { lines, source, user_id: userId, undoing } = req.body
|
||||
const lineSize = _getTotalSizeOfLines(lines)
|
||||
if (lineSize > Settings.max_doc_length) {
|
||||
logger.log(
|
||||
{ projectId, docId, source, lineSize, userId },
|
||||
'document too large, returning 406 response'
|
||||
)
|
||||
return res.sendStatus(406)
|
||||
}
|
||||
logger.log(
|
||||
{ projectId, docId, lines, source, userId, undoing },
|
||||
'setting doc via http'
|
||||
)
|
||||
const timer = new Metrics.Timer('http.setDoc')
|
||||
DocumentManager.setDocWithLock(
|
||||
projectId,
|
||||
docId,
|
||||
lines,
|
||||
source,
|
||||
userId,
|
||||
undoing,
|
||||
error => {
|
||||
timer.done()
|
||||
if (error) {
|
||||
return next(error)
|
||||
}
|
||||
logger.log({ projectId, docId }, 'set doc via http')
|
||||
res.sendStatus(204) // No Content
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
function flushDocIfLoaded(req, res, next) {
|
||||
const docId = req.params.doc_id
|
||||
const projectId = req.params.project_id
|
||||
logger.log({ projectId, docId }, 'flushing doc via http')
|
||||
const timer = new Metrics.Timer('http.flushDoc')
|
||||
DocumentManager.flushDocIfLoadedWithLock(projectId, docId, error => {
|
||||
timer.done()
|
||||
if (error) {
|
||||
return next(error)
|
||||
}
|
||||
logger.log({ projectId, docId }, 'flushed doc via http')
|
||||
res.sendStatus(204) // No Content
|
||||
})
|
||||
}
|
||||
|
||||
function deleteDoc(req, res, next) {
|
||||
const docId = req.params.doc_id
|
||||
const projectId = req.params.project_id
|
||||
const ignoreFlushErrors = req.query.ignore_flush_errors === 'true'
|
||||
const timer = new Metrics.Timer('http.deleteDoc')
|
||||
logger.log({ projectId, docId }, 'deleting doc via http')
|
||||
DocumentManager.flushAndDeleteDocWithLock(
|
||||
projectId,
|
||||
docId,
|
||||
{ ignoreFlushErrors },
|
||||
error => {
|
||||
timer.done()
|
||||
// There is no harm in flushing project history if the previous call
|
||||
// failed and sometimes it is required
|
||||
HistoryManager.flushProjectChangesAsync(projectId)
|
||||
|
||||
if (error) {
|
||||
return next(error)
|
||||
}
|
||||
logger.log({ projectId, docId }, 'deleted doc via http')
|
||||
res.sendStatus(204) // No Content
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
function flushProject(req, res, next) {
|
||||
const projectId = req.params.project_id
|
||||
logger.log({ projectId }, 'flushing project via http')
|
||||
const timer = new Metrics.Timer('http.flushProject')
|
||||
  ProjectManager.flushProjectWithLocks(projectId, error => {
    timer.done()
    if (error) {
      return next(error)
    }
    logger.log({ projectId }, 'flushed project via http')
    res.sendStatus(204) // No Content
  })
}

function deleteProject(req, res, next) {
  const projectId = req.params.project_id
  logger.log({ projectId }, 'deleting project via http')
  const options = {}
  if (req.query.background) {
    options.background = true
  } // allow non-urgent flushes to be queued
  if (req.query.shutdown) {
    options.skip_history_flush = true
  } // don't flush history when realtime shuts down
  if (req.query.background) {
    ProjectManager.queueFlushAndDeleteProject(projectId, error => {
      if (error) {
        return next(error)
      }
      logger.log({ projectId }, 'queue delete of project via http')
      res.sendStatus(204) // No Content
    })
  } else {
    const timer = new Metrics.Timer('http.deleteProject')
    ProjectManager.flushAndDeleteProjectWithLocks(projectId, options, error => {
      timer.done()
      if (error) {
        return next(error)
      }
      logger.log({ projectId }, 'deleted project via http')
      res.sendStatus(204) // No Content
    })
  }
}

function deleteMultipleProjects(req, res, next) {
  const projectIds = req.body.project_ids || []
  logger.log({ projectIds }, 'deleting multiple projects via http')
  async.eachSeries(
    projectIds,
    (projectId, cb) => {
      logger.log({ projectId }, 'queue delete of project via http')
      ProjectManager.queueFlushAndDeleteProject(projectId, cb)
    },
    error => {
      if (error) {
        return next(error)
      }
      res.sendStatus(204) // No Content
    }
  )
}

function acceptChanges(req, res, next) {
  const { project_id: projectId, doc_id: docId } = req.params
  let changeIds = req.body.change_ids
  if (changeIds == null) {
    changeIds = [req.params.change_id]
  }
  logger.log(
    { projectId, docId },
    `accepting ${changeIds.length} changes via http`
  )
  const timer = new Metrics.Timer('http.acceptChanges')
  DocumentManager.acceptChangesWithLock(projectId, docId, changeIds, error => {
    timer.done()
    if (error) {
      return next(error)
    }
    logger.log(
      { projectId, docId },
      `accepted ${changeIds.length} changes via http`
    )
    res.sendStatus(204) // No Content
  })
}

function deleteComment(req, res, next) {
  const {
    project_id: projectId,
    doc_id: docId,
    comment_id: commentId,
  } = req.params
  logger.log({ projectId, docId, commentId }, 'deleting comment via http')
  const timer = new Metrics.Timer('http.deleteComment')
  DocumentManager.deleteCommentWithLock(projectId, docId, commentId, error => {
    timer.done()
    if (error) {
      return next(error)
    }
    logger.log({ projectId, docId, commentId }, 'deleted comment via http')
    res.sendStatus(204) // No Content
  })
}

function updateProject(req, res, next) {
  const timer = new Metrics.Timer('http.updateProject')
  const projectId = req.params.project_id
  const { projectHistoryId, userId, updates = [], version } = req.body
  logger.log({ projectId, updates, version }, 'updating project via http')
  ProjectManager.updateProjectWithLocks(
    projectId,
    projectHistoryId,
    userId,
    updates,
    version,
    error => {
      timer.done()
      if (error) {
        return next(error)
      }
      logger.log({ projectId }, 'updated project via http')
      res.sendStatus(204) // No Content
    }
  )
}

function resyncProjectHistory(req, res, next) {
  const projectId = req.params.project_id
  const { projectHistoryId, docs, files } = req.body

  logger.log(
    { projectId, docs, files },
    'queuing project history resync via http'
  )
  HistoryManager.resyncProjectHistory(
    projectId,
    projectHistoryId,
    docs,
    files,
    error => {
      if (error) {
        return next(error)
      }
      logger.log({ projectId }, 'queued project history resync via http')
      res.sendStatus(204)
    }
  )
}

function flushAllProjects(req, res, next) {
  res.setTimeout(5 * 60 * 1000)
  const options = {
    limit: req.query.limit || 1000,
    concurrency: req.query.concurrency || 5,
    dryRun: req.query.dryRun || false,
  }
  ProjectFlusher.flushAllProjects(options, (err, projectIds) => {
    if (err) {
      logger.err({ err }, 'error bulk flushing projects')
      res.sendStatus(500)
    } else {
      res.send(projectIds)
    }
  })
}

function flushQueuedProjects(req, res, next) {
  res.setTimeout(10 * 60 * 1000)
  const options = {
    limit: req.query.limit || 1000,
    timeout: 5 * 60 * 1000,
    min_delete_age: req.query.min_delete_age || 5 * 60 * 1000,
  }
  DeleteQueueManager.flushAndDeleteOldProjects(options, (err, flushed) => {
    if (err) {
      logger.err({ err }, 'error flushing old projects')
      res.sendStatus(500)
    } else {
      logger.log({ flushed }, 'flush of queued projects completed')
      res.send({ flushed })
    }
  })
}
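
deleteProject above chooses between queueing a background delete and a synchronous flush-and-delete. A minimal sketch of the same decision made directly against ProjectManager (the project id is made up; `background` would come from req.query in the handler):

// Sketch of deleteProject's two paths, called directly.
const background = true // in the handler this is req.query.background
if (background) {
  ProjectManager.queueFlushAndDeleteProject('project-id-123', error => {
    if (error) console.error(error)
  })
} else {
  ProjectManager.flushAndDeleteProjectWithLocks(
    'project-id-123',
    { skip_history_flush: false },
    error => {
      if (error) console.error(error)
    }
  )
}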
177
services/document-updater/app/js/LockManager.js
Normal file

@@ -0,0 +1,177 @@
/* eslint-disable
    camelcase,
    handle-callback-err,
    no-return-assign,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS102: Remove unnecessary code created because of implicit returns
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
let LockManager
const metrics = require('./Metrics')
const Settings = require('@overleaf/settings')
const redis = require('@overleaf/redis-wrapper')
const rclient = redis.createClient(Settings.redis.lock)
const keys = Settings.redis.lock.key_schema
const logger = require('logger-sharelatex')
const os = require('os')
const crypto = require('crypto')

const Profiler = require('./Profiler')

const HOST = os.hostname()
const PID = process.pid
const RND = crypto.randomBytes(4).toString('hex')
let COUNT = 0

const MAX_REDIS_REQUEST_LENGTH = 5000 // 5 seconds

module.exports = LockManager = {
  LOCK_TEST_INTERVAL: 50, // 50ms between each test of the lock
  MAX_TEST_INTERVAL: 1000, // back off to 1s between each test of the lock
  MAX_LOCK_WAIT_TIME: 10000, // 10s maximum time to spend trying to get the lock
  LOCK_TTL: 30, // seconds. Time until lock auto expires in redis.

  // Use a signed lock value as described in
  // http://redis.io/topics/distlock#correct-implementation-with-a-single-instance
  // to prevent accidental unlocking by multiple processes
  randomLock() {
    const time = Date.now()
    return `locked:host=${HOST}:pid=${PID}:random=${RND}:time=${time}:count=${COUNT++}`
  },

  unlockScript:
    'if redis.call("get", KEYS[1]) == ARGV[1] then return redis.call("del", KEYS[1]) else return 0 end',

  tryLock(doc_id, callback) {
    if (callback == null) {
      callback = function (err, isFree) {}
    }
    const lockValue = LockManager.randomLock()
    const key = keys.blockingKey({ doc_id })
    const profile = new Profiler('tryLock', { doc_id, key, lockValue })
    return rclient.set(
      key,
      lockValue,
      'EX',
      this.LOCK_TTL,
      'NX',
      function (err, gotLock) {
        if (err != null) {
          return callback(err)
        }
        if (gotLock === 'OK') {
          metrics.inc('doc-not-blocking')
          const timeTaken = profile.log('got lock').end()
          if (timeTaken > MAX_REDIS_REQUEST_LENGTH) {
            // took too long, so try to free the lock
            return LockManager.releaseLock(
              doc_id,
              lockValue,
              function (err, result) {
                if (err != null) {
                  return callback(err) // error freeing lock
                }
                return callback(null, false) // tell caller they didn't get the lock
              }
            )
          } else {
            return callback(null, true, lockValue)
          }
        } else {
          metrics.inc('doc-blocking')
          profile.log('doc is locked').end()
          return callback(null, false)
        }
      }
    )
  },

  getLock(doc_id, callback) {
    let attempt
    if (callback == null) {
      callback = function (error, lockValue) {}
    }
    const startTime = Date.now()
    let testInterval = LockManager.LOCK_TEST_INTERVAL
    const profile = new Profiler('getLock', { doc_id })
    return (attempt = function () {
      if (Date.now() - startTime > LockManager.MAX_LOCK_WAIT_TIME) {
        const e = new Error('Timeout')
        e.doc_id = doc_id
        profile.log('timeout').end()
        return callback(e)
      }

      return LockManager.tryLock(doc_id, function (error, gotLock, lockValue) {
        if (error != null) {
          return callback(error)
        }
        profile.log('tryLock')
        if (gotLock) {
          profile.end()
          return callback(null, lockValue)
        } else {
          setTimeout(attempt, testInterval)
          // back off when the lock is taken to avoid overloading
          return (testInterval = Math.min(
            testInterval * 2,
            LockManager.MAX_TEST_INTERVAL
          ))
        }
      })
    })()
  },

  checkLock(doc_id, callback) {
    if (callback == null) {
      callback = function (err, isFree) {}
    }
    const key = keys.blockingKey({ doc_id })
    return rclient.exists(key, function (err, exists) {
      if (err != null) {
        return callback(err)
      }
      exists = parseInt(exists)
      if (exists === 1) {
        metrics.inc('doc-blocking')
        return callback(null, false)
      } else {
        metrics.inc('doc-not-blocking')
        return callback(null, true)
      }
    })
  },

  releaseLock(doc_id, lockValue, callback) {
    const key = keys.blockingKey({ doc_id })
    const profile = new Profiler('releaseLock', { doc_id, key, lockValue })
    return rclient.eval(
      LockManager.unlockScript,
      1,
      key,
      lockValue,
      function (err, result) {
        if (err != null) {
          return callback(err)
        } else if (result != null && result !== 1) {
          // successful unlock should release exactly one key
          profile.log('unlockScript:expired-lock').end()
          logger.error(
            { doc_id, key, lockValue, redis_err: err, redis_result: result },
            'unlocking error'
          )
          metrics.inc('unlock-error')
          return callback(new Error('tried to release timed out lock'))
        } else {
          profile.log('unlockScript:ok').end()
          return callback(null, result)
        }
      }
    )
  },
}
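
The unlockScript above, combined with the per-process random lock value, is the single-instance locking pattern from the Redis distlock docs: only the holder whose value still matches the key may delete it. A minimal usage sketch, using only the functions defined above (the doc id is made up):

// Sketch: acquire the doc lock, do work, release. 'doc-id-123' is made up.
const LockManager = require('./LockManager')
const logger = require('logger-sharelatex')

LockManager.getLock('doc-id-123', function (error, lockValue) {
  if (error != null) {
    // e.g. the 10s MAX_LOCK_WAIT_TIME was exceeded
    return logger.error({ err: error }, 'could not acquire lock')
  }
  // ... critical section: read and update the doc in redis ...
  LockManager.releaseLock('doc-id-123', lockValue, function (error) {
    if (error != null) {
      // the lock expired (30s TTL) before we released it
      logger.error({ err: error }, 'lock expired before release')
    }
  })
})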
67
services/document-updater/app/js/LoggerSerializers.js
Normal file

@@ -0,0 +1,67 @@
/* eslint-disable
    no-return-assign,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS102: Remove unnecessary code created because of implicit returns
 * DS103: Rewrite code to no longer use __guard__
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
const _ = require('lodash')

const showLength = function (thing) {
  if (thing != null ? thing.length : undefined) {
    return thing.length
  } else {
    return thing
  }
}

const showUpdateLength = function (update) {
  if ((update != null ? update.op : undefined) instanceof Array) {
    const copy = _.cloneDeep(update)
    copy.op.forEach(function (element, index) {
      if (
        __guard__(element != null ? element.i : undefined, x => x.length) !=
        null
      ) {
        copy.op[index].i = element.i.length
      }
      if (
        __guard__(element != null ? element.d : undefined, x1 => x1.length) !=
        null
      ) {
        copy.op[index].d = element.d.length
      }
      if (
        __guard__(element != null ? element.c : undefined, x2 => x2.length) !=
        null
      ) {
        return (copy.op[index].c = element.c.length)
      }
    })
    return copy
  } else {
    return update
  }
}

module.exports = {
  // replace long values with their length
  lines: showLength,
  oldLines: showLength,
  newLines: showLength,
  docLines: showLength,
  newDocLines: showLength,
  ranges: showLength,
  update: showUpdateLength,
}

function __guard__(value, transform) {
  return typeof value !== 'undefined' && value !== null
    ? transform(value)
    : undefined
}
3
services/document-updater/app/js/Metrics.js
Normal file

@@ -0,0 +1,3 @@
// TODO: This file was created by bulk-decaffeinate.
// Sanity-check the conversion and remove this comment.
module.exports = require('@overleaf/metrics')
200
services/document-updater/app/js/PersistenceManager.js
Normal file

@@ -0,0 +1,200 @@
/* eslint-disable
    camelcase,
    handle-callback-err,
    no-unsafe-negation,
    no-unused-vars,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * DS205: Consider reworking code to avoid use of IIFEs
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
let PersistenceManager
const Settings = require('@overleaf/settings')
const Errors = require('./Errors')
const Metrics = require('./Metrics')
const logger = require('logger-sharelatex')
const request = require('requestretry').defaults({
  maxAttempts: 2,
  retryDelay: 10,
})

// We have to be quick with HTTP calls because we're holding a lock that
// expires after 30 seconds. We can't let any errors in the rest of the stack
// hold us up, and need to bail out quickly if there is a problem.
const MAX_HTTP_REQUEST_LENGTH = 5000 // 5 seconds

const updateMetric = function (method, error, response) {
  // find the status, with special handling for connection timeouts
  // https://github.com/request/request#timeouts
  const status = (() => {
    if ((error != null ? error.connect : undefined) === true) {
      return `${error.code} (connect)`
    } else if (error != null) {
      return error.code
    } else if (response != null) {
      return response.statusCode
    }
  })()
  Metrics.inc(method, 1, { status })
  if ((error != null ? error.attempts : undefined) > 1) {
    Metrics.inc(`${method}-retries`, 1, { status: 'error' })
  }
  if ((response != null ? response.attempts : undefined) > 1) {
    return Metrics.inc(`${method}-retries`, 1, { status: 'success' })
  }
}

module.exports = PersistenceManager = {
  getDoc(project_id, doc_id, _callback) {
    if (_callback == null) {
      _callback = function (
        error,
        lines,
        version,
        ranges,
        pathname,
        projectHistoryId,
        projectHistoryType
      ) {}
    }
    const timer = new Metrics.Timer('persistenceManager.getDoc')
    const callback = function (...args) {
      timer.done()
      return _callback(...Array.from(args || []))
    }

    const urlPath = `/project/${project_id}/doc/${doc_id}`
    return request(
      {
        url: `${Settings.apis.web.url}${urlPath}`,
        method: 'GET',
        headers: {
          accept: 'application/json',
        },
        auth: {
          user: Settings.apis.web.user,
          pass: Settings.apis.web.pass,
          sendImmediately: true,
        },
        jar: false,
        timeout: MAX_HTTP_REQUEST_LENGTH,
      },
      function (error, res, body) {
        updateMetric('getDoc', error, res)
        if (error != null) {
          logger.error(
            { err: error, project_id, doc_id },
            'web API request failed'
          )
          return callback(new Error('error connecting to web API'))
        }
        if (res.statusCode >= 200 && res.statusCode < 300) {
          try {
            body = JSON.parse(body)
          } catch (e) {
            return callback(e)
          }
          if (body.lines == null) {
            return callback(new Error('web API response had no doc lines'))
          }
          if (body.version == null || typeof body.version !== 'number') {
            return callback(
              new Error('web API response had no valid doc version')
            )
          }
          if (body.pathname == null) {
            return callback(
              new Error('web API response had no valid doc pathname')
            )
          }
          return callback(
            null,
            body.lines,
            body.version,
            body.ranges,
            body.pathname,
            body.projectHistoryId,
            body.projectHistoryType
          )
        } else if (res.statusCode === 404) {
          return callback(new Errors.NotFoundError(`doc not found: ${urlPath}`))
        } else {
          return callback(
            new Error(`error accessing web API: ${urlPath} ${res.statusCode}`)
          )
        }
      }
    )
  },

  setDoc(
    project_id,
    doc_id,
    lines,
    version,
    ranges,
    lastUpdatedAt,
    lastUpdatedBy,
    _callback
  ) {
    if (_callback == null) {
      _callback = function (error) {}
    }
    const timer = new Metrics.Timer('persistenceManager.setDoc')
    const callback = function (...args) {
      timer.done()
      return _callback(...Array.from(args || []))
    }

    const urlPath = `/project/${project_id}/doc/${doc_id}`
    return request(
      {
        url: `${Settings.apis.web.url}${urlPath}`,
        method: 'POST',
        json: {
          lines,
          ranges,
          version,
          lastUpdatedBy,
          lastUpdatedAt,
        },
        auth: {
          user: Settings.apis.web.user,
          pass: Settings.apis.web.pass,
          sendImmediately: true,
        },
        jar: false,
        timeout: MAX_HTTP_REQUEST_LENGTH,
      },
      function (error, res, body) {
        updateMetric('setDoc', error, res)
        if (error != null) {
          logger.error(
            { err: error, project_id, doc_id },
            'web API request failed'
          )
          return callback(new Error('error connecting to web API'))
        }
        if (res.statusCode >= 200 && res.statusCode < 300) {
          return callback(null)
        } else if (res.statusCode === 404) {
          return callback(new Errors.NotFoundError(`doc not found: ${urlPath}`))
        } else {
          return callback(
            new Error(`error accessing web API: ${urlPath} ${res.statusCode}`)
          )
        }
      }
    )
  },
}
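
Since callers of getDoc/setDoc hold the 30-second doc lock, the 5s timeout and two-attempt requestretry policy above bound the worst case well under the lock TTL. A sketch of a getDoc call, using only the function defined above (ids are made up):

// Sketch: load a doc from web. Ids are made up for illustration.
PersistenceManager.getDoc(
  'project-id-123',
  'doc-id-456',
  function (error, lines, version, ranges, pathname) {
    if (error != null) {
      return console.error('failed to load doc from web:', error.message)
    }
    console.log('loaded %d lines at version %d from %s', lines.length, version, pathname)
  }
)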
62
services/document-updater/app/js/Profiler.js
Normal file

@@ -0,0 +1,62 @@
/* eslint-disable
    no-unused-vars,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS206: Consider reworking classes to avoid initClass
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
let Profiler
const Settings = require('@overleaf/settings')
const logger = require('logger-sharelatex')

const deltaMs = function (ta, tb) {
  const nanoSeconds = (ta[0] - tb[0]) * 1e9 + (ta[1] - tb[1])
  const milliSeconds = Math.floor(nanoSeconds * 1e-6)
  return milliSeconds
}

module.exports = Profiler = (function () {
  Profiler = class Profiler {
    static initClass() {
      this.prototype.LOG_CUTOFF_TIME = 1000
    }

    constructor(name, args) {
      this.name = name
      this.args = args
      this.t0 = this.t = process.hrtime()
      this.start = new Date()
      this.updateTimes = []
    }

    log(label) {
      const t1 = process.hrtime()
      const dtMilliSec = deltaMs(t1, this.t)
      this.t = t1
      this.updateTimes.push([label, dtMilliSec]) // timings in ms
      return this // make it chainable
    }

    end(message) {
      const totalTime = deltaMs(this.t, this.t0)
      if (totalTime > this.LOG_CUTOFF_TIME) {
        // log anything greater than cutoff
        const args = {}
        for (const k in this.args) {
          const v = this.args[k]
          args[k] = v
        }
        args.updateTimes = this.updateTimes
        args.start = this.start
        args.end = new Date()
        logger.log(args, this.name)
      }
      return totalTime
    }
  }
  Profiler.initClass()
  return Profiler
})()
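
deltaMs turns a pair of process.hrtime() tuples into whole milliseconds: (Δseconds × 1e9 + Δnanoseconds) × 1e-6, floored. A sketch of the intended chainable usage (the two work steps are hypothetical):

// Sketch: profile a two-step operation. Only profiles slower than
// LOG_CUTOFF_TIME (1000ms total) get logged.
const profile = new Profiler('exampleOperation', { doc_id: 'doc-id-123' }) // args made up
stepOne() // hypothetical work
profile.log('stepOne')
stepTwo() // hypothetical work
const totalMs = profile.log('stepTwo').end()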
135
services/document-updater/app/js/ProjectFlusher.js
Normal file

@@ -0,0 +1,135 @@
/* eslint-disable
    camelcase,
    no-unused-vars,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * DS205: Consider reworking code to avoid use of IIFEs
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
const request = require('request')
const Settings = require('@overleaf/settings')
const RedisManager = require('./RedisManager')
const { rclient } = RedisManager
const docUpdaterKeys = Settings.redis.documentupdater.key_schema
const async = require('async')
const ProjectManager = require('./ProjectManager')
const _ = require('lodash')
const logger = require('logger-sharelatex')

var ProjectFlusher = {
  // iterate over keys asynchronously using redis scan (non-blocking)
  // handle all the cluster nodes or single redis server
  _getKeys(pattern, limit, callback) {
    const nodes = (typeof rclient.nodes === 'function'
      ? rclient.nodes('master')
      : undefined) || [rclient]
    const doKeyLookupForNode = (node, cb) =>
      ProjectFlusher._getKeysFromNode(node, pattern, limit, cb)
    return async.concatSeries(nodes, doKeyLookupForNode, callback)
  },

  _getKeysFromNode(node, pattern, limit, callback) {
    if (limit == null) {
      limit = 1000
    }
    let cursor = 0 // redis iterator
    const keySet = {} // use hash to avoid duplicate results
    const batchSize = limit != null ? Math.min(limit, 1000) : 1000
    // scan over all keys looking for pattern
    var doIteration = (
      cb // avoid hitting redis too hard
    ) =>
      node.scan(
        cursor,
        'MATCH',
        pattern,
        'COUNT',
        batchSize,
        function (error, reply) {
          let keys
          if (error != null) {
            return callback(error)
          }
          ;[cursor, keys] = Array.from(reply)
          for (const key of Array.from(keys)) {
            keySet[key] = true
          }
          keys = Object.keys(keySet)
          const noResults = cursor === '0' // redis returns string results not numeric
          const limitReached = limit != null && keys.length >= limit
          if (noResults || limitReached) {
            return callback(null, keys)
          } else {
            return setTimeout(doIteration, 10)
          }
        }
      )
    return doIteration()
  },

  // extract ids from keys like DocsWithHistoryOps:57fd0b1f53a8396d22b2c24b
  // or docsInProject:{57fd0b1f53a8396d22b2c24b} (for redis cluster)
  _extractIds(keyList) {
    const ids = (() => {
      const result = []
      for (const key of Array.from(keyList)) {
        const m = key.match(/:\{?([0-9a-f]{24})\}?/) // extract object id
        result.push(m[1])
      }
      return result
    })()
    return ids
  },

  flushAllProjects(options, callback) {
    logger.log({ options }, 'flushing all projects')
    return ProjectFlusher._getKeys(
      docUpdaterKeys.docsInProject({ project_id: '*' }),
      options.limit,
      function (error, project_keys) {
        if (error != null) {
          logger.err({ err: error }, 'error getting keys for flushing')
          return callback(error)
        }
        const project_ids = ProjectFlusher._extractIds(project_keys)
        if (options.dryRun) {
          return callback(null, project_ids)
        }
        const jobs = _.map(
          project_ids,
          project_id => cb =>
            ProjectManager.flushAndDeleteProjectWithLocks(
              project_id,
              { background: true },
              cb
            )
        )
        return async.parallelLimit(
          async.reflectAll(jobs),
          options.concurrency,
          function (error, results) {
            const success = []
            const failure = []
            _.each(results, function (result, i) {
              if (result.error != null) {
                return failure.push(project_ids[i])
              } else {
                return success.push(project_ids[i])
              }
            })
            logger.log({ success, failure }, 'finished flushing all projects')
            return callback(error, { success, failure })
          }
        )
      }
    )
  },
}

module.exports = ProjectFlusher
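
A dry run returns the matching project ids without flushing anything; with dryRun false the callback instead receives { success, failure } lists. A minimal sketch of the dry-run case:

// Sketch: list up to 100 projects currently held in redis, without flushing.
ProjectFlusher.flushAllProjects(
  { limit: 100, concurrency: 5, dryRun: true },
  function (error, projectIds) {
    if (error != null) {
      return console.error('error scanning project keys:', error.message)
    }
    console.log('projects in redis:', projectIds.length)
  }
)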
176
services/document-updater/app/js/ProjectHistoryRedisManager.js
Normal file

@@ -0,0 +1,176 @@
/* eslint-disable
    camelcase,
    handle-callback-err,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * DS103: Rewrite code to no longer use __guard__
 * DS201: Simplify complex destructure assignments
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
let ProjectHistoryRedisManager
const Settings = require('@overleaf/settings')
const projectHistoryKeys = __guard__(
  Settings.redis != null ? Settings.redis.project_history : undefined,
  x => x.key_schema
)
const rclient = require('@overleaf/redis-wrapper').createClient(
  Settings.redis.project_history
)
const logger = require('logger-sharelatex')
const metrics = require('./Metrics')

module.exports = ProjectHistoryRedisManager = {
  queueOps(project_id, ...rest) {
    // Record metric for ops pushed onto queue
    const adjustedLength = Math.max(rest.length, 1)
    const ops = rest.slice(0, adjustedLength - 1)
    const val = rest[adjustedLength - 1]
    const callback = val != null ? val : function (error, projectUpdateCount) {}
    for (const op of Array.from(ops)) {
      metrics.summary('redis.projectHistoryOps', op.length, { status: 'push' })
    }
    const multi = rclient.multi()
    // Push the ops onto the project history queue
    multi.rpush(
      projectHistoryKeys.projectHistoryOps({ project_id }),
      ...Array.from(ops)
    )
    // To record the age of the oldest op on the queue set a timestamp if not
    // already present (SETNX).
    multi.setnx(
      projectHistoryKeys.projectHistoryFirstOpTimestamp({ project_id }),
      Date.now()
    )
    return multi.exec(function (error, result) {
      if (error != null) {
        return callback(error)
      }
      // return the number of entries pushed onto the project history queue
      return callback(null, result[0])
    })
  },

  queueRenameEntity(
    project_id,
    projectHistoryId,
    entity_type,
    entity_id,
    user_id,
    projectUpdate,
    callback
  ) {
    projectUpdate = {
      pathname: projectUpdate.pathname,
      new_pathname: projectUpdate.newPathname,
      meta: {
        user_id,
        ts: new Date(),
      },
      version: projectUpdate.version,
      projectHistoryId,
    }
    projectUpdate[entity_type] = entity_id

    logger.log(
      { project_id, projectUpdate },
      'queue rename operation to project-history'
    )
    const jsonUpdate = JSON.stringify(projectUpdate)

    return ProjectHistoryRedisManager.queueOps(project_id, jsonUpdate, callback)
  },

  queueAddEntity(
    project_id,
    projectHistoryId,
    entity_type,
    entity_id,
    user_id,
    projectUpdate,
    callback
  ) {
    if (callback == null) {
      callback = function (error) {}
    }
    projectUpdate = {
      pathname: projectUpdate.pathname,
      docLines: projectUpdate.docLines,
      url: projectUpdate.url,
      meta: {
        user_id,
        ts: new Date(),
      },
      version: projectUpdate.version,
      projectHistoryId,
    }
    projectUpdate[entity_type] = entity_id

    logger.log(
      { project_id, projectUpdate },
      'queue add operation to project-history'
    )
    const jsonUpdate = JSON.stringify(projectUpdate)

    return ProjectHistoryRedisManager.queueOps(project_id, jsonUpdate, callback)
  },

  queueResyncProjectStructure(
    project_id,
    projectHistoryId,
    docs,
    files,
    callback
  ) {
    logger.log({ project_id, docs, files }, 'queue project structure resync')
    const projectUpdate = {
      resyncProjectStructure: { docs, files },
      projectHistoryId,
      meta: {
        ts: new Date(),
      },
    }
    const jsonUpdate = JSON.stringify(projectUpdate)
    return ProjectHistoryRedisManager.queueOps(project_id, jsonUpdate, callback)
  },

  queueResyncDocContent(
    project_id,
    projectHistoryId,
    doc_id,
    lines,
    version,
    pathname,
    callback
  ) {
    logger.log(
      { project_id, doc_id, lines, version, pathname },
      'queue doc content resync'
    )
    const projectUpdate = {
      resyncDocContent: {
        content: lines.join('\n'),
        version,
      },
      projectHistoryId,
      path: pathname,
      doc: doc_id,
      meta: {
        ts: new Date(),
      },
    }
    const jsonUpdate = JSON.stringify(projectUpdate)
    return ProjectHistoryRedisManager.queueOps(project_id, jsonUpdate, callback)
  },
}

function __guard__(value, transform) {
  return typeof value !== 'undefined' && value !== null
    ? transform(value)
    : undefined
}
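
Each queue* helper serialises a projectUpdate and pushes it with queueOps, which also SETNXes a first-op timestamp so the age of the oldest queued op can be monitored. A sketch of queueing a doc rename (all ids and paths made up):

// Sketch: queue a rename update for project-history. Values are made up.
ProjectHistoryRedisManager.queueRenameEntity(
  'project-id-123',
  'history-id-456',
  'doc',
  'doc-id-789',
  'user-id-1',
  { pathname: '/old.tex', newPathname: '/new.tex', version: '12.0' },
  function (error, queueLength) {
    if (error != null) {
      return console.error(error)
    }
    console.log('ops now queued for project-history:', queueLength)
  }
)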
301
services/document-updater/app/js/ProjectManager.js
Normal file

@@ -0,0 +1,301 @@
const RedisManager = require('./RedisManager')
const ProjectHistoryRedisManager = require('./ProjectHistoryRedisManager')
const DocumentManager = require('./DocumentManager')
const HistoryManager = require('./HistoryManager')
const async = require('async')
const logger = require('logger-sharelatex')
const Metrics = require('./Metrics')
const Errors = require('./Errors')

module.exports = {
  flushProjectWithLocks,
  flushAndDeleteProjectWithLocks,
  queueFlushAndDeleteProject,
  getProjectDocsTimestamps,
  getProjectDocsAndFlushIfOld,
  clearProjectState,
  updateProjectWithLocks,
}

function flushProjectWithLocks(projectId, _callback) {
  const timer = new Metrics.Timer('projectManager.flushProjectWithLocks')
  const callback = function (...args) {
    timer.done()
    _callback(...args)
  }

  RedisManager.getDocIdsInProject(projectId, (error, docIds) => {
    if (error) {
      return callback(error)
    }
    const errors = []
    const jobs = docIds.map(docId => callback => {
      DocumentManager.flushDocIfLoadedWithLock(projectId, docId, error => {
        if (error instanceof Errors.NotFoundError) {
          logger.warn(
            { err: error, projectId, docId },
            'found deleted doc when flushing'
          )
          callback()
        } else if (error) {
          logger.error({ err: error, projectId, docId }, 'error flushing doc')
          errors.push(error)
          callback()
        } else {
          callback()
        }
      })
    })

    logger.log({ projectId, docIds }, 'flushing docs')
    async.series(jobs, () => {
      if (errors.length > 0) {
        callback(new Error('Errors flushing docs. See log for details'))
      } else {
        callback(null)
      }
    })
  })
}

function flushAndDeleteProjectWithLocks(projectId, options, _callback) {
  const timer = new Metrics.Timer(
    'projectManager.flushAndDeleteProjectWithLocks'
  )
  const callback = function (...args) {
    timer.done()
    _callback(...args)
  }

  RedisManager.getDocIdsInProject(projectId, (error, docIds) => {
    if (error) {
      return callback(error)
    }
    const errors = []
    const jobs = docIds.map(docId => callback => {
      DocumentManager.flushAndDeleteDocWithLock(projectId, docId, {}, error => {
        if (error) {
          logger.error({ err: error, projectId, docId }, 'error deleting doc')
          errors.push(error)
        }
        callback()
      })
    })

    logger.log({ projectId, docIds }, 'deleting docs')
    async.series(jobs, () =>
      // When deleting the project here we want to ensure that project
      // history is completely flushed because the project may be
      // deleted in web after this call completes, and so further
      // attempts to flush would fail after that.
      HistoryManager.flushProjectChanges(projectId, options, error => {
        if (errors.length > 0) {
          callback(new Error('Errors deleting docs. See log for details'))
        } else if (error) {
          callback(error)
        } else {
          callback(null)
        }
      })
    )
  })
}

function queueFlushAndDeleteProject(projectId, callback) {
  RedisManager.queueFlushAndDeleteProject(projectId, error => {
    if (error) {
      logger.error(
        { projectId, error },
        'error adding project to flush and delete queue'
      )
      return callback(error)
    }
    Metrics.inc('queued-delete')
    callback()
  })
}

function getProjectDocsTimestamps(projectId, callback) {
  RedisManager.getDocIdsInProject(projectId, (error, docIds) => {
    if (error) {
      return callback(error)
    }
    if (docIds.length === 0) {
      return callback(null, [])
    }
    RedisManager.getDocTimestamps(docIds, (error, timestamps) => {
      if (error) {
        return callback(error)
      }
      callback(null, timestamps)
    })
  })
}

function getProjectDocsAndFlushIfOld(
  projectId,
  projectStateHash,
  excludeVersions,
  _callback
) {
  const timer = new Metrics.Timer('projectManager.getProjectDocsAndFlushIfOld')
  const callback = function (...args) {
    timer.done()
    _callback(...args)
  }

  RedisManager.checkOrSetProjectState(
    projectId,
    projectStateHash,
    (error, projectStateChanged) => {
      if (error) {
        logger.error(
          { err: error, projectId },
          'error getting/setting project state in getProjectDocsAndFlushIfOld'
        )
        return callback(error)
      }
      // we can't return docs if project structure has changed
      if (projectStateChanged) {
        return callback(
          Errors.ProjectStateChangedError('project state changed')
        )
      }
      // project structure hasn't changed, return doc content from redis
      RedisManager.getDocIdsInProject(projectId, (error, docIds) => {
        if (error) {
          logger.error(
            { err: error, projectId },
            'error getting doc ids in getProjectDocs'
          )
          return callback(error)
        }
        // get the doc lines from redis
        const jobs = docIds.map(docId => cb => {
          DocumentManager.getDocAndFlushIfOldWithLock(
            projectId,
            docId,
            (err, lines, version) => {
              if (err) {
                logger.error(
                  { err, projectId, docId },
                  'error getting project doc lines in getProjectDocsAndFlushIfOld'
                )
                return cb(err)
              }
              const doc = { _id: docId, lines, v: version } // create a doc object to return
              cb(null, doc)
            }
          )
        })
        async.series(jobs, (error, docs) => {
          if (error) {
            return callback(error)
          }
          callback(null, docs)
        })
      })
    }
  )
}

function clearProjectState(projectId, callback) {
  RedisManager.clearProjectState(projectId, callback)
}

function updateProjectWithLocks(
  projectId,
  projectHistoryId,
  userId,
  updates,
  projectVersion,
  _callback
) {
  const timer = new Metrics.Timer('projectManager.updateProject')
  const callback = function (...args) {
    timer.done()
    _callback(...args)
  }

  let projectSubversion = 0 // project versions can have multiple operations
  let projectOpsLength = 0

  function handleUpdate(update, cb) {
    update.version = `${projectVersion}.${projectSubversion++}`
    switch (update.type) {
      case 'add-doc':
        ProjectHistoryRedisManager.queueAddEntity(
          projectId,
          projectHistoryId,
          'doc',
          update.id,
          userId,
          update,
          (error, count) => {
            projectOpsLength = count
            cb(error)
          }
        )
        break
      case 'rename-doc':
        DocumentManager.renameDocWithLock(
          projectId,
          update.id,
          userId,
          update,
          projectHistoryId,
          (error, count) => {
            projectOpsLength = count
            cb(error)
          }
        )
        break
      case 'add-file':
        ProjectHistoryRedisManager.queueAddEntity(
          projectId,
          projectHistoryId,
          'file',
          update.id,
          userId,
          update,
          (error, count) => {
            projectOpsLength = count
            cb(error)
          }
        )
        break
      case 'rename-file':
        ProjectHistoryRedisManager.queueRenameEntity(
          projectId,
          projectHistoryId,
          'file',
          update.id,
          userId,
          update,
          (error, count) => {
            projectOpsLength = count
            cb(error)
          }
        )
        break
      default:
        cb(new Error(`Unknown update type: ${update.type}`))
    }
  }

  async.eachSeries(updates, handleUpdate, error => {
    if (error) {
      return callback(error)
    }
    if (
      HistoryManager.shouldFlushHistoryOps(
        projectOpsLength,
        updates.length,
        HistoryManager.FLUSH_PROJECT_EVERY_N_OPS
      )
    ) {
      HistoryManager.flushProjectChangesAsync(projectId)
    }
    callback()
  })
}
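
updateProjectWithLocks stamps each structural update with a subversion ("42.0", "42.1", ...) and dispatches on update.type; an unknown type fails the whole batch. A sketch (ids and version made up):

// Sketch: apply one rename-doc structural update at project version 42.
ProjectManager.updateProjectWithLocks(
  'project-id-123',
  'history-id-456',
  'user-id-1',
  [
    {
      type: 'rename-doc',
      id: 'doc-id-789',
      pathname: '/old.tex',
      newPathname: '/new.tex',
    },
  ],
  42,
  function (error) {
    if (error) {
      return console.error('project update failed:', error.message)
    }
  }
)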
163
services/document-updater/app/js/RangesManager.js
Normal file

@@ -0,0 +1,163 @@
/* eslint-disable
    camelcase,
    handle-callback-err,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
let RangesManager
const RangesTracker = require('./RangesTracker')
const logger = require('logger-sharelatex')
const _ = require('lodash')

module.exports = RangesManager = {
  MAX_COMMENTS: 500,
  MAX_CHANGES: 2000,

  applyUpdate(project_id, doc_id, entries, updates, newDocLines, callback) {
    let error
    if (entries == null) {
      entries = {}
    }
    if (updates == null) {
      updates = []
    }
    if (callback == null) {
      callback = function (error, new_entries, ranges_were_collapsed) {}
    }
    const { changes, comments } = _.cloneDeep(entries)
    const rangesTracker = new RangesTracker(changes, comments)
    const emptyRangeCountBefore = RangesManager._emptyRangesCount(rangesTracker)
    for (const update of Array.from(updates)) {
      rangesTracker.track_changes = !!update.meta.tc
      if (update.meta.tc) {
        rangesTracker.setIdSeed(update.meta.tc)
      }
      for (const op of Array.from(update.op)) {
        try {
          rangesTracker.applyOp(op, {
            user_id: update.meta != null ? update.meta.user_id : undefined,
          })
        } catch (error1) {
          error = error1
          return callback(error)
        }
      }
    }

    if (
      (rangesTracker.changes != null
        ? rangesTracker.changes.length
        : undefined) > RangesManager.MAX_CHANGES ||
      (rangesTracker.comments != null
        ? rangesTracker.comments.length
        : undefined) > RangesManager.MAX_COMMENTS
    ) {
      return callback(new Error('too many comments or tracked changes'))
    }

    try {
      // This is a consistency check that all of our ranges and
      // comments still match the corresponding text
      rangesTracker.validate(newDocLines.join('\n'))
    } catch (error2) {
      error = error2
      logger.error(
        { err: error, project_id, doc_id, newDocLines, updates },
        'error validating ranges'
      )
      return callback(error)
    }

    const emptyRangeCountAfter = RangesManager._emptyRangesCount(rangesTracker)
    const rangesWereCollapsed = emptyRangeCountAfter > emptyRangeCountBefore
    const response = RangesManager._getRanges(rangesTracker)
    logger.log(
      {
        project_id,
        doc_id,
        changesCount:
          response.changes != null ? response.changes.length : undefined,
        commentsCount:
          response.comments != null ? response.comments.length : undefined,
        rangesWereCollapsed,
      },
      'applied updates to ranges'
    )
    return callback(null, response, rangesWereCollapsed)
  },

  acceptChanges(change_ids, ranges, callback) {
    if (callback == null) {
      callback = function (error, ranges) {}
    }
    const { changes, comments } = ranges
    logger.log(`accepting ${change_ids.length} changes in ranges`)
    const rangesTracker = new RangesTracker(changes, comments)
    rangesTracker.removeChangeIds(change_ids)
    const response = RangesManager._getRanges(rangesTracker)
    return callback(null, response)
  },

  deleteComment(comment_id, ranges, callback) {
    if (callback == null) {
      callback = function (error, ranges) {}
    }
    const { changes, comments } = ranges
    logger.log({ comment_id }, 'deleting comment in ranges')
    const rangesTracker = new RangesTracker(changes, comments)
    rangesTracker.removeCommentId(comment_id)
    const response = RangesManager._getRanges(rangesTracker)
    return callback(null, response)
  },

  _getRanges(rangesTracker) {
    // Return the minimal data structure needed, since most documents won't have any
    // changes or comments
    let response = {}
    if (
      (rangesTracker.changes != null
        ? rangesTracker.changes.length
        : undefined) > 0
    ) {
      if (response == null) {
        response = {}
      }
      response.changes = rangesTracker.changes
    }
    if (
      (rangesTracker.comments != null
        ? rangesTracker.comments.length
        : undefined) > 0
    ) {
      if (response == null) {
        response = {}
      }
      response.comments = rangesTracker.comments
    }
    return response
  },

  _emptyRangesCount(ranges) {
    let count = 0
    for (const comment of Array.from(ranges.comments || [])) {
      if (comment.op.c === '') {
        count++
      }
    }
    for (const change of Array.from(ranges.changes || [])) {
      if (change.op.i != null) {
        if (change.op.i === '') {
          count++
        }
      }
    }
    return count
  },
}
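
applyUpdate replays each update's ops through a fresh RangesTracker (cloning the stored ranges first), enforces the MAX_CHANGES/MAX_COMMENTS caps, and validates the resulting ranges against the new document text. A sketch with a single tracked-change insert (all values made up):

// Sketch: one tracked insert of 'Hello' at offset 0 into empty ranges.
RangesManager.applyUpdate(
  'project-id-123',
  'doc-id-456',
  { changes: [], comments: [] },
  [
    {
      meta: { user_id: 'user-id-1', tc: 'id-seed-123456' }, // tc enables track-changes
      op: [{ i: 'Hello', p: 0 }],
    },
  ],
  ['Hello world'], // doc lines after the op was applied
  function (error, newRanges, rangesWereCollapsed) {
    if (error != null) {
      return console.error(error)
    }
    console.log(newRanges.changes.length, 'tracked change(s)') // 1
  }
)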
849
services/document-updater/app/js/RangesTracker.js
Normal file

@@ -0,0 +1,849 @@
/* eslint-disable
|
||||
camelcase,
|
||||
no-return-assign,
|
||||
no-undef,
|
||||
no-unused-vars,
|
||||
*/
|
||||
// TODO: This file was created by bulk-decaffeinate.
|
||||
// Fix any style issues and re-enable lint.
|
||||
/*
|
||||
* decaffeinate suggestions:
|
||||
* DS101: Remove unnecessary use of Array.from
|
||||
* DS102: Remove unnecessary code created because of implicit returns
|
||||
* DS205: Consider reworking code to avoid use of IIFEs
|
||||
* DS207: Consider shorter variations of null checks
|
||||
* Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
|
||||
*/
|
||||
// This file is shared between document-updater and web, so that the server and client share
|
||||
// an identical track changes implementation. Do not edit it directly in web or document-updater,
|
||||
// instead edit it at https://github.com/sharelatex/ranges-tracker, where it has a suite of tests
|
||||
const load = function () {
|
||||
let RangesTracker
|
||||
return (RangesTracker = class RangesTracker {
|
||||
// The purpose of this class is to track a set of inserts and deletes to a document, like
|
||||
// track changes in Word. We store these as a set of ShareJs style ranges:
|
||||
// {i: "foo", p: 42} # Insert 'foo' at offset 42
|
||||
// {d: "bar", p: 37} # Delete 'bar' at offset 37
|
||||
// We only track the inserts and deletes, not the whole document, but by being given all
|
||||
// updates that are applied to a document, we can update these appropriately.
|
||||
//
|
||||
// Note that the set of inserts and deletes we store applies to the document as-is at the moment.
|
||||
// So inserts correspond to text which is in the document, while deletes correspond to text which
|
||||
// is no longer there, so their lengths do not affect the position of later offsets.
|
||||
// E.g.
|
||||
// this is the current text of the document
|
||||
// |-----| |
|
||||
// {i: "current ", p:12} -^ ^- {d: "old ", p: 31}
|
||||
//
|
||||
// Track changes rules (should be consistent with Word):
|
||||
// * When text is inserted at a delete, the text goes to the left of the delete
|
||||
// I.e. "foo|bar" -> "foobaz|bar", where | is the delete, and 'baz' is inserted
|
||||
// * Deleting content flagged as 'inserted' does not create a new delete marker, it only
|
||||
// removes the insert marker. E.g.
|
||||
// * "abdefghijkl" -> "abfghijkl" when 'de' is deleted. No delete marker added
|
||||
// |---| <- inserted |-| <- inserted
|
||||
// * Deletes overlapping regular text and inserted text will insert a delete marker for the
|
||||
// regular text:
|
||||
// "abcdefghijkl" -> "abcdejkl" when 'fghi' is deleted
|
||||
// |----| |--||
|
||||
// ^- inserted 'bcdefg' \ ^- deleted 'hi'
|
||||
// \--inserted 'bcde'
|
||||
// * Deletes overlapping other deletes are merged. E.g.
|
||||
// "abcghijkl" -> "ahijkl" when 'bcg is deleted'
|
||||
// | <- delete 'def' | <- delete 'bcdefg'
|
||||
// * Deletes by another user will consume deletes by the first user
|
||||
// * Inserts by another user will not combine with inserts by the first user. If they are in the
|
||||
// middle of a previous insert by the first user, the original insert will be split into two.
|
||||
constructor(changes, comments) {
|
||||
if (changes == null) {
|
||||
changes = []
|
||||
}
|
||||
this.changes = changes
|
||||
if (comments == null) {
|
||||
comments = []
|
||||
}
|
||||
this.comments = comments
|
||||
this.setIdSeed(RangesTracker.generateIdSeed())
|
||||
this.resetDirtyState()
|
||||
}
|
||||
|
||||
getIdSeed() {
|
||||
return this.id_seed
|
||||
}
|
||||
|
||||
setIdSeed(seed) {
|
||||
this.id_seed = seed
|
||||
return (this.id_increment = 0)
|
||||
}
|
||||
|
||||
static generateIdSeed() {
|
||||
// Generate a the first 18 characters of Mongo ObjectId, leaving 6 for the increment part
|
||||
// Reference: https://github.com/dreampulse/ObjectId.js/blob/master/src/main/javascript/Objectid.js
|
||||
const pid = Math.floor(Math.random() * 32767).toString(16)
|
||||
const machine = Math.floor(Math.random() * 16777216).toString(16)
|
||||
const timestamp = Math.floor(new Date().valueOf() / 1000).toString(16)
|
||||
return (
|
||||
'00000000'.substr(0, 8 - timestamp.length) +
|
||||
timestamp +
|
||||
'000000'.substr(0, 6 - machine.length) +
|
||||
machine +
|
||||
'0000'.substr(0, 4 - pid.length) +
|
||||
pid
|
||||
)
|
||||
}
|
||||
|
||||
static generateId() {
|
||||
return this.generateIdSeed() + '000001'
|
||||
}
|
||||
|
||||
newId() {
|
||||
this.id_increment++
|
||||
const increment = this.id_increment.toString(16)
|
||||
const id =
|
||||
this.id_seed + '000000'.substr(0, 6 - increment.length) + increment
|
||||
return id
|
||||
}
|
||||
|
||||
getComment(comment_id) {
|
||||
let comment = null
|
||||
for (const c of Array.from(this.comments)) {
|
||||
if (c.id === comment_id) {
|
||||
comment = c
|
||||
break
|
||||
}
|
||||
}
|
||||
return comment
|
||||
}
|
||||
|
||||
removeCommentId(comment_id) {
|
||||
const comment = this.getComment(comment_id)
|
||||
if (comment == null) {
|
||||
return
|
||||
}
|
||||
this.comments = this.comments.filter(c => c.id !== comment_id)
|
||||
return this._markAsDirty(comment, 'comment', 'removed')
|
||||
}
|
||||
|
||||
moveCommentId(comment_id, position, text) {
|
||||
return (() => {
|
||||
const result = []
|
||||
for (const comment of Array.from(this.comments)) {
|
||||
if (comment.id === comment_id) {
|
||||
comment.op.p = position
|
||||
comment.op.c = text
|
||||
result.push(this._markAsDirty(comment, 'comment', 'moved'))
|
||||
} else {
|
||||
result.push(undefined)
|
||||
}
|
||||
}
|
||||
return result
|
||||
})()
|
||||
}
|
||||
|
||||
getChange(change_id) {
|
||||
let change = null
|
||||
for (const c of Array.from(this.changes)) {
|
||||
if (c.id === change_id) {
|
||||
change = c
|
||||
break
|
||||
}
|
||||
}
|
||||
return change
|
||||
}
|
||||
|
||||
getChanges(change_ids) {
|
||||
const changes_response = []
|
||||
const ids_map = {}
|
||||
|
||||
for (const change_id of Array.from(change_ids)) {
|
||||
ids_map[change_id] = true
|
||||
}
|
||||
|
||||
for (const change of Array.from(this.changes)) {
|
||||
if (ids_map[change.id]) {
|
||||
delete ids_map[change.id]
|
||||
changes_response.push(change)
|
||||
}
|
||||
}
|
||||
|
||||
return changes_response
|
||||
}
|
||||
|
||||
removeChangeId(change_id) {
|
||||
const change = this.getChange(change_id)
|
||||
if (change == null) {
|
||||
return
|
||||
}
|
||||
return this._removeChange(change)
|
||||
}
|
||||
|
||||
removeChangeIds(change_to_remove_ids) {
|
||||
if (
|
||||
!(change_to_remove_ids != null
|
||||
? change_to_remove_ids.length
|
||||
: undefined) > 0
|
||||
) {
|
||||
return
|
||||
}
|
||||
const i = this.changes.length
|
||||
const remove_change_id = {}
|
||||
for (const change_id of Array.from(change_to_remove_ids)) {
|
||||
remove_change_id[change_id] = true
|
||||
}
|
||||
|
||||
const remaining_changes = []
|
||||
|
||||
for (const change of Array.from(this.changes)) {
|
||||
if (remove_change_id[change.id]) {
|
||||
delete remove_change_id[change.id]
|
||||
this._markAsDirty(change, 'change', 'removed')
|
||||
} else {
|
||||
remaining_changes.push(change)
|
||||
}
|
||||
}
|
||||
|
||||
return (this.changes = remaining_changes)
|
||||
}
|
||||
|
||||
validate(text) {
|
||||
let content
|
||||
for (const change of Array.from(this.changes)) {
|
||||
if (change.op.i != null) {
|
||||
content = text.slice(change.op.p, change.op.p + change.op.i.length)
|
||||
if (content !== change.op.i) {
|
||||
throw new Error(
|
||||
`Change (${JSON.stringify(
|
||||
change
|
||||
)}) doesn't match text (${JSON.stringify(content)})`
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
for (const comment of Array.from(this.comments)) {
|
||||
content = text.slice(comment.op.p, comment.op.p + comment.op.c.length)
|
||||
if (content !== comment.op.c) {
|
||||
throw new Error(
|
||||
`Comment (${JSON.stringify(
|
||||
comment
|
||||
)}) doesn't match text (${JSON.stringify(content)})`
|
||||
)
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
applyOp(op, metadata) {
|
||||
if (metadata == null) {
|
||||
metadata = {}
|
||||
}
|
||||
if (metadata.ts == null) {
|
||||
metadata.ts = new Date()
|
||||
}
|
||||
// Apply an op that has been applied to the document to our changes to keep them up to date
|
||||
if (op.i != null) {
|
||||
this.applyInsertToChanges(op, metadata)
|
||||
return this.applyInsertToComments(op)
|
||||
} else if (op.d != null) {
|
||||
this.applyDeleteToChanges(op, metadata)
|
||||
return this.applyDeleteToComments(op)
|
||||
} else if (op.c != null) {
|
||||
return this.addComment(op, metadata)
|
||||
} else {
|
||||
throw new Error('unknown op type')
|
||||
}
|
||||
}
|
||||
|
||||
applyOps(ops, metadata) {
|
||||
if (metadata == null) {
|
||||
metadata = {}
|
||||
}
|
||||
return Array.from(ops).map(op => this.applyOp(op, metadata))
|
||||
}
|
||||
|
||||
addComment(op, metadata) {
|
||||
const existing = this.getComment(op.t)
|
||||
if (existing != null) {
|
||||
this.moveCommentId(op.t, op.p, op.c)
|
||||
return existing
|
||||
} else {
|
||||
let comment
|
||||
this.comments.push(
|
||||
(comment = {
|
||||
id: op.t || this.newId(),
|
||||
op: {
|
||||
// Copy because we'll modify in place
|
||||
c: op.c,
|
||||
p: op.p,
|
||||
t: op.t,
|
||||
},
|
||||
metadata,
|
||||
})
|
||||
)
|
||||
this._markAsDirty(comment, 'comment', 'added')
|
||||
return comment
|
||||
}
|
||||
}
|
||||
|
||||
applyInsertToComments(op) {
|
||||
return (() => {
|
||||
const result = []
|
||||
for (const comment of Array.from(this.comments)) {
|
||||
if (op.p <= comment.op.p) {
|
||||
comment.op.p += op.i.length
|
||||
result.push(this._markAsDirty(comment, 'comment', 'moved'))
|
||||
} else if (op.p < comment.op.p + comment.op.c.length) {
|
||||
const offset = op.p - comment.op.p
|
||||
comment.op.c =
|
||||
comment.op.c.slice(0, +(offset - 1) + 1 || undefined) +
|
||||
op.i +
|
||||
comment.op.c.slice(offset)
|
||||
result.push(this._markAsDirty(comment, 'comment', 'moved'))
|
||||
} else {
|
||||
result.push(undefined)
|
||||
}
|
||||
}
|
||||
return result
|
||||
})()
|
||||
}
|
||||
|
||||
applyDeleteToComments(op) {
|
||||
const op_start = op.p
|
||||
const op_length = op.d.length
|
||||
const op_end = op.p + op_length
|
||||
return (() => {
|
||||
const result = []
|
||||
for (const comment of Array.from(this.comments)) {
|
||||
const comment_start = comment.op.p
|
||||
const comment_end = comment.op.p + comment.op.c.length
|
||||
const comment_length = comment_end - comment_start
|
||||
if (op_end <= comment_start) {
|
||||
// delete is fully before comment
|
||||
comment.op.p -= op_length
|
||||
result.push(this._markAsDirty(comment, 'comment', 'moved'))
|
||||
} else if (op_start >= comment_end) {
|
||||
// delete is fully after comment, nothing to do
|
||||
} else {
|
||||
// delete and comment overlap
|
||||
var remaining_after, remaining_before
|
||||
if (op_start <= comment_start) {
|
||||
remaining_before = ''
|
||||
} else {
|
||||
remaining_before = comment.op.c.slice(0, op_start - comment_start)
|
||||
}
|
||||
if (op_end >= comment_end) {
|
||||
remaining_after = ''
|
||||
} else {
|
||||
remaining_after = comment.op.c.slice(op_end - comment_start)
|
||||
}
|
||||
|
||||
// Check deleted content matches delete op
|
||||
const deleted_comment = comment.op.c.slice(
|
||||
remaining_before.length,
|
||||
comment_length - remaining_after.length
|
||||
)
|
||||
const offset = Math.max(0, comment_start - op_start)
|
||||
const deleted_op_content = op.d
|
||||
.slice(offset)
|
||||
.slice(0, deleted_comment.length)
|
||||
if (deleted_comment !== deleted_op_content) {
|
||||
throw new Error('deleted content does not match comment content')
|
||||
}
|
||||
|
||||
comment.op.p = Math.min(comment_start, op_start)
|
||||
comment.op.c = remaining_before + remaining_after
|
||||
result.push(this._markAsDirty(comment, 'comment', 'moved'))
|
||||
}
|
||||
}
|
||||
return result
|
||||
})()
|
||||
}

    applyInsertToChanges(op, metadata) {
      let change
      const op_start = op.p
      const op_length = op.i.length
      const op_end = op.p + op_length
      const undoing = !!op.u

      let already_merged = false
      let previous_change = null
      const moved_changes = []
      const remove_changes = []
      const new_changes = []

      for (let i = 0; i < this.changes.length; i++) {
        change = this.changes[i]
        const change_start = change.op.p

        if (change.op.d != null) {
          // Shift any deletes after this along by the length of this insert
          if (op_start < change_start) {
            change.op.p += op_length
            moved_changes.push(change)
          } else if (op_start === change_start) {
            // If we are undoing, then we want to cancel any existing delete ranges if we can.
            // Check if the insert matches the start of the delete, and just remove it from the delete instead if so.
            if (
              undoing &&
              change.op.d.length >= op.i.length &&
              change.op.d.slice(0, op.i.length) === op.i
            ) {
              change.op.d = change.op.d.slice(op.i.length)
              change.op.p += op.i.length
              if (change.op.d === '') {
                remove_changes.push(change)
              } else {
                moved_changes.push(change)
              }
              already_merged = true
            } else {
              change.op.p += op_length
              moved_changes.push(change)
            }
          }
        } else if (change.op.i != null) {
          var offset
          const change_end = change_start + change.op.i.length
          const is_change_overlapping =
            op_start >= change_start && op_start <= change_end

          // Only merge inserts if they are from the same user
          const is_same_user = metadata.user_id === change.metadata.user_id

          // If we are undoing, then our changes will be removed from any delete ops just after. In that case, if there is also
          // an insert op just before, then we shouldn't append it to this insert, but instead only cancel the following delete.
          // E.g.
          //                  foo|<--- about to insert 'b' here
          // inserted 'foo' --^  ^-- deleted 'bar'
          // should become just 'foo', not 'foob' (with the delete marker becoming just 'ar').
          const next_change = this.changes[i + 1]
          const is_op_adjacent_to_next_delete =
            next_change != null &&
            next_change.op.d != null &&
            op.p === change_end &&
            next_change.op.p === op.p
          const will_op_cancel_next_delete =
            undoing &&
            is_op_adjacent_to_next_delete &&
            next_change.op.d.slice(0, op.i.length) === op.i

          // If there is a delete at the start of the insert, and we're inserting
          // at the start, we SHOULDN'T merge since the delete acts as a partition.
          // The previous op will be the delete, but it's already been shifted by this insert
          //
          // I.e.
          // Originally: |-- existing insert --|
          //             | <- existing delete at same offset
          //
          // Now:        |-- existing insert --| <- not shifted yet
          //             |-- this insert --|| <- existing delete shifted along to end of this op
          //
          // After:      |-- existing insert --|
          //             |-- this insert --|| <- existing delete
          //
          // Without the delete, the inserts would be merged.
          const is_insert_blocked_by_delete =
            previous_change != null &&
            previous_change.op.d != null &&
            previous_change.op.p === op_end

          // If the insert is overlapping another insert, either at the beginning, in the middle, or touching the end,
          // then we merge them into one.
          if (
            this.track_changes &&
            is_change_overlapping &&
            !is_insert_blocked_by_delete &&
            !already_merged &&
            !will_op_cancel_next_delete &&
            is_same_user
          ) {
            offset = op_start - change_start
            change.op.i =
              change.op.i.slice(0, offset) + op.i + change.op.i.slice(offset)
            change.metadata.ts = metadata.ts
            already_merged = true
            moved_changes.push(change)
          } else if (op_start <= change_start) {
            // If we're fully before the other insert we can just shift the other insert by our length.
            // If they are touching, and should have been merged, they will have been above.
            // If not merged above, then it must be blocked by a delete, and will be after this insert, so we shift it along as well
            change.op.p += op_length
            moved_changes.push(change)
          } else if (
            (!is_same_user || !this.track_changes) &&
            change_start < op_start &&
            op_start < change_end
          ) {
            // This user is inserting inside a change by another user, so we need to split the
            // other user's change into one before and after this one.
            offset = op_start - change_start
            const before_content = change.op.i.slice(0, offset)
            const after_content = change.op.i.slice(offset)

            // The existing change can become the 'before' change
            change.op.i = before_content
            moved_changes.push(change)

            // Create a new op afterwards
            const after_change = {
              op: {
                i: after_content,
                p: change_start + offset + op_length,
              },
              metadata: {},
            }
            for (const key in change.metadata) {
              const value = change.metadata[key]
              after_change.metadata[key] = value
            }
            new_changes.push(after_change)
          }
        }

        previous_change = change
      }

      if (this.track_changes && !already_merged) {
        this._addOp(op, metadata)
      }
      for ({ op, metadata } of Array.from(new_changes)) {
        this._addOp(op, metadata)
      }

      for (change of Array.from(remove_changes)) {
        this._removeChange(change)
      }

      return (() => {
        const result = []
        for (change of Array.from(moved_changes)) {
          result.push(this._markAsDirty(change, 'change', 'moved'))
        }
        return result
      })()
    }
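
    // Illustrative sketch of the undo path above (hypothetical values): with
    // track_changes on and an existing delete { p: 3, d: 'bar' }, applying the
    // insert { p: 3, i: 'bar', u: true } matches the start of the delete, so
    // the delete is cancelled (removed) rather than a new insert being
    // recorded. Inserting only { p: 3, i: 'ba', u: true } would instead shrink
    // the delete to { p: 5, d: 'r' }.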

    applyDeleteToChanges(op, metadata) {
      let change
      const op_start = op.p
      const op_length = op.d.length
      const op_end = op.p + op_length
      const remove_changes = []
      let moved_changes = []

      // We might end up modifying our delete op if it merges with existing deletes, or cancels out
      // with an existing insert. Since we might do multiple modifications, we record them and do
      // all the modifications after looping through the existing changes, so as not to mess up the
      // offset indexes as we go.
      const op_modifications = []
      for (change of Array.from(this.changes)) {
        var change_start
        if (change.op.i != null) {
          change_start = change.op.p
          const change_end = change_start + change.op.i.length
          if (op_end <= change_start) {
            // Shift ops after us back by our length
            change.op.p -= op_length
            moved_changes.push(change)
          } else if (op_start >= change_end) {
            // Delete is after insert, nothing to do
          } else {
            // When the new delete overlaps an insert, we should remove the part of the insert that
            // is now deleted, and also remove the part of the new delete that overlapped. I.e.
            // the two cancel out where they overlap.
            var delete_remaining_after,
              delete_remaining_before,
              insert_remaining_after,
              insert_remaining_before
            if (op_start >= change_start) {
              // |-- existing insert --|
              // insert_remaining_before -> |.....||-- new delete --|
              delete_remaining_before = ''
              insert_remaining_before = change.op.i.slice(
                0,
                op_start - change_start
              )
            } else {
              // delete_remaining_before -> |.....||-- existing insert --|
              // |-- new delete --|
              delete_remaining_before = op.d.slice(0, change_start - op_start)
              insert_remaining_before = ''
            }

            if (op_end <= change_end) {
              // |-- existing insert --|
              // |-- new delete --||.....| <- insert_remaining_after
              delete_remaining_after = ''
              insert_remaining_after = change.op.i.slice(op_end - change_start)
            } else {
              // |-- existing insert --||.....| <- delete_remaining_after
              // |-- new delete --|
              delete_remaining_after = op.d.slice(change_end - op_start)
              insert_remaining_after = ''
            }

            const insert_remaining =
              insert_remaining_before + insert_remaining_after
            if (insert_remaining.length > 0) {
              change.op.i = insert_remaining
              change.op.p = Math.min(change_start, op_start)
              change.metadata.ts = metadata.ts
              moved_changes.push(change)
            } else {
              remove_changes.push(change)
            }

            // We know what we want to preserve of our delete op before (delete_remaining_before) and what we want to preserve
            // afterwards (delete_remaining_after). Now we need to turn that into a modification which deletes the
            // chunk in the middle not covered by these.
            const delete_removed_length =
              op.d.length -
              delete_remaining_before.length -
              delete_remaining_after.length
            const delete_removed_start = delete_remaining_before.length
            const modification = {
              d: op.d.slice(
                delete_removed_start,
                delete_removed_start + delete_removed_length
              ),
              p: delete_removed_start,
            }
            if (modification.d.length > 0) {
              op_modifications.push(modification)
            }
          }
        } else if (change.op.d != null) {
          change_start = change.op.p
          if (
            op_end < change_start ||
            (!this.track_changes && op_end === change_start)
          ) {
            // Shift ops after us back by our length.
            // If we're tracking changes, it must be strictly before, since we'll merge
            // below if they are touching. Otherwise, touching is fine.
            change.op.p -= op_length
            moved_changes.push(change)
          } else if (op_start <= change_start && change_start <= op_end) {
            if (this.track_changes) {
              // If we overlap a delete, add it in our content, and delete the existing change.
              // It's easier to do it this way, rather than modifying the existing delete in case
              // we overlap many deletes and we'd need to track that. We have a workaround to
              // update the delete in place if possible below.
              const offset = change_start - op_start
              op_modifications.push({ i: change.op.d, p: offset })
              remove_changes.push(change)
            } else {
              change.op.p = op_start
              moved_changes.push(change)
            }
          }
        }
      }

      // Copy rather than modify because we still need to apply it to comments
      op = {
        p: op.p,
        d: this._applyOpModifications(op.d, op_modifications),
      }

      for (change of Array.from(remove_changes)) {
        // This is a bit of a hack to avoid removing one delete and replacing it with another.
        // If we don't do this, it causes the UI to flicker
        if (
          op.d.length > 0 &&
          change.op.d != null &&
          op.p <= change.op.p &&
          change.op.p <= op.p + op.d.length
        ) {
          change.op.p = op.p
          change.op.d = op.d
          change.metadata = metadata
          moved_changes.push(change)
          op.d = '' // stop it being added
        } else {
          this._removeChange(change)
        }
      }

      if (this.track_changes && op.d.length > 0) {
        this._addOp(op, metadata)
      } else {
        // It's possible that we deleted an insert between two other inserts. I.e.
        // If we delete 'user_2 insert' in:
        //   |-- user_1 insert --||-- user_2 insert --||-- user_1 insert --|
        // it becomes:
        //   |-- user_1 insert --||-- user_1 insert --|
        // We need to merge these together again
        const results = this._scanAndMergeAdjacentUpdates()
        moved_changes = moved_changes.concat(results.moved_changes)
        for (change of Array.from(results.remove_changes)) {
          this._removeChange(change)
          moved_changes = moved_changes.filter(c => c !== change)
        }
      }

      return (() => {
        const result = []
        for (change of Array.from(moved_changes)) {
          result.push(this._markAsDirty(change, 'change', 'moved'))
        }
        return result
      })()
    }
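
    // Illustrative sketch of the cancel-out case above (hypothetical values):
    // given an existing tracked insert { p: 5, i: 'hello' }, applying the
    // delete { p: 7, d: 'llo!' } removes the overlapping 'llo' from the insert
    // (leaving { p: 5, i: 'he' }) and records only the uncovered '!' via
    // op_modifications, so the delete that gets added is { p: 7, d: '!' }.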

    _addOp(op, metadata) {
      const change = {
        id: this.newId(),
        op: this._clone(op), // Don't take a reference to the existing op since we'll modify this in place with future changes
        metadata: this._clone(metadata),
      }
      this.changes.push(change)

      // Keep ops in order of offset, with deletes before inserts
      this.changes.sort(function (c1, c2) {
        const result = c1.op.p - c2.op.p
        if (result !== 0) {
          return result
        } else if (c1.op.i != null && c2.op.d != null) {
          return 1
        } else if (c1.op.d != null && c2.op.i != null) {
          return -1
        } else {
          return 0
        }
      })

      return this._markAsDirty(change, 'change', 'added')
    }

    _removeChange(change) {
      this.changes = this.changes.filter(c => c.id !== change.id)
      return this._markAsDirty(change, 'change', 'removed')
    }

    _applyOpModifications(content, op_modifications) {
      // Put in descending position order, with deleting first if at the same offset
      // (Inserting first would modify the content that the delete will delete)
      op_modifications.sort(function (a, b) {
        const result = b.p - a.p
        if (result !== 0) {
          return result
        } else if (a.i != null && b.d != null) {
          return 1
        } else if (a.d != null && b.i != null) {
          return -1
        } else {
          return 0
        }
      })

      for (const modification of Array.from(op_modifications)) {
        if (modification.i != null) {
          content =
            content.slice(0, modification.p) +
            modification.i +
            content.slice(modification.p)
        } else if (modification.d != null) {
          if (
            content.slice(
              modification.p,
              modification.p + modification.d.length
            ) !== modification.d
          ) {
            throw new Error(
              `deleted content does not match. content: ${JSON.stringify(
                content
              )}; modification: ${JSON.stringify(modification)}`
            )
          }
          content =
            content.slice(0, modification.p) +
            content.slice(modification.p + modification.d.length)
        }
      }
      return content
    }
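
    // Illustrative sketch (hypothetical values): applying recorded
    // modifications to the string 'abcdef'. The sort runs in descending
    // position order so earlier offsets stay valid:
    //
    //   _applyOpModifications('abcdef', [{ d: 'ab', p: 0 }, { i: 'X', p: 4 }])
    //   // processes { i: 'X', p: 4 } first -> 'abcdXef'
    //   // then { d: 'ab', p: 0 }           -> 'cdXef'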

    _scanAndMergeAdjacentUpdates() {
      // This should only need calling when deleting an update between two
      // other updates. There's no other way to get two adjacent updates from the
      // same user, since they would be merged on insert.
      let previous_change = null
      const remove_changes = []
      const moved_changes = []
      for (const change of Array.from(this.changes)) {
        if (
          (previous_change != null ? previous_change.op.i : undefined) !=
            null &&
          change.op.i != null
        ) {
          const previous_change_end =
            previous_change.op.p + previous_change.op.i.length
          const previous_change_user_id = previous_change.metadata.user_id
          const change_start = change.op.p
          const change_user_id = change.metadata.user_id
          if (
            previous_change_end === change_start &&
            previous_change_user_id === change_user_id
          ) {
            remove_changes.push(change)
            previous_change.op.i += change.op.i
            moved_changes.push(previous_change)
          }
        } else if (
          (previous_change != null ? previous_change.op.d : undefined) !=
            null &&
          change.op.d != null &&
          previous_change.op.p === change.op.p
        ) {
          // Merge adjacent deletes
          previous_change.op.d += change.op.d
          remove_changes.push(change)
          moved_changes.push(previous_change)
        } else {
          // Only update to the current change if we haven't removed it.
          previous_change = change
        }
      }
      return { moved_changes, remove_changes }
    }
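
    // Illustrative sketch (hypothetical values): two inserts from the same
    // user left touching after a middle insert was deleted:
    //
    //   changes = [{ op: { p: 0, i: 'foo' } }, { op: { p: 3, i: 'bar' } }]
    //
    // The scan merges them into a single { p: 0, i: 'foobar' }, returning the
    // second change in remove_changes and the merged one in moved_changes.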

    resetDirtyState() {
      return (this._dirtyState = {
        comment: {
          moved: {},
          removed: {},
          added: {},
        },
        change: {
          moved: {},
          removed: {},
          added: {},
        },
      })
    }

    getDirtyState() {
      return this._dirtyState
    }

    _markAsDirty(object, type, action) {
      return (this._dirtyState[type][action][object.id] = object)
    }

    _clone(object) {
      const clone = {}
      for (const k in object) {
        const v = object[k]
        clone[k] = v
      }
      return clone
    }
  })
}

if (typeof define !== 'undefined' && define !== null) {
  define([], load)
} else {
  module.exports = load()
}

80
services/document-updater/app/js/RateLimitManager.js
Normal file
@@ -0,0 +1,80 @@
/* eslint-disable
    no-unused-vars,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS102: Remove unnecessary code created because of implicit returns
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
let RateLimiter
const Settings = require('@overleaf/settings')
const logger = require('logger-sharelatex')
const Metrics = require('./Metrics')

module.exports = RateLimiter = class RateLimiter {
  constructor(number) {
    if (number == null) {
      number = 10
    }
    this.ActiveWorkerCount = 0
    this.CurrentWorkerLimit = number
    this.BaseWorkerCount = number
  }

  _adjustLimitUp() {
    this.CurrentWorkerLimit += 0.1 // allow target worker limit to increase gradually
    return Metrics.gauge('currentLimit', Math.ceil(this.CurrentWorkerLimit))
  }

  _adjustLimitDown() {
    this.CurrentWorkerLimit = Math.max(
      this.BaseWorkerCount,
      this.CurrentWorkerLimit * 0.9
    )
    logger.log(
      { currentLimit: Math.ceil(this.CurrentWorkerLimit) },
      'reducing rate limit'
    )
    return Metrics.gauge('currentLimit', Math.ceil(this.CurrentWorkerLimit))
  }

  _trackAndRun(task, callback) {
    if (callback == null) {
      callback = function () {}
    }
    this.ActiveWorkerCount++
    Metrics.gauge('processingUpdates', this.ActiveWorkerCount)
    return task(err => {
      this.ActiveWorkerCount--
      Metrics.gauge('processingUpdates', this.ActiveWorkerCount)
      return callback(err)
    })
  }

  run(task, callback) {
    if (this.ActiveWorkerCount < this.CurrentWorkerLimit) {
      this._trackAndRun(task) // below the limit, just put the task in the background
      callback() // return immediately
      if (this.CurrentWorkerLimit > this.BaseWorkerCount) {
        return this._adjustLimitDown()
      }
    } else {
      logger.log(
        {
          active: this.ActiveWorkerCount,
          currentLimit: Math.ceil(this.CurrentWorkerLimit),
        },
        'hit rate limit'
      )
      return this._trackAndRun(task, err => {
        if (err == null) {
          this._adjustLimitUp()
        } // don't increment rate limit if there was an error
        return callback(err)
      }) // only return after task completes
    }
  }
}
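
// A minimal usage sketch (not part of the original file; the worker function
// is hypothetical). Tasks run in the background while under the limit; once
// the limit is hit, run() waits for the task and slowly raises the limit on
// success:
//
//   const RateLimiter = require('./RateLimitManager')
//   const limiter = new RateLimiter(10)
//   limiter.run(cb => processNextUpdate(cb), err => {
//     if (err) { /* the task failed while we were over the limit */ }
//   })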

87
services/document-updater/app/js/RealTimeRedisManager.js
Normal file
@@ -0,0 +1,87 @@
/* eslint-disable
    camelcase,
    no-unused-vars,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
let RealTimeRedisManager
const Settings = require('@overleaf/settings')
const rclient = require('@overleaf/redis-wrapper').createClient(
  Settings.redis.documentupdater
)
const pubsubClient = require('@overleaf/redis-wrapper').createClient(
  Settings.redis.pubsub
)
const Keys = Settings.redis.documentupdater.key_schema
const logger = require('logger-sharelatex')
const os = require('os')
const crypto = require('crypto')
const metrics = require('./Metrics')

const HOST = os.hostname()
const RND = crypto.randomBytes(4).toString('hex') // generate a random key for this process
let COUNT = 0

const MAX_OPS_PER_ITERATION = 8 // process a limited number of ops for safety

module.exports = RealTimeRedisManager = {
  getPendingUpdatesForDoc(doc_id, callback) {
    const multi = rclient.multi()
    multi.lrange(Keys.pendingUpdates({ doc_id }), 0, MAX_OPS_PER_ITERATION - 1)
    multi.ltrim(Keys.pendingUpdates({ doc_id }), MAX_OPS_PER_ITERATION, -1)
    return multi.exec(function (error, replys) {
      let jsonUpdate
      if (error != null) {
        return callback(error)
      }
      const jsonUpdates = replys[0]
      for (jsonUpdate of Array.from(jsonUpdates)) {
        // record metric for each update removed from queue
        metrics.summary('redis.pendingUpdates', jsonUpdate.length, {
          status: 'pop',
        })
      }
      const updates = []
      for (jsonUpdate of Array.from(jsonUpdates)) {
        var update
        try {
          update = JSON.parse(jsonUpdate)
        } catch (e) {
          return callback(e)
        }
        updates.push(update)
      }
      return callback(error, updates)
    })
  },

  getUpdatesLength(doc_id, callback) {
    return rclient.llen(Keys.pendingUpdates({ doc_id }), callback)
  },

  sendData(data) {
    // create a unique message id using a counter
    const message_id = `doc:${HOST}:${RND}-${COUNT++}`
    if (data != null) {
      data._id = message_id
    }

    const blob = JSON.stringify(data)
    metrics.summary('redis.publish.applied-ops', blob.length)

    // publish on separate channels for individual projects and docs when
    // configured (needs realtime to be configured for this too).
    if (Settings.publishOnIndividualChannels) {
      return pubsubClient.publish(`applied-ops:${data.doc_id}`, blob)
    } else {
      return pubsubClient.publish('applied-ops', blob)
    }
  },
}
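
// The LRANGE/LTRIM pair in getPendingUpdatesForDoc acts as an atomic batched
// "pop" because both commands run inside one MULTI. A sketch of the effect
// (hypothetical queue contents), with MAX_OPS_PER_ITERATION = 8:
//
//   pendingUpdates:doc-1 = [u1 ... u20]
//   LRANGE key 0 7  -> returns [u1 ... u8]
//   LTRIM key 8 -1  -> queue becomes [u9 ... u20]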

744
services/document-updater/app/js/RedisManager.js
Normal file
@@ -0,0 +1,744 @@
/* eslint-disable
    camelcase,
    handle-callback-err,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * DS103: Rewrite code to no longer use __guard__
 * DS201: Simplify complex destructure assignments
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
let RedisManager
const Settings = require('@overleaf/settings')
const rclient = require('@overleaf/redis-wrapper').createClient(
  Settings.redis.documentupdater
)
const logger = require('logger-sharelatex')
const metrics = require('./Metrics')
const Errors = require('./Errors')
const crypto = require('crypto')
const async = require('async')
const ProjectHistoryRedisManager = require('./ProjectHistoryRedisManager')

// Sometimes Redis calls take an unexpectedly long time. We have to be
// quick with Redis calls because we're holding a lock that expires
// after 30 seconds. We can't let any errors in the rest of the stack
// hold us up, and need to bail out quickly if there is a problem.
const MAX_REDIS_REQUEST_LENGTH = 5000 // 5 seconds

// Make times easy to read
const minutes = 60 // seconds for Redis expire

const logHashErrors =
  Settings.documentupdater != null
    ? Settings.documentupdater.logHashErrors
    : undefined
const logHashReadErrors = logHashErrors != null ? logHashErrors.read : undefined

const MEGABYTES = 1024 * 1024
const MAX_RANGES_SIZE = 3 * MEGABYTES

const keys = Settings.redis.documentupdater.key_schema
const historyKeys = Settings.redis.history.key_schema // note: this is track changes, not project-history

module.exports = RedisManager = {
  rclient,

  putDocInMemory(
    project_id,
    doc_id,
    docLines,
    version,
    ranges,
    pathname,
    projectHistoryId,
    _callback
  ) {
    const timer = new metrics.Timer('redis.put-doc')
    const callback = function (error) {
      timer.done()
      return _callback(error)
    }
    docLines = JSON.stringify(docLines)
    if (docLines.indexOf('\u0000') !== -1) {
      const error = new Error('null bytes found in doc lines')
      // this check was added to catch memory corruption in JSON.stringify.
      // It sometimes returned null bytes at the end of the string.
      logger.error({ err: error, doc_id, docLines }, error.message)
      return callback(error)
    }
    // Do a cheap size check on the serialized blob.
    if (docLines.length > Settings.max_doc_length) {
      const docSize = docLines.length
      const err = new Error('blocking doc insert into redis: doc is too large')
      logger.error({ project_id, doc_id, err, docSize }, err.message)
      return callback(err)
    }
    const docHash = RedisManager._computeHash(docLines)
    // record bytes sent to redis
    metrics.summary('redis.docLines', docLines.length, { status: 'set' })
    logger.log(
      { project_id, doc_id, version, docHash, pathname, projectHistoryId },
      'putting doc in redis'
    )
    return RedisManager._serializeRanges(ranges, function (error, ranges) {
      if (error != null) {
        logger.error({ err: error, doc_id, project_id }, error.message)
        return callback(error)
      }
      // update docsInProject set before writing doc contents
      rclient.sadd(keys.docsInProject({ project_id }), doc_id, error => {
        if (error) return callback(error)

        rclient.mset(
          {
            [keys.docLines({ doc_id })]: docLines,
            [keys.projectKey({ doc_id })]: project_id,
            [keys.docVersion({ doc_id })]: version,
            [keys.docHash({ doc_id })]: docHash,
            [keys.ranges({ doc_id })]: ranges,
            [keys.pathname({ doc_id })]: pathname,
            [keys.projectHistoryId({ doc_id })]: projectHistoryId,
          },
          callback
        )
      })
    })
  },

  removeDocFromMemory(project_id, doc_id, _callback) {
    logger.log({ project_id, doc_id }, 'removing doc from redis')
    const callback = function (err) {
      if (err != null) {
        logger.err({ project_id, doc_id, err }, 'error removing doc from redis')
        return _callback(err)
      } else {
        logger.log({ project_id, doc_id }, 'removed doc from redis')
        return _callback()
      }
    }

    let multi = rclient.multi()
    multi.strlen(keys.docLines({ doc_id }))
    multi.del(
      keys.docLines({ doc_id }),
      keys.projectKey({ doc_id }),
      keys.docVersion({ doc_id }),
      keys.docHash({ doc_id }),
      keys.ranges({ doc_id }),
      keys.pathname({ doc_id }),
      keys.projectHistoryId({ doc_id }),
      keys.projectHistoryType({ doc_id }),
      keys.unflushedTime({ doc_id }),
      keys.lastUpdatedAt({ doc_id }),
      keys.lastUpdatedBy({ doc_id })
    )
    return multi.exec(function (error, response) {
      if (error != null) {
        return callback(error)
      }
      const length = response != null ? response[0] : undefined
      if (length > 0) {
        // record bytes freed in redis
        metrics.summary('redis.docLines', length, { status: 'del' })
      }
      multi = rclient.multi()
      multi.srem(keys.docsInProject({ project_id }), doc_id)
      multi.del(keys.projectState({ project_id }))
      return multi.exec(callback)
    })
  },

  checkOrSetProjectState(project_id, newState, callback) {
    if (callback == null) {
      callback = function (error, stateChanged) {}
    }
    const multi = rclient.multi()
    multi.getset(keys.projectState({ project_id }), newState)
    multi.expire(keys.projectState({ project_id }), 30 * minutes)
    return multi.exec(function (error, response) {
      if (error != null) {
        return callback(error)
      }
      logger.log(
        { project_id, newState, oldState: response[0] },
        'checking project state'
      )
      return callback(null, response[0] !== newState)
    })
  },

  clearProjectState(project_id, callback) {
    if (callback == null) {
      callback = function (error) {}
    }
    return rclient.del(keys.projectState({ project_id }), callback)
  },

  getDoc(project_id, doc_id, callback) {
    if (callback == null) {
      callback = function (
        error,
        lines,
        version,
        ranges,
        pathname,
        projectHistoryId,
        unflushedTime
      ) {}
    }
    const timer = new metrics.Timer('redis.get-doc')
    const collectKeys = [
      keys.docLines({ doc_id }),
      keys.docVersion({ doc_id }),
      keys.docHash({ doc_id }),
      keys.projectKey({ doc_id }),
      keys.ranges({ doc_id }),
      keys.pathname({ doc_id }),
      keys.projectHistoryId({ doc_id }),
      keys.unflushedTime({ doc_id }),
      keys.lastUpdatedAt({ doc_id }),
      keys.lastUpdatedBy({ doc_id }),
    ]
    rclient.mget(...collectKeys, (error, ...rest) => {
      let [
        docLines,
        version,
        storedHash,
        doc_project_id,
        ranges,
        pathname,
        projectHistoryId,
        unflushedTime,
        lastUpdatedAt,
        lastUpdatedBy,
      ] = Array.from(rest[0])
      const timeSpan = timer.done()
      if (error != null) {
        return callback(error)
      }
      // check if request took too long and bail out. only do this for
      // get, because it is the first call in each update, so if this
      // passes we'll assume others have a reasonable chance to succeed.
      if (timeSpan > MAX_REDIS_REQUEST_LENGTH) {
        error = new Error('redis getDoc exceeded timeout')
        return callback(error)
      }
      // record bytes loaded from redis
      if (docLines != null) {
        metrics.summary('redis.docLines', docLines.length, { status: 'get' })
      }
      // check sha1 hash value if present
      if (docLines != null && storedHash != null) {
        const computedHash = RedisManager._computeHash(docLines)
        if (logHashReadErrors && computedHash !== storedHash) {
          logger.error(
            {
              project_id,
              doc_id,
              doc_project_id,
              computedHash,
              storedHash,
              docLines,
            },
            'hash mismatch on retrieved document'
          )
        }
      }

      try {
        docLines = JSON.parse(docLines)
        ranges = RedisManager._deserializeRanges(ranges)
      } catch (e) {
        return callback(e)
      }

      version = parseInt(version || 0, 10)
      // check doc is in requested project
      if (doc_project_id != null && doc_project_id !== project_id) {
        logger.error(
          { project_id, doc_id, doc_project_id },
          'doc not in project'
        )
        return callback(new Errors.NotFoundError('document not found'))
      }

      if (projectHistoryId != null) {
        projectHistoryId = parseInt(projectHistoryId)
      }

      callback(
        null,
        docLines,
        version,
        ranges,
        pathname,
        projectHistoryId,
        unflushedTime,
        lastUpdatedAt,
        lastUpdatedBy
      )
    })
  },

  getDocVersion(doc_id, callback) {
    if (callback == null) {
      callback = function (error, version, projectHistoryType) {}
    }
    return rclient.mget(
      keys.docVersion({ doc_id }),
      keys.projectHistoryType({ doc_id }),
      function (error, result) {
        if (error != null) {
          return callback(error)
        }
        let [version, projectHistoryType] = Array.from(result || [])
        version = parseInt(version, 10)
        return callback(null, version, projectHistoryType)
      }
    )
  },

  getDocLines(doc_id, callback) {
    if (callback == null) {
      callback = function (error, version) {}
    }
    return rclient.get(keys.docLines({ doc_id }), function (error, docLines) {
      if (error != null) {
        return callback(error)
      }
      return callback(null, docLines)
    })
  },

  getPreviousDocOps(doc_id, start, end, callback) {
    if (callback == null) {
      callback = function (error, jsonOps) {}
    }
    const timer = new metrics.Timer('redis.get-prev-docops')
    return rclient.llen(keys.docOps({ doc_id }), function (error, length) {
      if (error != null) {
        return callback(error)
      }
      return rclient.get(
        keys.docVersion({ doc_id }),
        function (error, version) {
          if (error != null) {
            return callback(error)
          }
          version = parseInt(version, 10)
          const first_version_in_redis = version - length

          if (start < first_version_in_redis || end > version) {
            error = new Errors.OpRangeNotAvailableError(
              'doc ops range is not loaded in redis'
            )
            logger.warn(
              { err: error, doc_id, length, version, start, end },
              'doc ops range is not loaded in redis'
            )
            return callback(error)
          }

          start = start - first_version_in_redis
          if (end > -1) {
            end = end - first_version_in_redis
          }

          if (isNaN(start) || isNaN(end)) {
            error = new Error('inconsistent version or lengths')
            logger.error(
              { err: error, doc_id, length, version, start, end },
              'inconsistent version or length'
            )
            return callback(error)
          }

          return rclient.lrange(
            keys.docOps({ doc_id }),
            start,
            end,
            function (error, jsonOps) {
              let ops
              if (error != null) {
                return callback(error)
              }
              try {
                ops = jsonOps.map(jsonOp => JSON.parse(jsonOp))
              } catch (e) {
                return callback(e)
              }
              const timeSpan = timer.done()
              if (timeSpan > MAX_REDIS_REQUEST_LENGTH) {
                error = new Error('redis getPreviousDocOps exceeded timeout')
                return callback(error)
              }
              return callback(null, ops)
            }
          )
        }
      )
    })
  },

  getHistoryType(doc_id, callback) {
    if (callback == null) {
      callback = function (error, projectHistoryType) {}
    }
    return rclient.get(
      keys.projectHistoryType({ doc_id }),
      function (error, projectHistoryType) {
        if (error != null) {
          return callback(error)
        }
        return callback(null, projectHistoryType)
      }
    )
  },

  setHistoryType(doc_id, projectHistoryType, callback) {
    if (callback == null) {
      callback = function (error) {}
    }
    return rclient.set(
      keys.projectHistoryType({ doc_id }),
      projectHistoryType,
      callback
    )
  },

  DOC_OPS_TTL: 60 * minutes,
  DOC_OPS_MAX_LENGTH: 100,
  updateDocument(
    project_id,
    doc_id,
    docLines,
    newVersion,
    appliedOps,
    ranges,
    updateMeta,
    callback
  ) {
    if (appliedOps == null) {
      appliedOps = []
    }
    if (callback == null) {
      callback = function (error) {}
    }
    return RedisManager.getDocVersion(
      doc_id,
      function (error, currentVersion, projectHistoryType) {
        if (error != null) {
          return callback(error)
        }
        if (currentVersion + appliedOps.length !== newVersion) {
          error = new Error(`Version mismatch. '${doc_id}' is corrupted.`)
          logger.error(
            {
              err: error,
              doc_id,
              currentVersion,
              newVersion,
              opsLength: appliedOps.length,
            },
            'version mismatch'
          )
          return callback(error)
        }

        const jsonOps = appliedOps.map(op => JSON.stringify(op))
        for (const op of Array.from(jsonOps)) {
          if (op.indexOf('\u0000') !== -1) {
            error = new Error('null bytes found in jsonOps')
            // this check was added to catch memory corruption in JSON.stringify
            logger.error({ err: error, doc_id, jsonOps }, error.message)
            return callback(error)
          }
        }

        const newDocLines = JSON.stringify(docLines)
        if (newDocLines.indexOf('\u0000') !== -1) {
          error = new Error('null bytes found in doc lines')
          // this check was added to catch memory corruption in JSON.stringify
          logger.error({ err: error, doc_id, newDocLines }, error.message)
          return callback(error)
        }
        // Do a cheap size check on the serialized blob.
        if (newDocLines.length > Settings.max_doc_length) {
          const err = new Error('blocking doc update: doc is too large')
          const docSize = newDocLines.length
          logger.error({ project_id, doc_id, err, docSize }, err.message)
          return callback(err)
        }
        const newHash = RedisManager._computeHash(newDocLines)

        const opVersions = appliedOps.map(op => (op != null ? op.v : undefined))
        logger.log(
          {
            doc_id,
            version: newVersion,
            hash: newHash,
            op_versions: opVersions,
          },
          'updating doc in redis'
        )
        // record bytes sent to redis in update
        metrics.summary('redis.docLines', newDocLines.length, {
          status: 'update',
        })
        return RedisManager._serializeRanges(ranges, function (error, ranges) {
          if (error != null) {
            logger.error({ err: error, doc_id }, error.message)
            return callback(error)
          }
          if (ranges != null && ranges.indexOf('\u0000') !== -1) {
            error = new Error('null bytes found in ranges')
            // this check was added to catch memory corruption in JSON.stringify
            logger.error({ err: error, doc_id, ranges }, error.message)
            return callback(error)
          }
          const multi = rclient.multi()
          multi.mset({
            [keys.docLines({ doc_id })]: newDocLines,
            [keys.docVersion({ doc_id })]: newVersion,
            [keys.docHash({ doc_id })]: newHash,
            [keys.ranges({ doc_id })]: ranges,
            [keys.lastUpdatedAt({ doc_id })]: Date.now(),
            [keys.lastUpdatedBy({ doc_id })]: updateMeta && updateMeta.user_id,
          })
          multi.ltrim(
            keys.docOps({ doc_id }),
            -RedisManager.DOC_OPS_MAX_LENGTH,
            -1
          ) // index 3
          // push the ops last so we can get the lengths at fixed index position 7
          if (jsonOps.length > 0) {
            multi.rpush(keys.docOps({ doc_id }), ...Array.from(jsonOps)) // index 5
            // expire must come after rpush since before it will be a no-op if the list is empty
            multi.expire(keys.docOps({ doc_id }), RedisManager.DOC_OPS_TTL) // index 6
            if (projectHistoryType === 'project-history') {
              metrics.inc('history-queue', 1, { status: 'skip-track-changes' })
              logger.log(
                { doc_id },
                'skipping push of uncompressed ops for project using project-history'
              )
            } else {
              // project is using old track-changes history service
              metrics.inc('history-queue', 1, { status: 'track-changes' })
              multi.rpush(
                historyKeys.uncompressedHistoryOps({ doc_id }),
                ...Array.from(jsonOps)
              ) // index 7
            }
            // Set the unflushed timestamp to the current time if the doc
            // hasn't been modified before (the content in mongo has been
            // valid up to this point). Otherwise leave it alone ("NX" flag).
            multi.set(keys.unflushedTime({ doc_id }), Date.now(), 'NX')
          }
          return multi.exec(function (error, result) {
            let docUpdateCount
            if (error != null) {
              return callback(error)
            }

            if (projectHistoryType === 'project-history') {
              docUpdateCount = undefined // only using project history, don't bother with track-changes
            } else {
              // project is using old track-changes history service
              docUpdateCount = result[4]
            }

            if (
              jsonOps.length > 0 &&
              __guard__(
                Settings.apis != null
                  ? Settings.apis.project_history
                  : undefined,
                x => x.enabled
              )
            ) {
              metrics.inc('history-queue', 1, { status: 'project-history' })
              return ProjectHistoryRedisManager.queueOps(
                project_id,
                ...Array.from(jsonOps),
                (error, projectUpdateCount) =>
                  callback(null, docUpdateCount, projectUpdateCount)
              )
            } else {
              return callback(null, docUpdateCount)
            }
          })
        })
      }
    )
  },

  renameDoc(project_id, doc_id, user_id, update, projectHistoryId, callback) {
    if (callback == null) {
      callback = function (error) {}
    }
    return RedisManager.getDoc(
      project_id,
      doc_id,
      function (error, lines, version) {
        if (error != null) {
          return callback(error)
        }

        if (lines != null && version != null) {
          return rclient.set(
            keys.pathname({ doc_id }),
            update.newPathname,
            function (error) {
              if (error != null) {
                return callback(error)
              }
              return ProjectHistoryRedisManager.queueRenameEntity(
                project_id,
                projectHistoryId,
                'doc',
                doc_id,
                user_id,
                update,
                callback
              )
            }
          )
        } else {
          return ProjectHistoryRedisManager.queueRenameEntity(
            project_id,
            projectHistoryId,
            'doc',
            doc_id,
            user_id,
            update,
            callback
          )
        }
      }
    )
  },

  clearUnflushedTime(doc_id, callback) {
    if (callback == null) {
      callback = function (error) {}
    }
    return rclient.del(keys.unflushedTime({ doc_id }), callback)
  },

  getDocIdsInProject(project_id, callback) {
    if (callback == null) {
      callback = function (error, doc_ids) {}
    }
    return rclient.smembers(keys.docsInProject({ project_id }), callback)
  },

  getDocTimestamps(doc_ids, callback) {
    // get lastupdatedat timestamps for an array of doc_ids
    if (callback == null) {
      callback = function (error, result) {}
    }
    return async.mapSeries(
      doc_ids,
      (doc_id, cb) => rclient.get(keys.lastUpdatedAt({ doc_id }), cb),
      callback
    )
  },

  queueFlushAndDeleteProject(project_id, callback) {
    // store the project id in a sorted set ordered by time with a random offset to smooth out spikes
    const SMOOTHING_OFFSET =
      Settings.smoothingOffset > 0
        ? Math.round(Settings.smoothingOffset * Math.random())
        : 0
    return rclient.zadd(
      keys.flushAndDeleteQueue(),
      Date.now() + SMOOTHING_OFFSET,
      project_id,
      callback
    )
  },

  getNextProjectToFlushAndDelete(cutoffTime, callback) {
    // find the oldest queued flush that is before the cutoff time
    if (callback == null) {
      callback = function (error, key, timestamp) {}
    }
    return rclient.zrangebyscore(
      keys.flushAndDeleteQueue(),
      0,
      cutoffTime,
      'WITHSCORES',
      'LIMIT',
      0,
      1,
      function (err, reply) {
        if (err != null) {
          return callback(err)
        }
        if (!(reply != null ? reply.length : undefined)) {
          return callback()
        } // return if no projects ready to be processed
        // pop the oldest entry (get and remove in a multi)
        const multi = rclient.multi()
        // Poor man's version of ZPOPMIN, which is only available in Redis 5.
        multi.zrange(keys.flushAndDeleteQueue(), 0, 0, 'WITHSCORES')
        multi.zremrangebyrank(keys.flushAndDeleteQueue(), 0, 0)
        multi.zcard(keys.flushAndDeleteQueue()) // the total length of the queue (for metrics)
        return multi.exec(function (err, reply) {
          if (err != null) {
            return callback(err)
          }
          if (!(reply != null ? reply.length : undefined)) {
            return callback()
          }
          const [key, timestamp] = Array.from(reply[0])
          const queueLength = reply[2]
          return callback(null, key, timestamp, queueLength)
        })
      }
    )
  },

  _serializeRanges(ranges, callback) {
    if (callback == null) {
      callback = function (error, serializedRanges) {}
    }
    let jsonRanges = JSON.stringify(ranges)
    if (jsonRanges != null && jsonRanges.length > MAX_RANGES_SIZE) {
      return callback(new Error('ranges are too large'))
    }
    if (jsonRanges === '{}') {
      // Most docs will have empty ranges so don't fill redis with lots of '{}' keys
      jsonRanges = null
    }
    return callback(null, jsonRanges)
  },

  _deserializeRanges(ranges) {
    if (ranges == null || ranges === '') {
      return {}
    } else {
      return JSON.parse(ranges)
    }
  },

  _computeHash(docLines) {
    // use sha1 checksum of doclines to detect data corruption.
    //
    // note: must specify 'utf8' encoding explicitly, as the default is
    // binary in node < v5
    return crypto.createHash('sha1').update(docLines, 'utf8').digest('hex')
  },
}

function __guard__(value, transform) {
  return typeof value !== 'undefined' && value !== null
    ? transform(value)
    : undefined
}

85
services/document-updater/app/js/ShareJsDB.js
Normal file
@@ -0,0 +1,85 @@
/* eslint-disable
    camelcase,
    no-unused-vars,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
let ShareJsDB
const Keys = require('./UpdateKeys')
const RedisManager = require('./RedisManager')
const Errors = require('./Errors')

module.exports = ShareJsDB = class ShareJsDB {
  constructor(project_id, doc_id, lines, version) {
    this.project_id = project_id
    this.doc_id = doc_id
    this.lines = lines
    this.version = version
    this.appliedOps = {}
    // ShareJS calls this detached from the instance, so we need to
    // bind it to keep our context that can access @appliedOps
    this.writeOp = this._writeOp.bind(this)
  }

  getOps(doc_key, start, end, callback) {
    if (start === end) {
      return callback(null, [])
    }

    // In redis, lrange values are inclusive.
    if (end != null) {
      end--
    } else {
      end = -1
    }

    const [project_id, doc_id] = Array.from(
      Keys.splitProjectIdAndDocId(doc_key)
    )
    return RedisManager.getPreviousDocOps(doc_id, start, end, callback)
  }

  _writeOp(doc_key, opData, callback) {
    if (this.appliedOps[doc_key] == null) {
      this.appliedOps[doc_key] = []
    }
    this.appliedOps[doc_key].push(opData)
    return callback()
  }

  getSnapshot(doc_key, callback) {
    if (
      doc_key !== Keys.combineProjectIdAndDocId(this.project_id, this.doc_id)
    ) {
      return callback(
        new Errors.NotFoundError(
          `unexpected doc_key ${doc_key}, expected ${Keys.combineProjectIdAndDocId(
            this.project_id,
            this.doc_id
          )}`
        )
      )
    } else {
      return callback(null, {
        snapshot: this.lines.join('\n'),
        v: parseInt(this.version, 10),
        type: 'text',
      })
    }
  }

  // To be able to remove a doc from the ShareJS memory
  // we need to call Model::delete, which calls this
  // method on the database. However, we will handle removing
  // it from Redis ourselves
  delete(docName, dbMeta, callback) {
    return callback()
  }
}
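
// Note on the inclusive-range adjustment in getOps (illustrative values):
// ShareJS asks for ops with versions [start, end), but redis LRANGE is
// inclusive, so getOps(key, 5, 8, cb) fetches the ops for versions 5, 6 and 7
// by decrementing end to 7 before calling getPreviousDocOps.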

145
services/document-updater/app/js/ShareJsUpdateManager.js
Normal file
@@ -0,0 +1,145 @@
/* eslint-disable
    camelcase,
    handle-callback-err,
    no-unused-vars,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
let ShareJsUpdateManager
const ShareJsModel = require('./sharejs/server/model')
const ShareJsDB = require('./ShareJsDB')
const logger = require('logger-sharelatex')
const Settings = require('@overleaf/settings')
const Keys = require('./UpdateKeys')
const { EventEmitter } = require('events')
const util = require('util')
const RealTimeRedisManager = require('./RealTimeRedisManager')
const crypto = require('crypto')
const metrics = require('./Metrics')
const Errors = require('./Errors')

ShareJsModel.prototype = {}
util.inherits(ShareJsModel, EventEmitter)

const MAX_AGE_OF_OP = 80

module.exports = ShareJsUpdateManager = {
  getNewShareJsModel(project_id, doc_id, lines, version) {
    const db = new ShareJsDB(project_id, doc_id, lines, version)
    const model = new ShareJsModel(db, {
      maxDocLength: Settings.max_doc_length,
      maximumAge: MAX_AGE_OF_OP,
    })
    model.db = db
    return model
  },

  applyUpdate(project_id, doc_id, update, lines, version, callback) {
    if (callback == null) {
      callback = function (error, updatedDocLines) {}
    }
    logger.log({ project_id, doc_id, update }, 'applying sharejs updates')
    const jobs = []
    // record the update version before it is modified
    const incomingUpdateVersion = update.v
    // We could use a global model for all docs, but we're hitting issues with the
    // internal state of ShareJS not being accessible for clearing caches, and
    // getting stuck due to queued callbacks (line 260 of sharejs/server/model.coffee)
    // This adds a small but hopefully acceptable overhead (~12ms per 1000 updates on
    // my 2009 MBP).
    const model = this.getNewShareJsModel(project_id, doc_id, lines, version)
    this._listenForOps(model)
    const doc_key = Keys.combineProjectIdAndDocId(project_id, doc_id)
    return model.applyOp(doc_key, update, function (error) {
      if (error != null) {
        if (error === 'Op already submitted') {
          metrics.inc('sharejs.already-submitted')
          logger.warn(
            { project_id, doc_id, update },
            'op has already been submitted'
          )
          update.dup = true
          ShareJsUpdateManager._sendOp(project_id, doc_id, update)
        } else if (/^Delete component/.test(error)) {
          metrics.inc('sharejs.delete-mismatch')
          logger.warn(
            { project_id, doc_id, update, shareJsErr: error },
            'sharejs delete does not match'
          )
          error = new Errors.DeleteMismatchError(
            'Delete component does not match'
          )
          return callback(error)
        } else {
          metrics.inc('sharejs.other-error')
          return callback(error)
        }
      }
      logger.log({ project_id, doc_id, error }, 'applied update')
      return model.getSnapshot(doc_key, (error, data) => {
        if (error != null) {
          return callback(error)
        }
        const docSizeAfter = data.snapshot.length
        if (docSizeAfter > Settings.max_doc_length) {
          const docSizeBefore = lines.join('\n').length
          const err = new Error(
            'blocking persistence of ShareJs update: doc size exceeds limits'
          )
          logger.error(
            { project_id, doc_id, err, docSizeBefore, docSizeAfter },
            err.message
          )
          metrics.inc('sharejs.other-error')
          const publicError = 'Update takes doc over max doc size'
          return callback(publicError)
        }
        // only check hash when present and no other updates have been applied
        if (update.hash != null && incomingUpdateVersion === version) {
          const ourHash = ShareJsUpdateManager._computeHash(data.snapshot)
          if (ourHash !== update.hash) {
            metrics.inc('sharejs.hash-fail')
            return callback(new Error('Invalid hash'))
          } else {
            metrics.inc('sharejs.hash-pass', 0.001)
          }
        }
        const docLines = data.snapshot.split(/\r\n|\n|\r/)
        return callback(
          null,
          docLines,
          data.v,
          model.db.appliedOps[doc_key] || []
        )
      })
    })
  },

  _listenForOps(model) {
    return model.on('applyOp', function (doc_key, opData) {
      const [project_id, doc_id] = Array.from(
        Keys.splitProjectIdAndDocId(doc_key)
      )
      return ShareJsUpdateManager._sendOp(project_id, doc_id, opData)
    })
  },

  _sendOp(project_id, doc_id, op) {
    return RealTimeRedisManager.sendData({ project_id, doc_id, op })
  },

  _computeHash(content) {
    return crypto
      .createHash('sha1')
      .update('blob ' + content.length + '\x00')
      .update(content, 'utf8')
      .digest('hex')
  },
}
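
// _computeHash mirrors git's blob hashing ('blob <length>\0<content>'), so the
// result can be compared with a hash computed client-side. Illustrative check
// (well-known git value for the content 'hello\n'):
//
//   printf 'hello\n' | git hash-object --stdin
//   // 'blob 6\x00hello\n' -> ce013625030ba8dba906f756967f9e9ca394464a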

87
services/document-updater/app/js/SnapshotManager.js
Normal file
@@ -0,0 +1,87 @@
/* eslint-disable
    camelcase,
    no-return-assign,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
let SnapshotManager
const { db, ObjectId } = require('./mongodb')

module.exports = SnapshotManager = {
  recordSnapshot(
    project_id,
    doc_id,
    version,
    pathname,
    lines,
    ranges,
    callback
  ) {
    try {
      project_id = ObjectId(project_id)
      doc_id = ObjectId(doc_id)
    } catch (error) {
      return callback(error)
    }
    db.docSnapshots.insertOne(
      {
        project_id,
        doc_id,
        version,
        lines,
        pathname,
        ranges: SnapshotManager.jsonRangesToMongo(ranges),
        ts: new Date(),
      },
      callback
    )
  },
  // Suggested indexes:
  //   db.docSnapshots.createIndex({project_id: 1, doc_id: 1})
  //   db.docSnapshots.createIndex({ts: 1}, {expireAfterSeconds: 30 * 24 * 3600}) // expires after 30 days

  jsonRangesToMongo(ranges) {
    if (ranges == null) {
      return null
    }

    const updateMetadata = function (metadata) {
      if ((metadata != null ? metadata.ts : undefined) != null) {
        metadata.ts = new Date(metadata.ts)
      }
      if ((metadata != null ? metadata.user_id : undefined) != null) {
        return (metadata.user_id = SnapshotManager._safeObjectId(
          metadata.user_id
        ))
      }
    }

    for (const change of Array.from(ranges.changes || [])) {
      change.id = SnapshotManager._safeObjectId(change.id)
      updateMetadata(change.metadata)
    }
    for (const comment of Array.from(ranges.comments || [])) {
      comment.id = SnapshotManager._safeObjectId(comment.id)
      if ((comment.op != null ? comment.op.t : undefined) != null) {
        comment.op.t = SnapshotManager._safeObjectId(comment.op.t)
      }
      updateMetadata(comment.metadata)
    }
    return ranges
  },

  _safeObjectId(data) {
    try {
      return ObjectId(data)
    } catch (error) {
      return data
    }
  },
}
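
// Illustrative sketch (hypothetical values): jsonRangesToMongo converts the
// string ids and timestamps that arrive as JSON into ObjectIds and Dates:
//
//   SnapshotManager.jsonRangesToMongo({
//     changes: [
//       {
//         id: '5f0c1d2e3f4a5b6c7d8e9f01',
//         metadata: { ts: '2020-07-13T09:00:00Z' },
//       },
//     ],
//   })
//   // -> changes[0].id is an ObjectId and changes[0].metadata.ts is a Date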

13
services/document-updater/app/js/UpdateKeys.js
Normal file
@@ -0,0 +1,13 @@
/* eslint-disable
    camelcase,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
module.exports = {
  combineProjectIdAndDocId(project_id, doc_id) {
    return `${project_id}:${doc_id}`
  },
  splitProjectIdAndDocId(project_and_doc_id) {
    return project_and_doc_id.split(':')
  },
}
|
417
services/document-updater/app/js/UpdateManager.js
Normal file
@@ -0,0 +1,417 @@
/* eslint-disable
    camelcase,
    handle-callback-err,
    no-unused-vars,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * DS201: Simplify complex destructure assignments
 * DS205: Consider reworking code to avoid use of IIFEs
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
let UpdateManager
const LockManager = require('./LockManager')
const RedisManager = require('./RedisManager')
const RealTimeRedisManager = require('./RealTimeRedisManager')
const ShareJsUpdateManager = require('./ShareJsUpdateManager')
const HistoryManager = require('./HistoryManager')
const Settings = require('@overleaf/settings')
const _ = require('lodash')
const async = require('async')
const logger = require('logger-sharelatex')
const Metrics = require('./Metrics')
const Errors = require('./Errors')
const DocumentManager = require('./DocumentManager')
const RangesManager = require('./RangesManager')
const SnapshotManager = require('./SnapshotManager')
const Profiler = require('./Profiler')

module.exports = UpdateManager = {
  processOutstandingUpdates(project_id, doc_id, callback) {
    if (callback == null) {
      callback = function (error) {}
    }
    const timer = new Metrics.Timer('updateManager.processOutstandingUpdates')
    return UpdateManager.fetchAndApplyUpdates(
      project_id,
      doc_id,
      function (error) {
        timer.done()
        if (error != null) {
          return callback(error)
        }
        return callback()
      }
    )
  },

  processOutstandingUpdatesWithLock(project_id, doc_id, callback) {
    if (callback == null) {
      callback = function (error) {}
    }
    const profile = new Profiler('processOutstandingUpdatesWithLock', {
      project_id,
      doc_id,
    })
    return LockManager.tryLock(doc_id, (error, gotLock, lockValue) => {
      if (error != null) {
        return callback(error)
      }
      if (!gotLock) {
        return callback()
      }
      profile.log('tryLock')
      return UpdateManager.processOutstandingUpdates(
        project_id,
        doc_id,
        function (error) {
          if (error != null) {
            return UpdateManager._handleErrorInsideLock(
              doc_id,
              lockValue,
              error,
              callback
            )
          }
          profile.log('processOutstandingUpdates')
          return LockManager.releaseLock(doc_id, lockValue, error => {
            if (error != null) {
              return callback(error)
            }
            profile.log('releaseLock').end()
            return UpdateManager.continueProcessingUpdatesWithLock(
              project_id,
              doc_id,
              callback
            )
          })
        }
      )
    })
  },

  continueProcessingUpdatesWithLock(project_id, doc_id, callback) {
    if (callback == null) {
      callback = function (error) {}
    }
    return RealTimeRedisManager.getUpdatesLength(doc_id, (error, length) => {
      if (error != null) {
        return callback(error)
      }
      if (length > 0) {
        return UpdateManager.processOutstandingUpdatesWithLock(
          project_id,
          doc_id,
          callback
        )
      } else {
        return callback()
      }
    })
  },

  fetchAndApplyUpdates(project_id, doc_id, callback) {
    if (callback == null) {
      callback = function (error) {}
    }
    const profile = new Profiler('fetchAndApplyUpdates', { project_id, doc_id })
    return RealTimeRedisManager.getPendingUpdatesForDoc(
      doc_id,
      (error, updates) => {
        if (error != null) {
          return callback(error)
        }
        logger.log(
          { project_id, doc_id, count: updates.length },
          'processing updates'
        )
        if (updates.length === 0) {
          return callback()
        }
        profile.log('getPendingUpdatesForDoc')
        const doUpdate = (update, cb) =>
          UpdateManager.applyUpdate(project_id, doc_id, update, function (err) {
            profile.log('applyUpdate')
            return cb(err)
          })
        const finalCallback = function (err) {
          profile.log('async done').end()
          return callback(err)
        }
        return async.eachSeries(updates, doUpdate, finalCallback)
      }
    )
  },

  applyUpdate(project_id, doc_id, update, _callback) {
    if (_callback == null) {
      _callback = function (error) {}
    }
    const callback = function (error) {
      if (error != null) {
        RealTimeRedisManager.sendData({
          project_id,
          doc_id,
          error: error.message || error,
        })
        profile.log('sendData')
      }
      profile.end()
      return _callback(error)
    }

    var profile = new Profiler('applyUpdate', { project_id, doc_id })
    UpdateManager._sanitizeUpdate(update)
    profile.log('sanitizeUpdate')
    return DocumentManager.getDoc(
      project_id,
      doc_id,
      function (error, lines, version, ranges, pathname, projectHistoryId) {
        profile.log('getDoc')
        if (error != null) {
          return callback(error)
        }
        if (lines == null || version == null) {
          return callback(
            new Errors.NotFoundError(`document not found: ${doc_id}`)
          )
        }
        const previousVersion = version
        return ShareJsUpdateManager.applyUpdate(
          project_id,
          doc_id,
          update,
          lines,
          version,
          function (error, updatedDocLines, version, appliedOps) {
            profile.log('sharejs.applyUpdate')
            if (error != null) {
              return callback(error)
            }
            return RangesManager.applyUpdate(
              project_id,
              doc_id,
              ranges,
              appliedOps,
              updatedDocLines,
              function (error, new_ranges, ranges_were_collapsed) {
                UpdateManager._addProjectHistoryMetadataToOps(
                  appliedOps,
                  pathname,
                  projectHistoryId,
                  lines
                )
                profile.log('RangesManager.applyUpdate')
                if (error != null) {
                  return callback(error)
                }
                return RedisManager.updateDocument(
                  project_id,
                  doc_id,
                  updatedDocLines,
                  version,
                  appliedOps,
                  new_ranges,
                  update.meta,
                  function (error, doc_ops_length, project_ops_length) {
                    profile.log('RedisManager.updateDocument')
                    if (error != null) {
                      return callback(error)
                    }
                    return HistoryManager.recordAndFlushHistoryOps(
                      project_id,
                      doc_id,
                      appliedOps,
                      doc_ops_length,
                      project_ops_length,
                      function (error) {
                        profile.log('recordAndFlushHistoryOps')
                        if (error != null) {
                          return callback(error)
                        }
                        if (ranges_were_collapsed) {
                          logger.log(
                            {
                              project_id,
                              doc_id,
                              previousVersion,
                              lines,
                              ranges,
                              update,
                            },
                            'update collapsed some ranges, snapshotting previous content'
                          )
                          // Do this last, since it's a mongo call, and so potentially longest running
                          // If it overruns the lock, it's ok, since all of our redis work is done
                          return SnapshotManager.recordSnapshot(
                            project_id,
                            doc_id,
                            previousVersion,
                            pathname,
                            lines,
                            ranges,
                            function (error) {
                              if (error != null) {
                                logger.error(
                                  {
                                    err: error,
                                    project_id,
                                    doc_id,
                                    version,
                                    lines,
                                    ranges,
                                  },
                                  'error recording snapshot'
                                )
                                return callback(error)
                              } else {
                                return callback()
                              }
                            }
                          )
                        } else {
                          return callback()
                        }
                      }
                    )
                  }
                )
              }
            )
          }
        )
      }
    )
  },

  lockUpdatesAndDo(method, project_id, doc_id, ...rest) {
    const adjustedLength = Math.max(rest.length, 1)
    const args = rest.slice(0, adjustedLength - 1)
    const callback = rest[adjustedLength - 1]
    const profile = new Profiler('lockUpdatesAndDo', { project_id, doc_id })
    return LockManager.getLock(doc_id, function (error, lockValue) {
      profile.log('getLock')
      if (error != null) {
        return callback(error)
      }
      return UpdateManager.processOutstandingUpdates(
        project_id,
        doc_id,
        function (error) {
          if (error != null) {
            return UpdateManager._handleErrorInsideLock(
              doc_id,
              lockValue,
              error,
              callback
            )
          }
          profile.log('processOutstandingUpdates')
          return method(
            project_id,
            doc_id,
            ...Array.from(args),
            function (error, ...response_args) {
              if (error != null) {
                return UpdateManager._handleErrorInsideLock(
                  doc_id,
                  lockValue,
                  error,
                  callback
                )
              }
              profile.log('method')
              return LockManager.releaseLock(
                doc_id,
                lockValue,
                function (error) {
                  if (error != null) {
                    return callback(error)
                  }
                  profile.log('releaseLock').end()
                  callback(null, ...Array.from(response_args))
                  // We held the lock for a while so updates might have queued up
                  return UpdateManager.continueProcessingUpdatesWithLock(
                    project_id,
                    doc_id
                  )
                }
              )
            }
          )
        }
      )
    })
  },

  _handleErrorInsideLock(doc_id, lockValue, original_error, callback) {
    if (callback == null) {
      callback = function (error) {}
    }
    return LockManager.releaseLock(doc_id, lockValue, lock_error =>
      callback(original_error)
    )
  },

  _sanitizeUpdate(update) {
    // In JavaScript, characters are 16 bits wide. It does not understand surrogates as characters.
    //
    // From Wikipedia (http://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane):
    // "The High Surrogates (U+D800–U+DBFF) and Low Surrogates (U+DC00–U+DFFF) codes are reserved
    // for encoding non-BMP characters in UTF-16 by using a pair of 16-bit codes: one High Surrogate
    // and one Low Surrogate. A single surrogate code point will never be assigned a character."
    //
    // The main offender seems to be \uD835 as a stand-alone character, which would be the first
    // 16-bit character of a blackboard bold character (http://www.fileformat.info/info/unicode/char/1d400/index.htm).
    // Something must be going on client side that is screwing up the encoding and splitting the
    // two 16-bit characters so that \uD835 is standalone.
    for (const op of Array.from(update.op || [])) {
      if (op.i != null) {
        // Replace high and low surrogate characters with the 'replacement character' (\uFFFD)
        op.i = op.i.replace(/[\uD800-\uDFFF]/g, '\uFFFD')
      }
    }
    return update
  },

  _addProjectHistoryMetadataToOps(updates, pathname, projectHistoryId, lines) {
    let doc_length = _.reduce(lines, (chars, line) => chars + line.length, 0)
    doc_length += lines.length - 1 // count newline characters
    return updates.forEach(function (update) {
      update.projectHistoryId = projectHistoryId
      if (!update.meta) {
        update.meta = {}
      }
      update.meta.pathname = pathname
      update.meta.doc_length = doc_length
      // Each update may contain multiple ops, i.e.
      // [{
      //   ops: [{i: "foo", p: 4}, {d: "bar", p: 8}]
      // }, {
      //   ops: [{d: "baz", p: 40}, {i: "qux", p: 8}]
      // }]
      // We want to include the doc_length at the start of each update,
      // before its ops are applied. However, we need to track any
      // changes to it for the next update.
      return (() => {
        const result = []
        for (const op of Array.from(update.op)) {
          if (op.i != null) {
            doc_length += op.i.length
          }
          if (op.d != null) {
            result.push((doc_length -= op.d.length))
          } else {
            result.push(undefined)
          }
        }
        return result
      })()
    })
  },
}
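To make `_sanitizeUpdate` concrete: the regex matches individual UTF-16 code units, so a lone surrogate is replaced, and so is each half of a valid surrogate pair. A small standalone sketch (not in the file):

// Sketch: the surrogate filter from _sanitizeUpdate applied to sample strings.
const sanitize = s => s.replace(/[\uD800-\uDFFF]/g, '\uFFFD')

console.log(sanitize('x\uD835y')) // 'x\uFFFDy' - the lone high surrogate is replaced
console.log(sanitize('\uD835\uDC00')) // '\uFFFD\uFFFD' - a valid pair (U+1D400) is replaced too

Similarly, `_addProjectHistoryMetadataToOps` computes `doc_length` as total characters plus joining newlines: for `lines = ['hello', 'world']` it is 5 + 5 + 1 = 11.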
37
services/document-updater/app/js/mongodb.js
Normal file
@@ -0,0 +1,37 @@
const Settings = require('@overleaf/settings')
const { MongoClient, ObjectId } = require('mongodb')

const clientPromise = MongoClient.connect(
  Settings.mongo.url,
  Settings.mongo.options
)

async function healthCheck() {
  const internalDb = (await clientPromise).db()
  const res = await internalDb.command({ ping: 1 })
  if (!res.ok) {
    throw new Error('failed mongo ping')
  }
}

let setupDbPromise
async function waitForDb() {
  if (!setupDbPromise) {
    setupDbPromise = setupDb()
  }
  await setupDbPromise
}

const db = {}
async function setupDb() {
  const internalDb = (await clientPromise).db()

  db.docSnapshots = internalDb.collection('docSnapshots')
}

module.exports = {
  db,
  ObjectId,
  healthCheck: require('util').callbackify(healthCheck),
  waitForDb,
}
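A hypothetical caller sketch (not in the repo) showing the intended startup order: wait for the collection handles to be wired up before touching `db`:

// Hypothetical startup sketch: wire up collections before first use.
const { db, waitForDb } = require('./mongodb')

async function main() {
  await waitForDb() // runs setupDb() once; concurrent callers share the promise
  const latest = await db.docSnapshots.findOne({}, { sort: { ts: -1 } })
  console.log(latest)
}

main().catch(err => {
  console.error(err)
  process.exit(1)
})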
48
services/document-updater/app/js/sharejs/README.md
Normal file
@@ -0,0 +1,48 @@
This directory contains all the operational transform code. Each file defines a type.

Most of the types in here are for testing or demonstration. The only types which are sent to the webclient
are `text` and `json`.


# An OT type

All OT types have the following fields:

`name`: _(string)_ Name of the type. Should match the filename.
`create() -> snapshot`: Function which creates and returns a new document snapshot

`apply(snapshot, op) -> snapshot`: A function which creates a new document snapshot with the op applied
`transform(op1, op2, side) -> op1'`: OT transform function.

Given op1, op2, `apply(apply(s, op2), transform(op1, op2, 'left')) == apply(apply(s, op1), transform(op2, op1, 'right'))`.

Transform and apply must never modify their arguments.


Optional properties:

`tp2`: _(bool)_ True if the transform function supports TP2. This allows p2p architectures to work.
`compose(op1, op2) -> op`: Create and return a new op which has the same effect as op1 + op2.
`serialize(snapshot) -> JSON object`: Serialize a document to something we can JSON.stringify()
`deserialize(object) -> snapshot`: Deserialize a JSON object into the document's internal snapshot format
`prune(op1', op2, side) -> op1`: Inverse transform function. Only required for TP2 types.
`normalize(op) -> op`: Fix up an op to make it valid. Eg, remove skips of size zero.
`api`: _(object)_ Set of helper methods which will be mixed in to the client document object for manipulating documents. See below.


# Examples

`count` and `simple` are two trivial OT type definitions if you want to take a look. JSON defines
the ot-for-JSON type (see the wiki for documentation) and all the text types define different text
implementations. (I still have no idea which one I like the most, and they're fun to write!)


# API

Types can also define API functions. These methods are mixed into the client's Doc object when a document is created.
You can use them to help construct ops programmatically (so users don't need to understand how ops are structured).

For example, the three text types defined here (text, text-composable and text-tp2) all provide the text API, supplying
`.insert()`, `.del()`, `.getLength` and `.getText` methods.

See text-api.coffee for an example.
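The convergence property above can be checked directly against the trivial `count` type defined in count.js just below; a quick sketch:

// Verifying TP1 convergence with the 'count' type (defined below).
const count = require('./count')

const s = count.create() // snapshot: 1
const op1 = [1, 2] // "at a snapshot of 1, add 2"
const op2 = [1, 3] // "at a snapshot of 1, add 3"

const left = count.apply(count.apply(s, op2), count.transform(op1, op2))
const right = count.apply(count.apply(s, op1), count.transform(op2, op1))
console.log(left, right) // 6 6 - both application orders converge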
37
services/document-updater/app/js/sharejs/count.js
Normal file
@@ -0,0 +1,37 @@
// TODO: This file was created by bulk-decaffeinate.
// Sanity-check the conversion and remove this comment.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
// This is a simple type used for testing other OT code. Each op is [expectedSnapshot, increment]

exports.name = 'count'
exports.create = () => 1

exports.apply = function (snapshot, op) {
  const [v, inc] = Array.from(op)
  if (snapshot !== v) {
    throw new Error(`Op ${v} != snapshot ${snapshot}`)
  }
  return snapshot + inc
}

// transform op1 by op2. Return transformed version of op1.
exports.transform = function (op1, op2) {
  if (op1[0] !== op2[0]) {
    throw new Error(`Op1 ${op1[0]} != op2 ${op2[0]}`)
  }
  return [op1[0] + op2[1], op1[1]]
}

exports.compose = function (op1, op2) {
  if (op1[0] + op1[1] !== op2[0]) {
    throw new Error(`Op1 ${op1} + 1 != op2 ${op2}`)
  }
  return [op1[0], op1[1] + op2[1]]
}

exports.generateRandomOp = doc => [[doc, 1], doc + 1]
116
services/document-updater/app/js/sharejs/helpers.js
Normal file
@@ -0,0 +1,116 @@
/* eslint-disable
    no-return-assign,
    no-unused-vars,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
// These methods let you build a transform function from a transformComponent function
// for OT types like text and JSON in which operations are lists of components
// and transforming them requires N^2 work.

// Add transform and transformX functions for an OT type which has transformComponent defined.
// transformComponent(destination array, component, other component, side)
let bootstrapTransform
exports._bt = bootstrapTransform = function (
  type,
  transformComponent,
  checkValidOp,
  append
) {
  let transformX
  const transformComponentX = function (left, right, destLeft, destRight) {
    transformComponent(destLeft, left, right, 'left')
    return transformComponent(destRight, right, left, 'right')
  }

  // Transforms leftOp and rightOp by each other. Returns [leftOp', rightOp']
  type.transformX = transformX = function (leftOp, rightOp) {
    checkValidOp(leftOp)
    checkValidOp(rightOp)

    const newRightOp = []

    for (let rightComponent of Array.from(rightOp)) {
      // Generate newLeftOp by composing leftOp by rightComponent
      const newLeftOp = []

      let k = 0
      while (k < leftOp.length) {
        var l
        const nextC = []
        transformComponentX(leftOp[k], rightComponent, newLeftOp, nextC)
        k++

        if (nextC.length === 1) {
          rightComponent = nextC[0]
        } else if (nextC.length === 0) {
          for (l of Array.from(leftOp.slice(k))) {
            append(newLeftOp, l)
          }
          rightComponent = null
          break
        } else {
          // Recurse.
          const [l_, r_] = Array.from(transformX(leftOp.slice(k), nextC))
          for (l of Array.from(l_)) {
            append(newLeftOp, l)
          }
          for (const r of Array.from(r_)) {
            append(newRightOp, r)
          }
          rightComponent = null
          break
        }
      }

      if (rightComponent != null) {
        append(newRightOp, rightComponent)
      }
      leftOp = newLeftOp
    }

    return [leftOp, newRightOp]
  }

  // Transforms op with specified type ('left' or 'right') by otherOp.
  return (type.transform = function (op, otherOp, type) {
    let _
    if (type !== 'left' && type !== 'right') {
      throw new Error("type must be 'left' or 'right'")
    }

    if (otherOp.length === 0) {
      return op
    }

    // TODO: Benchmark with and without this line. I _think_ it'll make a big difference...?
    if (op.length === 1 && otherOp.length === 1) {
      return transformComponent([], op[0], otherOp[0], type)
    }

    if (type === 'left') {
      let left
      ;[left, _] = Array.from(transformX(op, otherOp))
      return left
    } else {
      let right
      ;[_, right] = Array.from(transformX(otherOp, op))
      return right
    }
  })
}

if (typeof WEB === 'undefined') {
  exports.bootstrapTransform = bootstrapTransform
}
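For reference, this is exactly how the `json` type (further down) wires itself up on the server side; after the call, `json.transform(op, otherOp, 'left' | 'right')` and `json.transformX(leftOp, rightOp)` become available:

// From json.js below - bootstrapping transform/transformX onto the json type:
require('./helpers').bootstrapTransform(
  json,
  json.transformComponent,
  json.checkValidOp,
  json.append
)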
25
services/document-updater/app/js/sharejs/index.js
Normal file
@@ -0,0 +1,25 @@
// TODO: This file was created by bulk-decaffeinate.
// Sanity-check the conversion and remove this comment.
/*
 * decaffeinate suggestions:
 * DS102: Remove unnecessary code created because of implicit returns
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */

const register = function (file) {
  const type = require(file)
  exports[type.name] = type
  try {
    return require(`${file}-api`)
  } catch (error) {}
}

// Import all the built-in types.
register('./simple')
register('./count')

register('./text')
register('./text-composable')
register('./text-tp2')

register('./json')
357
services/document-updater/app/js/sharejs/json-api.js
Normal file
@@ -0,0 +1,357 @@
/* eslint-disable
    camelcase,
    no-undef,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * DS205: Consider reworking code to avoid use of IIFEs
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
// API for JSON OT

let json
if (typeof WEB === 'undefined') {
  json = require('./json')
}

if (typeof WEB !== 'undefined' && WEB !== null) {
  const { extendDoc } = exports
  exports.extendDoc = function (name, fn) {
    SubDoc.prototype[name] = fn
    return extendDoc(name, fn)
  }
}

const depath = function (path) {
  if (path.length === 1 && path[0].constructor === Array) {
    return path[0]
  } else {
    return path
  }
}

class SubDoc {
  constructor(doc, path) {
    this.doc = doc
    this.path = path
  }

  at(...path) {
    return this.doc.at(this.path.concat(depath(path)))
  }

  get() {
    return this.doc.getAt(this.path)
  }

  // for objects and lists
  set(value, cb) {
    return this.doc.setAt(this.path, value, cb)
  }

  // for strings and lists.
  insert(pos, value, cb) {
    return this.doc.insertAt(this.path, pos, value, cb)
  }

  // for strings
  del(pos, length, cb) {
    return this.doc.deleteTextAt(this.path, length, pos, cb)
  }

  // for objects and lists
  remove(cb) {
    return this.doc.removeAt(this.path, cb)
  }

  push(value, cb) {
    return this.insert(this.get().length, value, cb)
  }

  move(from, to, cb) {
    return this.doc.moveAt(this.path, from, to, cb)
  }

  add(amount, cb) {
    return this.doc.addAt(this.path, amount, cb)
  }

  on(event, cb) {
    return this.doc.addListener(this.path, event, cb)
  }

  removeListener(l) {
    return this.doc.removeListener(l)
  }

  // text API compatibility
  getLength() {
    return this.get().length
  }

  getText() {
    return this.get()
  }
}

const traverse = function (snapshot, path) {
  const container = { data: snapshot }
  let key = 'data'
  let elem = container
  for (const p of Array.from(path)) {
    elem = elem[key]
    key = p
    if (typeof elem === 'undefined') {
      throw new Error('bad path')
    }
  }
  return { elem, key }
}

const pathEquals = function (p1, p2) {
  if (p1.length !== p2.length) {
    return false
  }
  for (let i = 0; i < p1.length; i++) {
    const e = p1[i]
    if (e !== p2[i]) {
      return false
    }
  }
  return true
}

json.api = {
  provides: { json: true },

  at(...path) {
    return new SubDoc(this, depath(path))
  },

  get() {
    return this.snapshot
  },
  set(value, cb) {
    return this.setAt([], value, cb)
  },

  getAt(path) {
    const { elem, key } = traverse(this.snapshot, path)
    return elem[key]
  },

  setAt(path, value, cb) {
    const { elem, key } = traverse(this.snapshot, path)
    const op = { p: path }
    if (elem.constructor === Array) {
      op.li = value
      if (typeof elem[key] !== 'undefined') {
        op.ld = elem[key]
      }
    } else if (typeof elem === 'object') {
      op.oi = value
      if (typeof elem[key] !== 'undefined') {
        op.od = elem[key]
      }
    } else {
      throw new Error('bad path')
    }
    return this.submitOp([op], cb)
  },

  removeAt(path, cb) {
    const { elem, key } = traverse(this.snapshot, path)
    if (typeof elem[key] === 'undefined') {
      throw new Error('no element at that path')
    }
    const op = { p: path }
    if (elem.constructor === Array) {
      op.ld = elem[key]
    } else if (typeof elem === 'object') {
      op.od = elem[key]
    } else {
      throw new Error('bad path')
    }
    return this.submitOp([op], cb)
  },

  insertAt(path, pos, value, cb) {
    const { elem, key } = traverse(this.snapshot, path)
    const op = { p: path.concat(pos) }
    if (elem[key].constructor === Array) {
      op.li = value
    } else if (typeof elem[key] === 'string') {
      op.si = value
    }
    return this.submitOp([op], cb)
  },

  moveAt(path, from, to, cb) {
    const op = [{ p: path.concat(from), lm: to }]
    return this.submitOp(op, cb)
  },

  addAt(path, amount, cb) {
    const op = [{ p: path, na: amount }]
    return this.submitOp(op, cb)
  },

  deleteTextAt(path, length, pos, cb) {
    const { elem, key } = traverse(this.snapshot, path)
    const op = [{ p: path.concat(pos), sd: elem[key].slice(pos, pos + length) }]
    return this.submitOp(op, cb)
  },

  addListener(path, event, cb) {
    const l = { path, event, cb }
    this._listeners.push(l)
    return l
  },
  removeListener(l) {
    const i = this._listeners.indexOf(l)
    if (i < 0) {
      return false
    }
    this._listeners.splice(i, 1)
    return true
  },
  _register() {
    this._listeners = []
    this.on('change', function (op) {
      return (() => {
        const result = []
        for (const c of Array.from(op)) {
          var i
          if (c.na !== undefined || c.si !== undefined || c.sd !== undefined) {
            // no change to structure
            continue
          }
          var to_remove = []
          for (i = 0; i < this._listeners.length; i++) {
            // Transform a dummy op by the incoming op to work out what
            // should happen to the listener.
            const l = this._listeners[i]
            const dummy = { p: l.path, na: 0 }
            const xformed = this.type.transformComponent([], dummy, c, 'left')
            if (xformed.length === 0) {
              // The op was transformed to noop, so we should delete the listener.
              to_remove.push(i)
            } else if (xformed.length === 1) {
              // The op remained, so grab its new path into the listener.
              l.path = xformed[0].p
            } else {
              throw new Error(
                "Bad assumption in json-api: xforming an 'si' op will always result in 0 or 1 components."
              )
            }
          }
          to_remove.sort((a, b) => b - a)
          result.push(
            (() => {
              const result1 = []
              for (i of Array.from(to_remove)) {
                result1.push(this._listeners.splice(i, 1))
              }
              return result1
            })()
          )
        }
        return result
      })()
    })
    return this.on('remoteop', function (op) {
      return (() => {
        const result = []
        for (var c of Array.from(op)) {
          var match_path =
            c.na === undefined ? c.p.slice(0, c.p.length - 1) : c.p
          result.push(
            (() => {
              const result1 = []
              for (const { path, event, cb } of Array.from(this._listeners)) {
                var common
                if (pathEquals(path, match_path)) {
                  switch (event) {
                    case 'insert':
                      if (c.li !== undefined && c.ld === undefined) {
                        result1.push(cb(c.p[c.p.length - 1], c.li))
                      } else if (c.oi !== undefined && c.od === undefined) {
                        result1.push(cb(c.p[c.p.length - 1], c.oi))
                      } else if (c.si !== undefined) {
                        result1.push(cb(c.p[c.p.length - 1], c.si))
                      } else {
                        result1.push(undefined)
                      }
                      break
                    case 'delete':
                      if (c.li === undefined && c.ld !== undefined) {
                        result1.push(cb(c.p[c.p.length - 1], c.ld))
                      } else if (c.oi === undefined && c.od !== undefined) {
                        result1.push(cb(c.p[c.p.length - 1], c.od))
                      } else if (c.sd !== undefined) {
                        result1.push(cb(c.p[c.p.length - 1], c.sd))
                      } else {
                        result1.push(undefined)
                      }
                      break
                    case 'replace':
                      if (c.li !== undefined && c.ld !== undefined) {
                        result1.push(cb(c.p[c.p.length - 1], c.ld, c.li))
                      } else if (c.oi !== undefined && c.od !== undefined) {
                        result1.push(cb(c.p[c.p.length - 1], c.od, c.oi))
                      } else {
                        result1.push(undefined)
                      }
                      break
                    case 'move':
                      if (c.lm !== undefined) {
                        result1.push(cb(c.p[c.p.length - 1], c.lm))
                      } else {
                        result1.push(undefined)
                      }
                      break
                    case 'add':
                      if (c.na !== undefined) {
                        result1.push(cb(c.na))
                      } else {
                        result1.push(undefined)
                      }
                      break
                    default:
                      result1.push(undefined)
                  }
                } else if (
                  (common = this.type.commonPath(match_path, path)) != null
                ) {
                  if (event === 'child op') {
                    if (
                      match_path.length === path.length &&
                      path.length === common
                    ) {
                      throw new Error(
                        "paths match length and have commonality, but aren't equal?"
                      )
                    }
                    const child_path = c.p.slice(common + 1)
                    result1.push(cb(child_path, c))
                  } else {
                    result1.push(undefined)
                  }
                } else {
                  result1.push(undefined)
                }
              }
              return result1
            })()
          )
        }
        return result
      })()
    })
  },
}
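A hypothetical client-side sketch of this API, assuming `doc` is a client Doc object of type `json` (so `json.api` has been mixed in and `submitOp` is available):

// Hypothetical usage of the mixed-in JSON API on a client Doc object.
doc.set({ title: 'Untitled', tags: [] }, function () {
  const title = doc.at('title')
  title.insert(0, 'My ') // submits [{p: ['title', 0], si: 'My '}]
  title.on('insert', (pos, text) => console.log('inserted', text, 'at', pos))
  doc.at('tags').push('draft') // list insert at the end: [{p: ['tags', 0], li: 'draft'}]
})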
630
services/document-updater/app/js/sharejs/json.js
Normal file
@@ -0,0 +1,630 @@
/* eslint-disable
    no-return-assign,
    no-undef,
    no-useless-catch,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
// This is the implementation of the JSON OT type.
//
// Spec is here: https://github.com/josephg/ShareJS/wiki/JSON-Operations

let text
if (typeof WEB !== 'undefined' && WEB !== null) {
  ;({ text } = exports.types)
} else {
  text = require('./text')
}

const json = {}

json.name = 'json'

json.create = () => null

json.invertComponent = function (c) {
  const c_ = { p: c.p }
  if (c.si !== undefined) {
    c_.sd = c.si
  }
  if (c.sd !== undefined) {
    c_.si = c.sd
  }
  if (c.oi !== undefined) {
    c_.od = c.oi
  }
  if (c.od !== undefined) {
    c_.oi = c.od
  }
  if (c.li !== undefined) {
    c_.ld = c.li
  }
  if (c.ld !== undefined) {
    c_.li = c.ld
  }
  if (c.na !== undefined) {
    c_.na = -c.na
  }
  if (c.lm !== undefined) {
    c_.lm = c.p[c.p.length - 1]
    c_.p = c.p.slice(0, c.p.length - 1).concat([c.lm])
  }
  return c_
}

json.invert = op =>
  Array.from(op.slice().reverse()).map(c => json.invertComponent(c))

json.checkValidOp = function (op) {}

const isArray = o => Object.prototype.toString.call(o) === '[object Array]'
json.checkList = function (elem) {
  if (!isArray(elem)) {
    throw new Error('Referenced element not a list')
  }
}

json.checkObj = function (elem) {
  if (elem.constructor !== Object) {
    throw new Error(
      `Referenced element not an object (it was ${JSON.stringify(elem)})`
    )
  }
}

json.apply = function (snapshot, op) {
  json.checkValidOp(op)
  op = clone(op)

  const container = { data: clone(snapshot) }

  try {
    for (let i = 0; i < op.length; i++) {
      const c = op[i]
      let parent = null
      let parentkey = null
      let elem = container
      let key = 'data'

      for (const p of Array.from(c.p)) {
        parent = elem
        parentkey = key
        elem = elem[key]
        key = p

        if (parent == null) {
          throw new Error('Path invalid')
        }
      }

      if (c.na !== undefined) {
        // Number add
        if (typeof elem[key] !== 'number') {
          throw new Error('Referenced element not a number')
        }
        elem[key] += c.na
      } else if (c.si !== undefined) {
        // String insert
        if (typeof elem !== 'string') {
          throw new Error(
            `Referenced element not a string (it was ${JSON.stringify(elem)})`
          )
        }
        parent[parentkey] = elem.slice(0, key) + c.si + elem.slice(key)
      } else if (c.sd !== undefined) {
        // String delete
        if (typeof elem !== 'string') {
          throw new Error('Referenced element not a string')
        }
        if (elem.slice(key, key + c.sd.length) !== c.sd) {
          throw new Error('Deleted string does not match')
        }
        parent[parentkey] = elem.slice(0, key) + elem.slice(key + c.sd.length)
      } else if (c.li !== undefined && c.ld !== undefined) {
        // List replace
        json.checkList(elem)

        // Should check the list element matches c.ld
        elem[key] = c.li
      } else if (c.li !== undefined) {
        // List insert
        json.checkList(elem)

        elem.splice(key, 0, c.li)
      } else if (c.ld !== undefined) {
        // List delete
        json.checkList(elem)

        // Should check the list element matches c.ld here too.
        elem.splice(key, 1)
      } else if (c.lm !== undefined) {
        // List move
        json.checkList(elem)
        if (c.lm !== key) {
          const e = elem[key]
          // Remove it...
          elem.splice(key, 1)
          // And insert it back.
          elem.splice(c.lm, 0, e)
        }
      } else if (c.oi !== undefined) {
        // Object insert / replace
        json.checkObj(elem)

        // Should check that elem[key] == c.od
        elem[key] = c.oi
      } else if (c.od !== undefined) {
        // Object delete
        json.checkObj(elem)

        // Should check that elem[key] == c.od
        delete elem[key]
      } else {
        throw new Error('invalid / missing instruction in op')
      }
    }
  } catch (error) {
    // TODO: Roll back all already applied changes. Write tests before implementing this code.
    throw error
  }

  return container.data
}

// Checks if two paths, p1 and p2 match.
json.pathMatches = function (p1, p2, ignoreLast) {
  if (p1.length !== p2.length) {
    return false
  }

  for (let i = 0; i < p1.length; i++) {
    const p = p1[i]
    if (p !== p2[i] && (!ignoreLast || i !== p1.length - 1)) {
      return false
    }
  }

  return true
}

json.append = function (dest, c) {
  let last
  c = clone(c)
  if (
    dest.length !== 0 &&
    json.pathMatches(c.p, (last = dest[dest.length - 1]).p)
  ) {
    if (last.na !== undefined && c.na !== undefined) {
      return (dest[dest.length - 1] = { p: last.p, na: last.na + c.na })
    } else if (
      last.li !== undefined &&
      c.li === undefined &&
      c.ld === last.li
    ) {
      // insert immediately followed by delete becomes a noop.
      if (last.ld !== undefined) {
        // leave the delete part of the replace
        return delete last.li
      } else {
        return dest.pop()
      }
    } else if (
      last.od !== undefined &&
      last.oi === undefined &&
      c.oi !== undefined &&
      c.od === undefined
    ) {
      return (last.oi = c.oi)
    } else if (c.lm !== undefined && c.p[c.p.length - 1] === c.lm) {
      return null // don't do anything
    } else {
      return dest.push(c)
    }
  } else {
    return dest.push(c)
  }
}

json.compose = function (op1, op2) {
  json.checkValidOp(op1)
  json.checkValidOp(op2)

  const newOp = clone(op1)
  for (const c of Array.from(op2)) {
    json.append(newOp, c)
  }

  return newOp
}

json.normalize = function (op) {
  const newOp = []

  if (!isArray(op)) {
    op = [op]
  }

  for (const c of Array.from(op)) {
    if (c.p == null) {
      c.p = []
    }
    json.append(newOp, c)
  }

  return newOp
}

// hax, copied from test/types/json. Apparently this is still the fastest way to deep clone an object, assuming
// we have browser support for JSON.
// http://jsperf.com/cloning-an-object/12
var clone = o => JSON.parse(JSON.stringify(o))

json.commonPath = function (p1, p2) {
  p1 = p1.slice()
  p2 = p2.slice()
  p1.unshift('data')
  p2.unshift('data')
  p1 = p1.slice(0, p1.length - 1)
  p2 = p2.slice(0, p2.length - 1)
  if (p2.length === 0) {
    return -1
  }
  let i = 0
  while (p1[i] === p2[i] && i < p1.length) {
    i++
    if (i === p2.length) {
      return i - 1
    }
  }
}

// transform c so it applies to a document with otherC applied.
json.transformComponent = function (dest, c, otherC, type) {
  let oc
  c = clone(c)
  if (c.na !== undefined) {
    c.p.push(0)
  }
  if (otherC.na !== undefined) {
    otherC.p.push(0)
  }

  const common = json.commonPath(c.p, otherC.p)
  const common2 = json.commonPath(otherC.p, c.p)

  const cplength = c.p.length
  const otherCplength = otherC.p.length

  if (c.na !== undefined) {
    c.p.pop()
  } // hax
  if (otherC.na !== undefined) {
    otherC.p.pop()
  }

  if (otherC.na) {
    if (
      common2 != null &&
      otherCplength >= cplength &&
      otherC.p[common2] === c.p[common2]
    ) {
      if (c.ld !== undefined) {
        oc = clone(otherC)
        oc.p = oc.p.slice(cplength)
        c.ld = json.apply(clone(c.ld), [oc])
      } else if (c.od !== undefined) {
        oc = clone(otherC)
        oc.p = oc.p.slice(cplength)
        c.od = json.apply(clone(c.od), [oc])
      }
    }
    json.append(dest, c)
    return dest
  }

  if (
    common2 != null &&
    otherCplength > cplength &&
    c.p[common2] === otherC.p[common2]
  ) {
    // transform based on c
    if (c.ld !== undefined) {
      oc = clone(otherC)
      oc.p = oc.p.slice(cplength)
      c.ld = json.apply(clone(c.ld), [oc])
    } else if (c.od !== undefined) {
      oc = clone(otherC)
      oc.p = oc.p.slice(cplength)
      c.od = json.apply(clone(c.od), [oc])
    }
  }

  if (common != null) {
    let from, p, to
    const commonOperand = cplength === otherCplength
    // transform based on otherC
    if (otherC.na !== undefined) {
      // this case is handled above due to icky path hax
    } else if (otherC.si !== undefined || otherC.sd !== undefined) {
      // String op vs string op - pass through to text type
      if (c.si !== undefined || c.sd !== undefined) {
        if (!commonOperand) {
          throw new Error('must be a string?')
        }

        // Convert an op component to a text op component
        const convert = function (component) {
          const newC = { p: component.p[component.p.length - 1] }
          if (component.si) {
            newC.i = component.si
          } else {
            newC.d = component.sd
          }
          return newC
        }

        const tc1 = convert(c)
        const tc2 = convert(otherC)

        const res = []
        text._tc(res, tc1, tc2, type)
        for (const tc of Array.from(res)) {
          const jc = { p: c.p.slice(0, common) }
          jc.p.push(tc.p)
          if (tc.i != null) {
            jc.si = tc.i
          }
          if (tc.d != null) {
            jc.sd = tc.d
          }
          json.append(dest, jc)
        }
        return dest
      }
    } else if (otherC.li !== undefined && otherC.ld !== undefined) {
      if (otherC.p[common] === c.p[common]) {
        // noop
        if (!commonOperand) {
          // we're below the deleted element, so -> noop
          return dest
        } else if (c.ld !== undefined) {
          // we're trying to delete the same element, -> noop
          if (c.li !== undefined && type === 'left') {
            // we're both replacing one element with another. only one can
            // survive!
            c.ld = clone(otherC.li)
          } else {
            return dest
          }
        }
      }
    } else if (otherC.li !== undefined) {
      if (
        c.li !== undefined &&
        c.ld === undefined &&
        commonOperand &&
        c.p[common] === otherC.p[common]
      ) {
        // in li vs. li, left wins.
        if (type === 'right') {
          c.p[common]++
        }
      } else if (otherC.p[common] <= c.p[common]) {
        c.p[common]++
      }

      if (c.lm !== undefined) {
        if (commonOperand) {
          // otherC edits the same list we edit
          if (otherC.p[common] <= c.lm) {
            c.lm++
          }
        }
      }
      // changing c.from is handled above.
    } else if (otherC.ld !== undefined) {
      if (c.lm !== undefined) {
        if (commonOperand) {
          if (otherC.p[common] === c.p[common]) {
            // they deleted the thing we're trying to move
            return dest
          }
          // otherC edits the same list we edit
          p = otherC.p[common]
          from = c.p[common]
          to = c.lm
          if (p < to || (p === to && from < to)) {
            c.lm--
          }
        }
      }

      if (otherC.p[common] < c.p[common]) {
        c.p[common]--
      } else if (otherC.p[common] === c.p[common]) {
        if (otherCplength < cplength) {
          // we're below the deleted element, so -> noop
          return dest
        } else if (c.ld !== undefined) {
          if (c.li !== undefined) {
            // we're replacing, they're deleting. we become an insert.
            delete c.ld
          } else {
            // we're trying to delete the same element, -> noop
            return dest
          }
        }
      }
    } else if (otherC.lm !== undefined) {
      if (c.lm !== undefined && cplength === otherCplength) {
        // lm vs lm, here we go!
        from = c.p[common]
        to = c.lm
        const otherFrom = otherC.p[common]
        const otherTo = otherC.lm
        if (otherFrom !== otherTo) {
          // if otherFrom == otherTo, we don't need to change our op.

          // where did my thing go?
          if (from === otherFrom) {
            // they moved it! tie break.
            if (type === 'left') {
              c.p[common] = otherTo
              if (from === to) {
                // ugh
                c.lm = otherTo
              }
            } else {
              return dest
            }
          } else {
            // they moved around it
            if (from > otherFrom) {
              c.p[common]--
            }
            if (from > otherTo) {
              c.p[common]++
            } else if (from === otherTo) {
              if (otherFrom > otherTo) {
                c.p[common]++
                if (from === to) {
                  // ugh, again
                  c.lm++
                }
              }
            }

            // step 2: where am i going to put it?
            if (to > otherFrom) {
              c.lm--
            } else if (to === otherFrom) {
              if (to > from) {
                c.lm--
              }
            }
            if (to > otherTo) {
              c.lm++
            } else if (to === otherTo) {
              // if we're both moving in the same direction, tie break
              if (
                (otherTo > otherFrom && to > from) ||
                (otherTo < otherFrom && to < from)
              ) {
                if (type === 'right') {
                  c.lm++
                }
              } else {
                if (to > from) {
                  c.lm++
                } else if (to === otherFrom) {
                  c.lm--
                }
              }
            }
          }
        }
      } else if (c.li !== undefined && c.ld === undefined && commonOperand) {
        // li
        from = otherC.p[common]
        to = otherC.lm
        p = c.p[common]
        if (p > from) {
          c.p[common]--
        }
        if (p > to) {
          c.p[common]++
        }
      } else {
        // ld, ld+li, si, sd, na, oi, od, oi+od, any li on an element beneath
        // the lm
        //
        // i.e. things care about where their item is after the move.
        from = otherC.p[common]
        to = otherC.lm
        p = c.p[common]
        if (p === from) {
          c.p[common] = to
        } else {
          if (p > from) {
            c.p[common]--
          }
          if (p > to) {
            c.p[common]++
          } else if (p === to) {
            if (from > to) {
              c.p[common]++
            }
          }
        }
      }
    } else if (otherC.oi !== undefined && otherC.od !== undefined) {
      if (c.p[common] === otherC.p[common]) {
        if (c.oi !== undefined && commonOperand) {
          // we inserted where someone else replaced
          if (type === 'right') {
            // left wins
            return dest
          } else {
            // we win, make our op replace what they inserted
            c.od = otherC.oi
          }
        } else {
          // -> noop if the other component is deleting the same object (or any
          // parent)
          return dest
        }
      }
    } else if (otherC.oi !== undefined) {
      if (c.oi !== undefined && c.p[common] === otherC.p[common]) {
        // left wins if we try to insert at the same place
        if (type === 'left') {
          json.append(dest, { p: c.p, od: otherC.oi })
        } else {
          return dest
        }
      }
    } else if (otherC.od !== undefined) {
      if (c.p[common] === otherC.p[common]) {
        if (!commonOperand) {
          return dest
        }
        if (c.oi !== undefined) {
          delete c.od
        } else {
          return dest
        }
      }
    }
  }

  json.append(dest, c)
  return dest
}

if (typeof WEB !== 'undefined' && WEB !== null) {
  if (!exports.types) {
    exports.types = {}
  }

  // This is kind of awful - come up with a better way to hook this helper code up.
  exports._bt(json, json.transformComponent, json.checkValidOp, json.append)

  // [] is used to prevent closure from renaming types.text
  exports.types.json = json
} else {
  module.exports = json

  require('./helpers').bootstrapTransform(
    json,
    json.transformComponent,
    json.checkValidOp,
    json.append
  )
}
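To ground the op components handled by `json.apply` above, a small standalone sketch (not part of the repo):

// Sketch: applying JSON OT components one at a time.
const json = require('./json')

let snap = json.apply(json.create(), [
  { p: [], oi: { title: '', tags: [], count: 0 } }, // object insert at the root
])
snap = json.apply(snap, [{ p: ['title', 0], si: 'Hello' }]) // string insert
snap = json.apply(snap, [{ p: ['tags', 0], li: 'ot' }]) // list insert
snap = json.apply(snap, [{ p: ['count'], na: 5 }]) // number add
console.log(snap) // { title: 'Hello', tags: ['ot'], count: 5 }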
883
services/document-updater/app/js/sharejs/model.js
Normal file
@@ -0,0 +1,883 @@
/* eslint-disable
    no-console,
    no-return-assign,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * DS103: Rewrite code to no longer use __guard__
 * DS104: Avoid inline assignments
 * DS204: Change includes calls to have a more natural evaluation order
 * DS205: Consider reworking code to avoid use of IIFEs
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
// The model of all the ops. Responsible for applying & transforming remote deltas
// and managing the storage layer.
//
// Actual storage is handled by the database wrappers in db/*, wrapped by DocCache

let Model
const { EventEmitter } = require('events')

const queue = require('./syncqueue')
const types = require('../types')

const isArray = o => Object.prototype.toString.call(o) === '[object Array]'

// This constructor creates a new Model object. There will be one model object
// per server context.
//
// The model object is responsible for a lot of things:
//
// - It manages the interactions with the database
// - It maintains (in memory) a set of all active documents
// - It calls out to the OT functions when necessary
//
// The model is an event emitter. It emits the following events:
//
// create(docName, data): A document has been created with the specified name & data
module.exports = Model = function (db, options) {
  // db can be null if the user doesn't want persistence.

  let getOps
  if (!(this instanceof Model)) {
    return new Model(db, options)
  }

  const model = this

  if (options == null) {
    options = {}
  }

  // This is a cache of 'live' documents.
  //
  // The cache is a map from docName -> {
  //   ops: [{op, meta}]
  //   snapshot
  //   type
  //   v
  //   meta
  //   eventEmitter
  //   reapTimer
  //   committedVersion: v
  //   snapshotWriteLock: bool to make sure writeSnapshot isn't re-entrant
  //   dbMeta: database specific data
  //   opQueue: syncQueue for processing ops
  // }
  //
  // The ops list contains the document's last options.numCachedOps ops. (Or all
  // of them if we're using a memory store.)
  //
  // Documents are stored in this set so long as the document has been accessed in
  // the last few seconds (options.reapTime) OR at least one client has the document
  // open. I don't know if I should keep open (but not being edited) documents live -
  // maybe if a client has a document open but the document isn't being edited, I should
  // flush it from the cache.
  //
  // In any case, the API to model is designed such that if we want to change that later
  // it should be pretty easy to do so without any external-to-the-model code changes.
  const docs = {}

  // This is a map from docName -> [callback]. It is used when a document hasn't been
  // cached and multiple getSnapshot() / getVersion() requests come in. All requests
  // are added to the callback list and called when db.getSnapshot() returns.
  //
  // callback(error, snapshot data)
  const awaitingGetSnapshot = {}

  // The time that documents which no clients have open will stay in the cache.
  // Should be > 0.
  if (options.reapTime == null) {
    options.reapTime = 3000
  }

  // The number of operations the cache holds before reusing the space
  if (options.numCachedOps == null) {
    options.numCachedOps = 10
  }

  // This option forces documents to be reaped, even when there's no database backend.
  // This is useful when you don't care about persistence and don't want to gradually
  // fill memory.
  //
  // You might want to set reapTime to a day or something.
  if (options.forceReaping == null) {
    options.forceReaping = false
  }

  // Until I come up with a better strategy, we'll save a copy of the document snapshot
  // to the database every ~20 submitted ops.
  if (options.opsBeforeCommit == null) {
    options.opsBeforeCommit = 20
  }

  // It takes some processing time to transform client ops. The server will punt ops back to the
  // client to transform if they're too old.
  if (options.maximumAge == null) {
    options.maximumAge = 40
  }

  // **** Cache API methods

  // It's important that all ops are applied in order. This helper method creates the op submission queue
  // for a single document. This contains the logic for transforming & applying ops.
  const makeOpQueue = (docName, doc) =>
    queue(function (opData, callback) {
      if (!(opData.v >= 0)) {
        return callback('Version missing')
      }
      if (opData.v > doc.v) {
        return callback('Op at future version')
      }

      // Punt the transforming work back to the client if the op is too old.
      if (opData.v + options.maximumAge < doc.v) {
        return callback('Op too old')
      }

      if (!opData.meta) {
        opData.meta = {}
      }
      opData.meta.ts = Date.now()

      // We'll need to transform the op to the current version of the document. This
      // calls the callback immediately if opVersion == doc.v.
      return getOps(docName, opData.v, doc.v, function (error, ops) {
        let snapshot
        if (error) {
          return callback(error)
        }

        if (doc.v - opData.v !== ops.length) {
          // This should never happen. It indicates that we didn't get all the ops we
          // asked for. It's important that the submitted op is correctly transformed.
          console.error(
            `Could not get old ops in model for document ${docName}`
          )
          console.error(
            `Expected ops ${opData.v} to ${doc.v} and got ${ops.length} ops`
          )
          return callback('Internal error')
        }

        if (ops.length > 0) {
          try {
            // If there's enough ops, it might be worth spinning this out into a webworker thread.
            for (const oldOp of Array.from(ops)) {
              // Dup detection works by sending the id(s) the op has been submitted with previously.
              // If the id matches, we reject it. The client can also detect the op has been submitted
              // already if it sees its own previous id in the ops it sees when it does catchup.
              if (
                oldOp.meta.source &&
                opData.dupIfSource &&
                Array.from(opData.dupIfSource).includes(oldOp.meta.source)
              ) {
                return callback('Op already submitted')
              }

              opData.op = doc.type.transform(opData.op, oldOp.op, 'left')
              opData.v++
            }
          } catch (error1) {
            error = error1
            console.error(error.stack)
            return callback(error.message)
          }
        }

        try {
          snapshot = doc.type.apply(doc.snapshot, opData.op)
        } catch (error2) {
          error = error2
          console.error(error.stack)
          return callback(error.message)
        }

        // The op data should be at the current version, and the new document data should be at
        // the next version.
        //
        // This should never happen in practice, but it's a nice little check to make sure everything
        // is hunky-dory.
        if (opData.v !== doc.v) {
          // This should never happen.
          console.error(
            'Version mismatch detected in model. File a ticket - this is a bug.'
          )
          console.error(`Expecting ${opData.v} == ${doc.v}`)
          return callback('Internal error')
        }

        // newDocData = {snapshot, type: type.name, v: opVersion + 1, meta: docData.meta}
        const writeOp =
          (db != null ? db.writeOp : undefined) ||
          ((docName, newOpData, callback) => callback())

        return writeOp(docName, opData, function (error) {
          if (error) {
            // The user should probably know about this.
            console.warn(`Error writing ops to database: ${error}`)
            return callback(error)
          }

          __guardMethod__(options.stats, 'writeOp', o => o.writeOp())

          // This is needed when we emit the 'change' event, below.
          const oldSnapshot = doc.snapshot

          // All the heavy lifting is now done. Finally, we'll update the cache with the new data
          // and (maybe!) save a new document snapshot to the database.

          doc.v = opData.v + 1
          doc.snapshot = snapshot

          doc.ops.push(opData)
          if (db && doc.ops.length > options.numCachedOps) {
            doc.ops.shift()
          }

          model.emit('applyOp', docName, opData, snapshot, oldSnapshot)
          doc.eventEmitter.emit('op', opData, snapshot, oldSnapshot)

          // The callback is called with the version of the document at which the op was applied.
          // This is the op.v after transformation, and it's doc.v - 1.
          callback(null, opData.v)

          // I need a decent strategy here for deciding whether or not to save the snapshot.
          //
          // The 'right' strategy looks something like "Store the snapshot whenever the snapshot
          // is smaller than the accumulated op data". For now, I'll just store it every 20
          // ops or something. (Configurable with doc.committedVersion)
          if (
!doc.snapshotWriteLock &&
|
||||
doc.committedVersion + options.opsBeforeCommit <= doc.v
|
||||
) {
|
||||
return tryWriteSnapshot(docName, function (error) {
|
||||
if (error) {
|
||||
return console.warn(
|
||||
`Error writing snapshot ${error}. This is nonfatal`
|
||||
)
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
// Add the data for the given docName to the cache. The named document shouldn't already
|
||||
// exist in the doc set.
|
||||
//
|
||||
// Returns the new doc.
|
||||
const add = function (docName, error, data, committedVersion, ops, dbMeta) {
|
||||
let callback, doc
|
||||
const callbacks = awaitingGetSnapshot[docName]
|
||||
delete awaitingGetSnapshot[docName]
|
||||
|
||||
if (error) {
|
||||
if (callbacks) {
|
||||
for (callback of Array.from(callbacks)) {
|
||||
callback(error)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
doc = docs[docName] = {
|
||||
snapshot: data.snapshot,
|
||||
v: data.v,
|
||||
type: data.type,
|
||||
meta: data.meta,
|
||||
|
||||
// Cache of ops
|
||||
ops: ops || [],
|
||||
|
||||
eventEmitter: new EventEmitter(),
|
||||
|
||||
// Timer before the document will be invalidated from the cache (if the document has no
|
||||
// listeners)
|
||||
reapTimer: null,
|
||||
|
||||
// Version of the snapshot thats in the database
|
||||
committedVersion: committedVersion != null ? committedVersion : data.v,
|
||||
snapshotWriteLock: false,
|
||||
dbMeta,
|
||||
}
|
||||
|
||||
doc.opQueue = makeOpQueue(docName, doc)
|
||||
|
||||
refreshReapingTimeout(docName)
|
||||
model.emit('add', docName, data)
|
||||
if (callbacks) {
|
||||
for (callback of Array.from(callbacks)) {
|
||||
callback(null, doc)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return doc
|
||||
}
|
||||
|
||||
// This is a little helper wrapper around db.getOps. It does two things:
|
||||
//
|
||||
// - If there's no database set, it returns an error to the callback
|
||||
// - It adds version numbers to each op returned from the database
|
||||
// (These can be inferred from context so the DB doesn't store them, but its useful to have them).
|
||||
const getOpsInternal = function (docName, start, end, callback) {
|
||||
if (!db) {
|
||||
return typeof callback === 'function'
|
||||
? callback('Document does not exist')
|
||||
: undefined
|
||||
}
|
||||
|
||||
return db.getOps(docName, start, end, function (error, ops) {
|
||||
if (error) {
|
||||
return typeof callback === 'function' ? callback(error) : undefined
|
||||
}
|
||||
|
||||
let v = start
|
||||
for (const op of Array.from(ops)) {
|
||||
op.v = v++
|
||||
}
|
||||
|
||||
return typeof callback === 'function' ? callback(null, ops) : undefined
|
||||
})
|
||||
}
|
||||
|
||||
// Load the named document into the cache. This function is re-entrant.
|
||||
//
|
||||
// The callback is called with (error, doc)
|
||||
const load = function (docName, callback) {
|
||||
if (docs[docName]) {
|
||||
// The document is already loaded. Return immediately.
|
||||
__guardMethod__(options.stats, 'cacheHit', o => o.cacheHit('getSnapshot'))
|
||||
return callback(null, docs[docName])
|
||||
}
|
||||
|
||||
// We're a memory store. If we don't have it, nobody does.
|
||||
if (!db) {
|
||||
return callback('Document does not exist')
|
||||
}
|
||||
|
||||
const callbacks = awaitingGetSnapshot[docName]
|
||||
|
||||
// The document is being loaded already. Add ourselves as a callback.
|
||||
if (callbacks) {
|
||||
return callbacks.push(callback)
|
||||
}
|
||||
|
||||
__guardMethod__(options.stats, 'cacheMiss', o1 =>
|
||||
o1.cacheMiss('getSnapshot')
|
||||
)
|
||||
|
||||
// The document isn't loaded and isn't being loaded. Load it.
|
||||
awaitingGetSnapshot[docName] = [callback]
|
||||
return db.getSnapshot(docName, function (error, data, dbMeta) {
|
||||
if (error) {
|
||||
return add(docName, error)
|
||||
}
|
||||
|
||||
const type = types[data.type]
|
||||
if (!type) {
|
||||
console.warn(`Type '${data.type}' missing`)
|
||||
return callback('Type not found')
|
||||
}
|
||||
data.type = type
|
||||
|
||||
const committedVersion = data.v
|
||||
|
||||
// The server can close without saving the most recent document snapshot.
|
||||
// In this case, there are extra ops which need to be applied before
|
||||
// returning the snapshot.
|
||||
return getOpsInternal(docName, data.v, null, function (error, ops) {
|
||||
if (error) {
|
||||
return callback(error)
|
||||
}
|
||||
|
||||
if (ops.length > 0) {
|
||||
console.log(`Catchup ${docName} ${data.v} -> ${data.v + ops.length}`)
|
||||
|
||||
try {
|
||||
for (const op of Array.from(ops)) {
|
||||
data.snapshot = type.apply(data.snapshot, op.op)
|
||||
data.v++
|
||||
}
|
||||
} catch (e) {
|
||||
// This should never happen - it indicates that whats in the
|
||||
// database is invalid.
|
||||
console.error(`Op data invalid for ${docName}: ${e.stack}`)
|
||||
return callback('Op data invalid')
|
||||
}
|
||||
}
|
||||
|
||||
model.emit('load', docName, data)
|
||||
return add(docName, error, data, committedVersion, ops, dbMeta)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// This makes sure the cache contains a document. If the doc cache doesn't contain
|
||||
// a document, it is loaded from the database and stored.
|
||||
//
|
||||
// Documents are stored so long as either:
|
||||
// - They have been accessed within the past #{PERIOD}
|
||||
// - At least one client has the document open
|
||||
var refreshReapingTimeout = function (docName) {
|
||||
const doc = docs[docName]
|
||||
if (!doc) {
|
||||
return
|
||||
}
|
||||
|
||||
// I want to let the clients list be updated before this is called.
|
||||
return process.nextTick(function () {
|
||||
// This is an awkward way to find out the number of clients on a document. If this
|
||||
// causes performance issues, add a numClients field to the document.
|
||||
//
|
||||
// The first check is because its possible that between refreshReapingTimeout being called and this
|
||||
// event being fired, someone called delete() on the document and hence the doc is something else now.
|
||||
if (
|
||||
doc === docs[docName] &&
|
||||
doc.eventEmitter.listeners('op').length === 0 &&
|
||||
(db || options.forceReaping) &&
|
||||
doc.opQueue.busy === false
|
||||
) {
|
||||
let reapTimer
|
||||
clearTimeout(doc.reapTimer)
|
||||
return (doc.reapTimer = reapTimer =
|
||||
setTimeout(
|
||||
() =>
|
||||
tryWriteSnapshot(docName, function () {
|
||||
// If the reaping timeout has been refreshed while we're writing the snapshot, or if we're
|
||||
// in the middle of applying an operation, don't reap.
|
||||
if (
|
||||
docs[docName].reapTimer === reapTimer &&
|
||||
doc.opQueue.busy === false
|
||||
) {
|
||||
return delete docs[docName]
|
||||
}
|
||||
}),
|
||||
options.reapTime
|
||||
))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
var tryWriteSnapshot = function (docName, callback) {
|
||||
if (!db) {
|
||||
return typeof callback === 'function' ? callback() : undefined
|
||||
}
|
||||
|
||||
const doc = docs[docName]
|
||||
|
||||
// The doc is closed
|
||||
if (!doc) {
|
||||
return typeof callback === 'function' ? callback() : undefined
|
||||
}
|
||||
|
||||
// The document is already saved.
|
||||
if (doc.committedVersion === doc.v) {
|
||||
return typeof callback === 'function' ? callback() : undefined
|
||||
}
|
||||
|
||||
if (doc.snapshotWriteLock) {
|
||||
return typeof callback === 'function'
|
||||
? callback('Another snapshot write is in progress')
|
||||
: undefined
|
||||
}
|
||||
|
||||
doc.snapshotWriteLock = true
|
||||
|
||||
__guardMethod__(options.stats, 'writeSnapshot', o => o.writeSnapshot())
|
||||
|
||||
const writeSnapshot =
|
||||
(db != null ? db.writeSnapshot : undefined) ||
|
||||
((docName, docData, dbMeta, callback) => callback())
|
||||
|
||||
const data = {
|
||||
v: doc.v,
|
||||
meta: doc.meta,
|
||||
snapshot: doc.snapshot,
|
||||
// The database doesn't know about object types.
|
||||
type: doc.type.name,
|
||||
}
|
||||
|
||||
// Commit snapshot.
|
||||
return writeSnapshot(docName, data, doc.dbMeta, function (error, dbMeta) {
|
||||
doc.snapshotWriteLock = false
|
||||
|
||||
// We have to use data.v here because the version in the doc could
|
||||
// have been updated between the call to writeSnapshot() and now.
|
||||
doc.committedVersion = data.v
|
||||
doc.dbMeta = dbMeta
|
||||
|
||||
return typeof callback === 'function' ? callback(error) : undefined
|
||||
})
|
||||
}
|
||||
|
||||
// *** Model interface methods
|
||||
|
||||
// Create a new document.
|
||||
//
|
||||
// data should be {snapshot, type, [meta]}. The version of a new document is 0.
|
||||
this.create = function (docName, type, meta, callback) {
|
||||
if (typeof meta === 'function') {
|
||||
;[meta, callback] = Array.from([{}, meta])
|
||||
}
|
||||
|
||||
if (docName.match(/\//)) {
|
||||
return typeof callback === 'function'
|
||||
? callback('Invalid document name')
|
||||
: undefined
|
||||
}
|
||||
if (docs[docName]) {
|
||||
return typeof callback === 'function'
|
||||
? callback('Document already exists')
|
||||
: undefined
|
||||
}
|
||||
|
||||
if (typeof type === 'string') {
|
||||
type = types[type]
|
||||
}
|
||||
if (!type) {
|
||||
return typeof callback === 'function'
|
||||
? callback('Type not found')
|
||||
: undefined
|
||||
}
|
||||
|
||||
const data = {
|
||||
snapshot: type.create(),
|
||||
type: type.name,
|
||||
meta: meta || {},
|
||||
v: 0,
|
||||
}
|
||||
|
||||
const done = function (error, dbMeta) {
|
||||
// dbMeta can be used to cache extra state needed by the database to access the document, like an ID or something.
|
||||
if (error) {
|
||||
return typeof callback === 'function' ? callback(error) : undefined
|
||||
}
|
||||
|
||||
// From here on we'll store the object version of the type name.
|
||||
data.type = type
|
||||
add(docName, null, data, 0, [], dbMeta)
|
||||
model.emit('create', docName, data)
|
||||
return typeof callback === 'function' ? callback() : undefined
|
||||
}
|
||||
|
||||
if (db) {
|
||||
return db.create(docName, data, done)
|
||||
} else {
|
||||
return done()
|
||||
}
|
||||
}
|
||||
|
||||
// Perminantly deletes the specified document.
|
||||
// If listeners are attached, they are removed.
|
||||
//
|
||||
// The callback is called with (error) if there was an error. If error is null / undefined, the
|
||||
// document was deleted.
|
||||
//
|
||||
// WARNING: This isn't well supported throughout the code. (Eg, streaming clients aren't told about the
|
||||
// deletion. Subsequent op submissions will fail).
|
||||
this.delete = function (docName, callback) {
|
||||
const doc = docs[docName]
|
||||
|
||||
if (doc) {
|
||||
clearTimeout(doc.reapTimer)
|
||||
delete docs[docName]
|
||||
}
|
||||
|
||||
const done = function (error) {
|
||||
if (!error) {
|
||||
model.emit('delete', docName)
|
||||
}
|
||||
return typeof callback === 'function' ? callback(error) : undefined
|
||||
}
|
||||
|
||||
if (db) {
|
||||
return db.delete(docName, doc != null ? doc.dbMeta : undefined, done)
|
||||
} else {
|
||||
return done(!doc ? 'Document does not exist' : undefined)
|
||||
}
|
||||
}
|
||||
|
||||
// This gets all operations from [start...end]. (That is, its not inclusive.)
|
||||
//
|
||||
// end can be null. This means 'get me all ops from start'.
|
||||
//
|
||||
// Each op returned is in the form {op:o, meta:m, v:version}.
|
||||
//
|
||||
// Callback is called with (error, [ops])
|
||||
//
|
||||
// If the document does not exist, getOps doesn't necessarily return an error. This is because
|
||||
// its awkward to figure out whether or not the document exists for things
|
||||
// like the redis database backend. I guess its a bit gross having this inconsistant
|
||||
// with the other DB calls, but its certainly convenient.
|
||||
//
|
||||
// Use getVersion() to determine if a document actually exists, if thats what you're
|
||||
// after.
|
||||
this.getOps = getOps = function (docName, start, end, callback) {
|
||||
// getOps will only use the op cache if its there. It won't fill the op cache in.
|
||||
if (!(start >= 0)) {
|
||||
throw new Error('start must be 0+')
|
||||
}
|
||||
|
||||
if (typeof end === 'function') {
|
||||
;[end, callback] = Array.from([null, end])
|
||||
}
|
||||
|
||||
const ops = docs[docName] != null ? docs[docName].ops : undefined
|
||||
|
||||
if (ops) {
|
||||
const version = docs[docName].v
|
||||
|
||||
// Ops contains an array of ops. The last op in the list is the last op applied
|
||||
if (end == null) {
|
||||
end = version
|
||||
}
|
||||
start = Math.min(start, end)
|
||||
|
||||
if (start === end) {
|
||||
return callback(null, [])
|
||||
}
|
||||
|
||||
// Base is the version number of the oldest op we have cached
|
||||
const base = version - ops.length
|
||||
|
||||
// If the database is null, we'll trim to the ops we do have and hope thats enough.
|
||||
if (start >= base || db === null) {
|
||||
refreshReapingTimeout(docName)
|
||||
if (options.stats != null) {
|
||||
options.stats.cacheHit('getOps')
|
||||
}
|
||||
|
||||
return callback(null, ops.slice(start - base, end - base))
|
||||
}
|
||||
}
|
||||
|
||||
if (options.stats != null) {
|
||||
options.stats.cacheMiss('getOps')
|
||||
}
|
||||
|
||||
return getOpsInternal(docName, start, end, callback)
|
||||
}
|
||||
|
||||
// Gets the snapshot data for the specified document.
|
||||
// getSnapshot(docName, callback)
|
||||
// Callback is called with (error, {v: <version>, type: <type>, snapshot: <snapshot>, meta: <meta>})
|
||||
this.getSnapshot = (docName, callback) =>
|
||||
load(docName, (error, doc) =>
|
||||
callback(
|
||||
error,
|
||||
doc
|
||||
? { v: doc.v, type: doc.type, snapshot: doc.snapshot, meta: doc.meta }
|
||||
: undefined
|
||||
)
|
||||
)
|
||||
|
||||
// Gets the latest version # of the document.
|
||||
// getVersion(docName, callback)
|
||||
// callback is called with (error, version).
|
||||
this.getVersion = (docName, callback) =>
|
||||
load(docName, (error, doc) =>
|
||||
callback(error, doc != null ? doc.v : undefined)
|
||||
)
|
||||
|
||||
// Apply an op to the specified document.
|
||||
// The callback is passed (error, applied version #)
|
||||
// opData = {op:op, v:v, meta:metadata}
|
||||
//
|
||||
// Ops are queued before being applied so that the following code applies op C before op B:
|
||||
// model.applyOp 'doc', OPA, -> model.applyOp 'doc', OPB
|
||||
// model.applyOp 'doc', OPC
|
||||
this.applyOp = (
|
||||
docName,
|
||||
opData,
|
||||
callback // All the logic for this is in makeOpQueue, above.
|
||||
) =>
|
||||
load(docName, function (error, doc) {
|
||||
if (error) {
|
||||
return callback(error)
|
||||
}
|
||||
|
||||
return process.nextTick(() =>
|
||||
doc.opQueue(opData, function (error, newVersion) {
|
||||
refreshReapingTimeout(docName)
|
||||
return typeof callback === 'function'
|
||||
? callback(error, newVersion)
|
||||
: undefined
|
||||
})
|
||||
)
|
||||
})
|
||||
|
||||
// TODO: store (some) metadata in DB
|
||||
// TODO: op and meta should be combineable in the op that gets sent
|
||||
this.applyMetaOp = function (docName, metaOpData, callback) {
|
||||
const { path, value } = metaOpData.meta
|
||||
|
||||
if (!isArray(path)) {
|
||||
return typeof callback === 'function'
|
||||
? callback('path should be an array')
|
||||
: undefined
|
||||
}
|
||||
|
||||
return load(docName, function (error, doc) {
|
||||
if (error != null) {
|
||||
return typeof callback === 'function' ? callback(error) : undefined
|
||||
} else {
|
||||
let applied = false
|
||||
switch (path[0]) {
|
||||
case 'shout':
|
||||
doc.eventEmitter.emit('op', metaOpData)
|
||||
applied = true
|
||||
break
|
||||
}
|
||||
|
||||
if (applied) {
|
||||
model.emit('applyMetaOp', docName, path, value)
|
||||
}
|
||||
return typeof callback === 'function'
|
||||
? callback(null, doc.v)
|
||||
: undefined
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Listen to all ops from the specified version. If version is in the past, all
|
||||
// ops since that version are sent immediately to the listener.
|
||||
//
|
||||
// The callback is called once the listener is attached, but before any ops have been passed
|
||||
// to the listener.
|
||||
//
|
||||
// This will _not_ edit the document metadata.
|
||||
//
|
||||
// If there are any listeners, we don't purge the document from the cache. But be aware, this behaviour
|
||||
// might change in a future version.
|
||||
//
|
||||
// version is the document version at which the document is opened. It can be left out if you want to open
|
||||
// the document at the most recent version.
|
||||
//
|
||||
// listener is called with (opData) each time an op is applied.
|
||||
//
|
||||
// callback(error, openedVersion)
|
||||
this.listen = function (docName, version, listener, callback) {
|
||||
if (typeof version === 'function') {
|
||||
;[version, listener, callback] = Array.from([null, version, listener])
|
||||
}
|
||||
|
||||
return load(docName, function (error, doc) {
|
||||
if (error) {
|
||||
return typeof callback === 'function' ? callback(error) : undefined
|
||||
}
|
||||
|
||||
clearTimeout(doc.reapTimer)
|
||||
|
||||
if (version != null) {
|
||||
return getOps(docName, version, null, function (error, data) {
|
||||
if (error) {
|
||||
return typeof callback === 'function' ? callback(error) : undefined
|
||||
}
|
||||
|
||||
doc.eventEmitter.on('op', listener)
|
||||
if (typeof callback === 'function') {
|
||||
callback(null, version)
|
||||
}
|
||||
return (() => {
|
||||
const result = []
|
||||
for (const op of Array.from(data)) {
|
||||
var needle
|
||||
listener(op)
|
||||
|
||||
// The listener may well remove itself during the catchup phase. If this happens, break early.
|
||||
// This is done in a quite inefficient way. (O(n) where n = #listeners on doc)
|
||||
if (
|
||||
((needle = listener),
|
||||
!Array.from(doc.eventEmitter.listeners('op')).includes(needle))
|
||||
) {
|
||||
break
|
||||
} else {
|
||||
result.push(undefined)
|
||||
}
|
||||
}
|
||||
return result
|
||||
})()
|
||||
})
|
||||
} else {
|
||||
// Version is null / undefined. Just add the listener.
|
||||
doc.eventEmitter.on('op', listener)
|
||||
return typeof callback === 'function'
|
||||
? callback(null, doc.v)
|
||||
: undefined
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Remove a listener for a particular document.
|
||||
//
|
||||
// removeListener(docName, listener)
|
||||
//
|
||||
// This is synchronous.
|
||||
this.removeListener = function (docName, listener) {
|
||||
// The document should already be loaded.
|
||||
const doc = docs[docName]
|
||||
if (!doc) {
|
||||
throw new Error('removeListener called but document not loaded')
|
||||
}
|
||||
|
||||
doc.eventEmitter.removeListener('op', listener)
|
||||
return refreshReapingTimeout(docName)
|
||||
}
|
||||
|
||||
// Flush saves all snapshot data to the database. I'm not sure whether or not this is actually needed -
|
||||
// sharejs will happily replay uncommitted ops when documents are re-opened anyway.
|
||||
this.flush = function (callback) {
|
||||
if (!db) {
|
||||
return typeof callback === 'function' ? callback() : undefined
|
||||
}
|
||||
|
||||
let pendingWrites = 0
|
||||
|
||||
for (const docName in docs) {
|
||||
const doc = docs[docName]
|
||||
if (doc.committedVersion < doc.v) {
|
||||
pendingWrites++
|
||||
// I'm hoping writeSnapshot will always happen in another thread.
|
||||
tryWriteSnapshot(docName, () =>
|
||||
process.nextTick(function () {
|
||||
pendingWrites--
|
||||
if (pendingWrites === 0) {
|
||||
return typeof callback === 'function' ? callback() : undefined
|
||||
}
|
||||
})
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// If nothing was queued, terminate immediately.
|
||||
if (pendingWrites === 0) {
|
||||
return typeof callback === 'function' ? callback() : undefined
|
||||
}
|
||||
}
|
||||
|
||||
// Close the database connection. This is needed so nodejs can shut down cleanly.
|
||||
this.closeDb = function () {
|
||||
__guardMethod__(db, 'close', o => o.close())
|
||||
return (db = null)
|
||||
}
|
||||
}
|
||||
|
||||
// Model inherits from EventEmitter.
|
||||
Model.prototype = new EventEmitter()
|
||||
|
||||
function __guardMethod__(obj, methodName, transform) {
|
||||
if (
|
||||
typeof obj !== 'undefined' &&
|
||||
obj !== null &&
|
||||
typeof obj[methodName] === 'function'
|
||||
) {
|
||||
return transform(obj, methodName)
|
||||
} else {
|
||||
return undefined
|
||||
}
|
||||
}
|
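The model's API is easiest to see from a call site. Below is a minimal usage sketch, not part of the commit: it assumes the insert-only 'simple' type defined later in this diff is registered in ../types, and passes a null db so the model acts as a pure in-memory store. The './model' require path is illustrative.

// Minimal lifecycle sketch (illustrative, not part of the commit).
// Assumes the insert-only 'simple' type below is registered in ../types.
const Model = require('./model') // hypothetical relative path

const model = new Model(null, { reapTime: 1000 }) // null db = pure memory store

model.create('demo', 'simple', function (error) {
  if (error) throw new Error(error)

  // Submit an op against version 0; the op shape is the 'simple' type's.
  model.applyOp('demo', { v: 0, op: { position: 0, text: 'hi' } }, function (error, version) {
    if (error) throw new Error(error)
    console.log('applied at version', version) // 0

    model.getSnapshot('demo', function (error, data) {
      if (error) throw new Error(error)
      console.log(data.v, data.snapshot.str) // 1 'hi'
    })
  })
})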
890
services/document-updater/app/js/sharejs/server/model.js
Normal file

@ -0,0 +1,890 @@
/* eslint-disable
    no-console,
    no-return-assign,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * DS103: Rewrite code to no longer use __guard__
 * DS104: Avoid inline assignments
 * DS204: Change includes calls to have a more natural evaluation order
 * DS205: Consider reworking code to avoid use of IIFEs
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
// The model of all the ops. Responsible for applying & transforming remote deltas
// and managing the storage layer.
//
// Actual storage is handled by the database wrappers in db/*, wrapped by DocCache

let Model
const { EventEmitter } = require('events')

const queue = require('./syncqueue')
const types = require('../types')

const isArray = o => Object.prototype.toString.call(o) === '[object Array]'

// This constructor creates a new Model object. There will be one model object
// per server context.
//
// The model object is responsible for a lot of things:
//
// - It manages the interactions with the database
// - It maintains (in memory) a set of all active documents
// - It calls out to the OT functions when necessary
//
// The model is an event emitter. It emits the following events:
//
// create(docName, data): A document has been created with the specified name & data
module.exports = Model = function (db, options) {
  // db can be null if the user doesn't want persistence.

  let getOps
  if (!(this instanceof Model)) {
    return new Model(db, options)
  }

  const model = this

  if (options == null) {
    options = {}
  }

  // This is a cache of 'live' documents.
  //
  // The cache is a map from docName -> {
  //   ops: [{op, meta}]
  //   snapshot
  //   type
  //   v
  //   meta
  //   eventEmitter
  //   reapTimer
  //   committedVersion: v
  //   snapshotWriteLock: bool to make sure writeSnapshot isn't re-entrant
  //   dbMeta: database specific data
  //   opQueue: syncQueue for processing ops
  // }
  //
  // The ops list contains the document's last options.numCachedOps ops. (Or all
  // of them if we're using a memory store).
  //
  // Documents are stored in this set so long as the document has been accessed in
  // the last few seconds (options.reapTime) OR at least one client has the document
  // open. I don't know if I should keep open (but not being edited) documents live -
  // maybe if a client has a document open but the document isn't being edited, I should
  // flush it from the cache.
  //
  // In any case, the API to model is designed such that if we want to change that later
  // it should be pretty easy to do so without any external-to-the-model code changes.
  const docs = {}

  // This is a map from docName -> [callback]. It is used when a document hasn't been
  // cached and multiple getSnapshot() / getVersion() requests come in. All requests
  // are added to the callback list and called when db.getSnapshot() returns.
  //
  // callback(error, snapshot data)
  const awaitingGetSnapshot = {}

  // The time that documents which no clients have open will stay in the cache.
  // Should be > 0.
  if (options.reapTime == null) {
    options.reapTime = 3000
  }

  // The number of operations the cache holds before reusing the space
  if (options.numCachedOps == null) {
    options.numCachedOps = 10
  }

  // This option forces documents to be reaped, even when there's no database backend.
  // This is useful when you don't care about persistence and don't want to gradually
  // fill memory.
  //
  // You might want to set reapTime to a day or something.
  if (options.forceReaping == null) {
    options.forceReaping = false
  }

  // Until I come up with a better strategy, we'll save a copy of the document snapshot
  // to the database every ~20 submitted ops.
  if (options.opsBeforeCommit == null) {
    options.opsBeforeCommit = 20
  }

  // It takes some processing time to transform client ops. The server will punt ops back to the
  // client to transform if they're too old.
  if (options.maximumAge == null) {
    options.maximumAge = 40
  }

  // **** Cache API methods

  // It's important that all ops are applied in order. This helper method creates the op submission queue
  // for a single document. This contains the logic for transforming & applying ops.
  const makeOpQueue = (docName, doc) =>
    queue(function (opData, callback) {
      if (!(opData.v >= 0)) {
        return callback('Version missing')
      }
      if (opData.v > doc.v) {
        return callback('Op at future version')
      }

      // Punt the transforming work back to the client if the op is too old.
      if (opData.v + options.maximumAge < doc.v) {
        return callback('Op too old')
      }

      if (!opData.meta) {
        opData.meta = {}
      }
      opData.meta.ts = Date.now()

      // We'll need to transform the op to the current version of the document. This
      // calls the callback immediately if opVersion == doc.v.
      return getOps(docName, opData.v, doc.v, function (error, ops) {
        let snapshot
        if (error) {
          return callback(error)
        }

        if (doc.v - opData.v !== ops.length) {
          // This should never happen. It indicates that we didn't get all the ops we
          // asked for. It's important that the submitted op is correctly transformed.
          console.error(
            `Could not get old ops in model for document ${docName}`
          )
          console.error(
            `Expected ops ${opData.v} to ${doc.v} and got ${ops.length} ops`
          )
          return callback('Internal error')
        }

        if (ops.length > 0) {
          try {
            // If there's enough ops, it might be worth spinning this out into a webworker thread.
            for (const oldOp of Array.from(ops)) {
              // Dup detection works by sending the id(s) the op has been submitted with previously.
              // If the id matches, we reject it. The client can also detect the op has been submitted
              // already if it sees its own previous id in the ops it sees when it does catchup.
              if (
                oldOp.meta.source &&
                opData.dupIfSource &&
                Array.from(opData.dupIfSource).includes(oldOp.meta.source)
              ) {
                return callback('Op already submitted')
              }

              opData.op = doc.type.transform(opData.op, oldOp.op, 'left')
              opData.v++
            }
          } catch (error1) {
            error = error1
            console.error(error.stack)
            return callback(error.message)
          }
        }

        try {
          snapshot = doc.type.apply(doc.snapshot, opData.op)
        } catch (error2) {
          error = error2
          console.error(error.stack)
          return callback(error.message)
        }

        if (
          options.maxDocLength != null &&
          doc.snapshot.length > options.maxDocLength
        ) {
          return callback('Update takes doc over max doc size')
        }

        // The op data should be at the current version, and the new document data should be at
        // the next version.
        //
        // This should never happen in practice, but it's a nice little check to make sure everything
        // is hunky-dory.
        if (opData.v !== doc.v) {
          // This should never happen.
          console.error(
            'Version mismatch detected in model. File a ticket - this is a bug.'
          )
          console.error(`Expecting ${opData.v} == ${doc.v}`)
          return callback('Internal error')
        }

        // newDocData = {snapshot, type:type.name, v:opVersion + 1, meta:docData.meta}
        const writeOp =
          (db != null ? db.writeOp : undefined) ||
          ((docName, newOpData, callback) => callback())

        return writeOp(docName, opData, function (error) {
          if (error) {
            // The user should probably know about this.
            console.warn(`Error writing ops to database: ${error}`)
            return callback(error)
          }

          __guardMethod__(options.stats, 'writeOp', o => o.writeOp())

          // This is needed when we emit the 'applyOp' event, below.
          const oldSnapshot = doc.snapshot

          // All the heavy lifting is now done. Finally, we'll update the cache with the new data
          // and (maybe!) save a new document snapshot to the database.

          doc.v = opData.v + 1
          doc.snapshot = snapshot

          doc.ops.push(opData)
          if (db && doc.ops.length > options.numCachedOps) {
            doc.ops.shift()
          }

          model.emit('applyOp', docName, opData, snapshot, oldSnapshot)
          doc.eventEmitter.emit('op', opData, snapshot, oldSnapshot)

          // The callback is called with the version of the document at which the op was applied.
          // This is the op.v after transformation, and it's doc.v - 1.
          callback(null, opData.v)

          // I need a decent strategy here for deciding whether or not to save the snapshot.
          //
          // The 'right' strategy looks something like "Store the snapshot whenever the snapshot
          // is smaller than the accumulated op data". For now, I'll just store it every 20
          // ops or something. (Configurable with doc.committedVersion)
          if (
            !doc.snapshotWriteLock &&
            doc.committedVersion + options.opsBeforeCommit <= doc.v
          ) {
            return tryWriteSnapshot(docName, function (error) {
              if (error) {
                return console.warn(
                  `Error writing snapshot ${error}. This is nonfatal`
                )
              }
            })
          }
        })
      })
    })

  // Add the data for the given docName to the cache. The named document shouldn't already
  // exist in the doc set.
  //
  // Returns the new doc.
  const add = function (docName, error, data, committedVersion, ops, dbMeta) {
    let callback, doc
    const callbacks = awaitingGetSnapshot[docName]
    delete awaitingGetSnapshot[docName]

    if (error) {
      if (callbacks) {
        for (callback of Array.from(callbacks)) {
          callback(error)
        }
      }
    } else {
      doc = docs[docName] = {
        snapshot: data.snapshot,
        v: data.v,
        type: data.type,
        meta: data.meta,

        // Cache of ops
        ops: ops || [],

        eventEmitter: new EventEmitter(),

        // Timer before the document will be invalidated from the cache (if the document has no
        // listeners)
        reapTimer: null,

        // Version of the snapshot that's in the database
        committedVersion: committedVersion != null ? committedVersion : data.v,
        snapshotWriteLock: false,
        dbMeta,
      }

      doc.opQueue = makeOpQueue(docName, doc)

      refreshReapingTimeout(docName)
      model.emit('add', docName, data)
      if (callbacks) {
        for (callback of Array.from(callbacks)) {
          callback(null, doc)
        }
      }
    }

    return doc
  }

  // This is a little helper wrapper around db.getOps. It does two things:
  //
  // - If there's no database set, it returns an error to the callback
  // - It adds version numbers to each op returned from the database
  //   (These can be inferred from context so the DB doesn't store them, but it's useful to have them).
  const getOpsInternal = function (docName, start, end, callback) {
    if (!db) {
      return typeof callback === 'function'
        ? callback('Document does not exist')
        : undefined
    }

    return db.getOps(docName, start, end, function (error, ops) {
      if (error) {
        return typeof callback === 'function' ? callback(error) : undefined
      }

      let v = start
      for (const op of Array.from(ops)) {
        op.v = v++
      }

      return typeof callback === 'function' ? callback(null, ops) : undefined
    })
  }

  // Load the named document into the cache. This function is re-entrant.
  //
  // The callback is called with (error, doc)
  const load = function (docName, callback) {
    if (docs[docName]) {
      // The document is already loaded. Return immediately.
      __guardMethod__(options.stats, 'cacheHit', o => o.cacheHit('getSnapshot'))
      return callback(null, docs[docName])
    }

    // We're a memory store. If we don't have it, nobody does.
    if (!db) {
      return callback('Document does not exist')
    }

    const callbacks = awaitingGetSnapshot[docName]

    // The document is being loaded already. Add ourselves as a callback.
    if (callbacks) {
      return callbacks.push(callback)
    }

    __guardMethod__(options.stats, 'cacheMiss', o1 =>
      o1.cacheMiss('getSnapshot')
    )

    // The document isn't loaded and isn't being loaded. Load it.
    awaitingGetSnapshot[docName] = [callback]
    return db.getSnapshot(docName, function (error, data, dbMeta) {
      if (error) {
        return add(docName, error)
      }

      const type = types[data.type]
      if (!type) {
        console.warn(`Type '${data.type}' missing`)
        return callback('Type not found')
      }
      data.type = type

      const committedVersion = data.v

      // The server can close without saving the most recent document snapshot.
      // In this case, there are extra ops which need to be applied before
      // returning the snapshot.
      return getOpsInternal(docName, data.v, null, function (error, ops) {
        if (error) {
          return callback(error)
        }

        if (ops.length > 0) {
          console.log(`Catchup ${docName} ${data.v} -> ${data.v + ops.length}`)

          try {
            for (const op of Array.from(ops)) {
              data.snapshot = type.apply(data.snapshot, op.op)
              data.v++
            }
          } catch (e) {
            // This should never happen - it indicates that what's in the
            // database is invalid.
            console.error(`Op data invalid for ${docName}: ${e.stack}`)
            return callback('Op data invalid')
          }
        }

        model.emit('load', docName, data)
        return add(docName, error, data, committedVersion, ops, dbMeta)
      })
    })
  }

  // This makes sure the cache contains a document. If the doc cache doesn't contain
  // a document, it is loaded from the database and stored.
  //
  // Documents are stored so long as either:
  // - They have been accessed within the past #{PERIOD}
  // - At least one client has the document open
  var refreshReapingTimeout = function (docName) {
    const doc = docs[docName]
    if (!doc) {
      return
    }

    // I want to let the clients list be updated before this is called.
    return process.nextTick(function () {
      // This is an awkward way to find out the number of clients on a document. If this
      // causes performance issues, add a numClients field to the document.
      //
      // The first check is because it's possible that between refreshReapingTimeout being called and this
      // event being fired, someone called delete() on the document and hence the doc is something else now.
      if (
        doc === docs[docName] &&
        doc.eventEmitter.listeners('op').length === 0 &&
        (db || options.forceReaping) &&
        doc.opQueue.busy === false
      ) {
        let reapTimer
        clearTimeout(doc.reapTimer)
        return (doc.reapTimer = reapTimer =
          setTimeout(
            () =>
              tryWriteSnapshot(docName, function () {
                // If the reaping timeout has been refreshed while we're writing the snapshot, or if we're
                // in the middle of applying an operation, don't reap.
                if (
                  docs[docName].reapTimer === reapTimer &&
                  doc.opQueue.busy === false
                ) {
                  return delete docs[docName]
                }
              }),
            options.reapTime
          ))
      }
    })
  }

  var tryWriteSnapshot = function (docName, callback) {
    if (!db) {
      return typeof callback === 'function' ? callback() : undefined
    }

    const doc = docs[docName]

    // The doc is closed
    if (!doc) {
      return typeof callback === 'function' ? callback() : undefined
    }

    // The document is already saved.
    if (doc.committedVersion === doc.v) {
      return typeof callback === 'function' ? callback() : undefined
    }

    if (doc.snapshotWriteLock) {
      return typeof callback === 'function'
        ? callback('Another snapshot write is in progress')
        : undefined
    }

    doc.snapshotWriteLock = true

    __guardMethod__(options.stats, 'writeSnapshot', o => o.writeSnapshot())

    const writeSnapshot =
      (db != null ? db.writeSnapshot : undefined) ||
      ((docName, docData, dbMeta, callback) => callback())

    const data = {
      v: doc.v,
      meta: doc.meta,
      snapshot: doc.snapshot,
      // The database doesn't know about object types.
      type: doc.type.name,
    }

    // Commit snapshot.
    return writeSnapshot(docName, data, doc.dbMeta, function (error, dbMeta) {
      doc.snapshotWriteLock = false

      // We have to use data.v here because the version in the doc could
      // have been updated between the call to writeSnapshot() and now.
      doc.committedVersion = data.v
      doc.dbMeta = dbMeta

      return typeof callback === 'function' ? callback(error) : undefined
    })
  }

  // *** Model interface methods

  // Create a new document.
  //
  // data should be {snapshot, type, [meta]}. The version of a new document is 0.
  this.create = function (docName, type, meta, callback) {
    if (typeof meta === 'function') {
      ;[meta, callback] = Array.from([{}, meta])
    }

    if (docName.match(/\//)) {
      return typeof callback === 'function'
        ? callback('Invalid document name')
        : undefined
    }
    if (docs[docName]) {
      return typeof callback === 'function'
        ? callback('Document already exists')
        : undefined
    }

    if (typeof type === 'string') {
      type = types[type]
    }
    if (!type) {
      return typeof callback === 'function'
        ? callback('Type not found')
        : undefined
    }

    const data = {
      snapshot: type.create(),
      type: type.name,
      meta: meta || {},
      v: 0,
    }

    const done = function (error, dbMeta) {
      // dbMeta can be used to cache extra state needed by the database to access the document, like an ID or something.
      if (error) {
        return typeof callback === 'function' ? callback(error) : undefined
      }

      // From here on we'll store the object version of the type name.
      data.type = type
      add(docName, null, data, 0, [], dbMeta)
      model.emit('create', docName, data)
      return typeof callback === 'function' ? callback() : undefined
    }

    if (db) {
      return db.create(docName, data, done)
    } else {
      return done()
    }
  }

  // Permanently deletes the specified document.
  // If listeners are attached, they are removed.
  //
  // The callback is called with (error) if there was an error. If error is null / undefined, the
  // document was deleted.
  //
  // WARNING: This isn't well supported throughout the code. (E.g., streaming clients aren't told about the
  // deletion. Subsequent op submissions will fail).
  this.delete = function (docName, callback) {
    const doc = docs[docName]

    if (doc) {
      clearTimeout(doc.reapTimer)
      delete docs[docName]
    }

    const done = function (error) {
      if (!error) {
        model.emit('delete', docName)
      }
      return typeof callback === 'function' ? callback(error) : undefined
    }

    if (db) {
      return db.delete(docName, doc != null ? doc.dbMeta : undefined, done)
    } else {
      return done(!doc ? 'Document does not exist' : undefined)
    }
  }

  // This gets all operations from [start...end). (That is, end is not inclusive.)
  //
  // end can be null. This means 'get me all ops from start'.
  //
  // Each op returned is in the form {op:o, meta:m, v:version}.
  //
  // Callback is called with (error, [ops])
  //
  // If the document does not exist, getOps doesn't necessarily return an error. This is because
  // it's awkward to figure out whether or not the document exists for things
  // like the redis database backend. I guess it's a bit gross having this inconsistent
  // with the other DB calls, but it's certainly convenient.
  //
  // Use getVersion() to determine if a document actually exists, if that's what you're
  // after.
  this.getOps = getOps = function (docName, start, end, callback) {
    // getOps will only use the op cache if it's there. It won't fill the op cache in.
    if (!(start >= 0)) {
      throw new Error('start must be 0+')
    }

    if (typeof end === 'function') {
      ;[end, callback] = Array.from([null, end])
    }

    const ops = docs[docName] != null ? docs[docName].ops : undefined

    if (ops) {
      const version = docs[docName].v

      // Ops contains an array of ops. The last op in the list is the last op applied
      if (end == null) {
        end = version
      }
      start = Math.min(start, end)

      if (start === end) {
        return callback(null, [])
      }

      // Base is the version number of the oldest op we have cached
      const base = version - ops.length

      // If the database is null, we'll trim to the ops we do have and hope that's enough.
      if (start >= base || db === null) {
        refreshReapingTimeout(docName)
        if (options.stats != null) {
          options.stats.cacheHit('getOps')
        }

        return callback(null, ops.slice(start - base, end - base))
      }
    }

    if (options.stats != null) {
      options.stats.cacheMiss('getOps')
    }

    return getOpsInternal(docName, start, end, callback)
  }

  // Gets the snapshot data for the specified document.
  // getSnapshot(docName, callback)
  // Callback is called with (error, {v: <version>, type: <type>, snapshot: <snapshot>, meta: <meta>})
  this.getSnapshot = (docName, callback) =>
    load(docName, (error, doc) =>
      callback(
        error,
        doc
          ? { v: doc.v, type: doc.type, snapshot: doc.snapshot, meta: doc.meta }
          : undefined
      )
    )

  // Gets the latest version # of the document.
  // getVersion(docName, callback)
  // callback is called with (error, version).
  this.getVersion = (docName, callback) =>
    load(docName, (error, doc) =>
      callback(error, doc != null ? doc.v : undefined)
    )

  // Apply an op to the specified document.
  // The callback is passed (error, applied version #)
  // opData = {op:op, v:v, meta:metadata}
  //
  // Ops are queued before being applied so that the following code applies op C before op B:
  //   model.applyOp 'doc', OPA, -> model.applyOp 'doc', OPB
  //   model.applyOp 'doc', OPC
  this.applyOp = (
    docName,
    opData,
    callback // All the logic for this is in makeOpQueue, above.
  ) =>
    load(docName, function (error, doc) {
      if (error) {
        return callback(error)
      }

      return process.nextTick(() =>
        doc.opQueue(opData, function (error, newVersion) {
          refreshReapingTimeout(docName)
          return typeof callback === 'function'
            ? callback(error, newVersion)
            : undefined
        })
      )
    })

  // TODO: store (some) metadata in DB
  // TODO: op and meta should be combinable in the op that gets sent
  this.applyMetaOp = function (docName, metaOpData, callback) {
    const { path, value } = metaOpData.meta

    if (!isArray(path)) {
      return typeof callback === 'function'
        ? callback('path should be an array')
        : undefined
    }

    return load(docName, function (error, doc) {
      if (error != null) {
        return typeof callback === 'function' ? callback(error) : undefined
      } else {
        let applied = false
        switch (path[0]) {
          case 'shout':
            doc.eventEmitter.emit('op', metaOpData)
            applied = true
            break
        }

        if (applied) {
          model.emit('applyMetaOp', docName, path, value)
        }
        return typeof callback === 'function'
          ? callback(null, doc.v)
          : undefined
      }
    })
  }

  // Listen to all ops from the specified version. If version is in the past, all
  // ops since that version are sent immediately to the listener.
  //
  // The callback is called once the listener is attached, but before any ops have been passed
  // to the listener.
  //
  // This will _not_ edit the document metadata.
  //
  // If there are any listeners, we don't purge the document from the cache. But be aware, this behaviour
  // might change in a future version.
  //
  // version is the document version at which the document is opened. It can be left out if you want to open
  // the document at the most recent version.
  //
  // listener is called with (opData) each time an op is applied.
  //
  // callback(error, openedVersion)
  this.listen = function (docName, version, listener, callback) {
    if (typeof version === 'function') {
      ;[version, listener, callback] = Array.from([null, version, listener])
    }

    return load(docName, function (error, doc) {
      if (error) {
        return typeof callback === 'function' ? callback(error) : undefined
      }

      clearTimeout(doc.reapTimer)

      if (version != null) {
        return getOps(docName, version, null, function (error, data) {
          if (error) {
            return typeof callback === 'function' ? callback(error) : undefined
          }

          doc.eventEmitter.on('op', listener)
          if (typeof callback === 'function') {
            callback(null, version)
          }
          return (() => {
            const result = []
            for (const op of Array.from(data)) {
              var needle
              listener(op)

              // The listener may well remove itself during the catchup phase. If this happens, break early.
              // This is done in a quite inefficient way. (O(n) where n = #listeners on doc)
              if (
                ((needle = listener),
                !Array.from(doc.eventEmitter.listeners('op')).includes(needle))
              ) {
                break
              } else {
                result.push(undefined)
              }
            }
            return result
          })()
        })
      } else {
        // Version is null / undefined. Just add the listener.
        doc.eventEmitter.on('op', listener)
        return typeof callback === 'function'
          ? callback(null, doc.v)
          : undefined
      }
    })
  }

  // Remove a listener for a particular document.
  //
  // removeListener(docName, listener)
  //
  // This is synchronous.
  this.removeListener = function (docName, listener) {
    // The document should already be loaded.
    const doc = docs[docName]
    if (!doc) {
      throw new Error('removeListener called but document not loaded')
    }

    doc.eventEmitter.removeListener('op', listener)
    return refreshReapingTimeout(docName)
  }

  // Flush saves all snapshot data to the database. I'm not sure whether or not this is actually needed -
  // sharejs will happily replay uncommitted ops when documents are re-opened anyway.
  this.flush = function (callback) {
    if (!db) {
      return typeof callback === 'function' ? callback() : undefined
    }

    let pendingWrites = 0

    for (const docName in docs) {
      const doc = docs[docName]
      if (doc.committedVersion < doc.v) {
        pendingWrites++
        // I'm hoping writeSnapshot will always happen in another thread.
        tryWriteSnapshot(docName, () =>
          process.nextTick(function () {
            pendingWrites--
            if (pendingWrites === 0) {
              return typeof callback === 'function' ? callback() : undefined
            }
          })
        )
      }
    }

    // If nothing was queued, terminate immediately.
    if (pendingWrites === 0) {
      return typeof callback === 'function' ? callback() : undefined
    }
  }

  // Close the database connection. This is needed so nodejs can shut down cleanly.
  this.closeDb = function () {
    __guardMethod__(db, 'close', o => o.close())
    return (db = null)
  }
}

// Model inherits from EventEmitter.
Model.prototype = new EventEmitter()

function __guardMethod__(obj, methodName, transform) {
  if (
    typeof obj !== 'undefined' &&
    obj !== null &&
    typeof obj[methodName] === 'function'
  ) {
    return transform(obj, methodName)
  } else {
    return undefined
  }
}
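To illustrate the transform path in makeOpQueue above, here is a hedged sketch (not part of the commit) of two ops submitted against the same base version; the second is transformed against the intervening op before being applied. Same assumptions as the earlier sketch: the 'simple' type is registered in ../types and db is null.

// Transform-path sketch (illustrative): both ops claim base version 0, so
// the second is transformed against the first inside makeOpQueue.
const Model = require('./model') // hypothetical relative path
const model = new Model(null)

model.create('doc', 'simple', function () {
  model.applyOp('doc', { v: 0, op: { position: 0, text: 'AAA' } }, function () {
    // Still v: 0 - the doc is now at version 1, so getOps() fetches the
    // intervening op and transform() shifts this insert to position 3.
    model.applyOp('doc', { v: 0, op: { position: 0, text: 'B' } }, function () {
      model.getSnapshot('doc', function (error, data) {
        console.log(data.snapshot.str) // 'AAAB'
      })
    })
  })
})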
60
services/document-updater/app/js/sharejs/server/syncqueue.js
Normal file

@ -0,0 +1,60 @@
// TODO: This file was created by bulk-decaffeinate.
// Sanity-check the conversion and remove this comment.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
// A synchronous processing queue. The queue calls process on the arguments,
// ensuring that process() is only executing once at a time.
//
// process(data, callback) _MUST_ eventually call its callback.
//
// Example:
//
//   queue = require 'syncqueue'
//
//   fn = queue (data, callback) ->
//     asyncthing data, ->
//       callback(321)
//
//   fn(1)
//   fn(2)
//   fn(3, (result) -> console.log(result))
//
//   ^--- async thing will only be running once at any time.

module.exports = function (process) {
  if (typeof process !== 'function') {
    throw new Error('process is not a function')
  }
  const queue = []

  const enqueue = function (data, callback) {
    queue.push([data, callback])
    return flush()
  }

  enqueue.busy = false

  var flush = function () {
    if (enqueue.busy || queue.length === 0) {
      return
    }

    enqueue.busy = true
    const [data, callback] = Array.from(queue.shift())
    return process(data, function (...result) {
      // TODO: Make this not use varargs - varargs are really slow.
      enqueue.busy = false
      // This is called after busy = false so a user can check if enqueue.busy is set in the callback.
      if (callback) {
        callback.apply(null, result)
      }
      return flush()
    })
  }

  return enqueue
}
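The example in the header comment is CoffeeScript; the same flow in plain JavaScript looks like the sketch below (illustrative, not part of the commit). Only one call to the worker is ever in flight at a time, and extra arguments passed to the worker's callback are forwarded to the per-item callback.

// The header example translated to plain JavaScript (illustrative).
const queue = require('./syncqueue') // hypothetical relative path

const fn = queue(function (data, callback) {
  // Simulated async work; the queue guarantees only one item is being
  // processed at a time, however quickly fn() is called.
  setImmediate(function () {
    callback(321)
  })
})

fn(1)
fn(2)
fn(3, result => console.log(result)) // prints 321 after items 1 and 2 finish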
|
54
services/document-updater/app/js/sharejs/simple.js
Normal file
@@ -0,0 +1,54 @@
// TODO: This file was created by bulk-decaffeinate.
// Sanity-check the conversion and remove this comment.
/*
 * decaffeinate suggestions:
 * DS102: Remove unnecessary code created because of implicit returns
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
// This is a really simple OT type. It's not compiled with the web client, but it could be.
//
// It's mostly included for demonstration purposes and it's used in a lot of unit tests.
//
// This defines a really simple text OT type which only allows inserts. (No deletes).
//
// Ops look like:
//   {position:#, text:"asdf"}
//
// Document snapshots look like:
//   {str:string}

module.exports = {
  // The name of the OT type. The type is stored in types[type.name]. The name can be
  // used in place of the actual type in all the API methods.
  name: 'simple',

  // Create a new document snapshot
  create() {
    return { str: '' }
  },

  // Apply the given op to the document snapshot. Returns the new snapshot.
  //
  // The original snapshot should not be modified.
  apply(snapshot, op) {
    if (!(op.position >= 0 && op.position <= snapshot.str.length)) {
      throw new Error('Invalid position')
    }

    let { str } = snapshot
    str = str.slice(0, op.position) + op.text + str.slice(op.position)
    return { str }
  },

  // transform op1 by op2. Return transformed version of op1.
  // sym describes the symmetry of the op. It's 'left' or 'right' depending on whether the
  // op being transformed comes from the client or the server.
  transform(op1, op2, sym) {
    let pos = op1.position
    if (op2.position < pos || (op2.position === pos && sym === 'left')) {
      pos += op2.text.length
    }

    return { position: pos, text: op1.text }
  },
}
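
For orientation, a short sketch of how this type behaves (against the module above; note that a 'left' op is shifted past a tying concurrent insert):

const simple = require('./simple')

let doc = simple.create()                               // { str: '' }
doc = simple.apply(doc, { position: 0, text: 'hello' }) // { str: 'hello' }

// Two concurrent inserts at position 5:
const a = { position: 5, text: '!' }
const b = { position: 5, text: '?' }
simple.transform(a, b, 'left')  // => { position: 6, text: '!' }
simple.transform(a, b, 'right') // => { position: 5, text: '!' }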
60
services/document-updater/app/js/sharejs/syncqueue.js
Normal file
@@ -0,0 +1,60 @@
// TODO: This file was created by bulk-decaffeinate.
// Sanity-check the conversion and remove this comment.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
// A synchronous processing queue. The queue calls process on the arguments,
// ensuring that process() is only executing once at a time.
//
// process(data, callback) _MUST_ eventually call its callback.
//
// Example:
//
//   queue = require 'syncqueue'
//
//   fn = queue (data, callback) ->
//     asyncthing data, ->
//       callback(321)
//
//   fn(1)
//   fn(2)
//   fn(3, (result) -> console.log(result))
//
//   ^--- async thing will only be running once at any time.

module.exports = function (process) {
  if (typeof process !== 'function') {
    throw new Error('process is not a function')
  }
  const queue = []

  const enqueue = function (data, callback) {
    queue.push([data, callback])
    return flush()
  }

  enqueue.busy = false

  var flush = function () {
    if (enqueue.busy || queue.length === 0) {
      return
    }

    enqueue.busy = true
    const [data, callback] = Array.from(queue.shift())
    return process(data, function (...result) {
      // TODO: Make this not use varargs - varargs are really slow.
      enqueue.busy = false
      // This is called after busy = false so a user can check if enqueue.busy is set in the callback.
      if (callback) {
        callback.apply(null, result)
      }
      return flush()
    })
  }

  return enqueue
}
52
services/document-updater/app/js/sharejs/text-api.js
Normal file
@@ -0,0 +1,52 @@
// TODO: This file was created by bulk-decaffeinate.
// Sanity-check the conversion and remove this comment.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
// Text document API for text

let text
if (typeof WEB === 'undefined') {
  text = require('./text')
}

text.api = {
  provides: { text: true },

  // The number of characters in the string
  getLength() {
    return this.snapshot.length
  },

  // Get the text contents of a document
  getText() {
    return this.snapshot
  },

  insert(pos, text, callback) {
    const op = [{ p: pos, i: text }]

    this.submitOp(op, callback)
    return op
  },

  del(pos, length, callback) {
    const op = [{ p: pos, d: this.snapshot.slice(pos, pos + length) }]

    this.submitOp(op, callback)
    return op
  },

  _register() {
    return this.on('remoteop', function (op) {
      return Array.from(op).map(component =>
        component.i !== undefined
          ? this.emit('insert', component.p, component.i)
          : this.emit('delete', component.p, component.d)
      )
    })
  },
}
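
A hedged sketch of this API from a document object's point of view (doc, submitOp and the event wiring are assumptions standing in for the sharejs session glue, which is not part of this file):

// Suppose doc.snapshot === 'hello world' and this API is mixed in.
doc.on('insert', (pos, str) => console.log(`insert '${str}' at ${pos}`))
doc.on('delete', (pos, str) => console.log(`delete '${str}' at ${pos}`))

doc.insert(5, ',')  // submits [{ p: 5, i: ',' }]  -> 'hello, world'
doc.del(5, 1)       // submits [{ p: 5, d: ',' }]  -> 'hello world'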
@@ -0,0 +1,76 @@
/* eslint-disable
    no-undef,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * DS205: Consider reworking code to avoid use of IIFEs
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
// Text document API for text

let type
if (typeof WEB !== 'undefined' && WEB !== null) {
  type = exports.types['text-composable']
} else {
  type = require('./text-composable')
}

type.api = {
  provides: { text: true },

  // The number of characters in the string
  getLength() {
    return this.snapshot.length
  },

  // Get the text contents of a document
  getText() {
    return this.snapshot
  },

  insert(pos, text, callback) {
    const op = type.normalize([pos, { i: text }, this.snapshot.length - pos])

    this.submitOp(op, callback)
    return op
  },

  del(pos, length, callback) {
    const op = type.normalize([
      pos,
      { d: this.snapshot.slice(pos, pos + length) },
      this.snapshot.length - pos - length,
    ])

    this.submitOp(op, callback)
    return op
  },

  _register() {
    return this.on('remoteop', function (op) {
      let pos = 0
      return (() => {
        const result = []
        for (const component of Array.from(op)) {
          if (typeof component === 'number') {
            result.push((pos += component))
          } else if (component.i !== undefined) {
            this.emit('insert', pos, component.i)
            result.push((pos += component.i.length))
          } else {
            // delete
            // We don't increment pos, because the position
            // specified is after the delete has happened.
            result.push(this.emit('delete', pos, component.d))
          }
        }
        return result
      })()
    })
  },
}
399
services/document-updater/app/js/sharejs/text-composable.js
Normal file
@@ -0,0 +1,399 @@
/* eslint-disable
    no-cond-assign,
    no-return-assign,
    no-undef,
    no-unused-vars,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * DS205: Consider reworking code to avoid use of IIFEs
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
// An alternate composable implementation for text. This is much closer
// to the implementation used by Google Wave.
//
// Ops are lists of components which iterate over the whole document.
// Components are either:
//   A number N: Skip N characters in the original document
//   {i:'str'}: Insert 'str' at the current position in the document
//   {d:'str'}: Delete 'str', which appears at the current position in the document
//
// Eg: [3, {i:'hi'}, 5, {d:'internet'}]
//
// Snapshots are strings.

let makeAppend
const p = function () {} // require('util').debug
const i = function () {} // require('util').inspect

const exports = typeof WEB !== 'undefined' && WEB !== null ? {} : module.exports

exports.name = 'text-composable'

exports.create = () => ''

// -------- Utility methods

const checkOp = function (op) {
  if (!Array.isArray(op)) {
    throw new Error('Op must be an array of components')
  }
  let last = null
  return (() => {
    const result = []
    for (const c of Array.from(op)) {
      if (typeof c === 'object') {
        if (
          (c.i == null || !(c.i.length > 0)) &&
          (c.d == null || !(c.d.length > 0))
        ) {
          throw new Error(`Invalid op component: ${i(c)}`)
        }
      } else {
        if (typeof c !== 'number') {
          throw new Error('Op components must be objects or numbers')
        }
        if (!(c > 0)) {
          throw new Error('Skip components must be a positive number')
        }
        if (typeof last === 'number') {
          throw new Error('Adjacent skip components should be added')
        }
      }

      result.push((last = c))
    }
    return result
  })()
}

// Makes a function for appending components to a given op.
// Exported for the randomOpGenerator.
exports._makeAppend = makeAppend = op =>
  function (component) {
    if (component === 0 || component.i === '' || component.d === '') {
    } else if (op.length === 0) {
      return op.push(component)
    } else if (
      typeof component === 'number' &&
      typeof op[op.length - 1] === 'number'
    ) {
      return (op[op.length - 1] += component)
    } else if (component.i != null && op[op.length - 1].i != null) {
      return (op[op.length - 1].i += component.i)
    } else if (component.d != null && op[op.length - 1].d != null) {
      return (op[op.length - 1].d += component.d)
    } else {
      return op.push(component)
    }
  }

// checkOp op

// Makes 2 functions for taking components from the start of an op, and for peeking
// at the next op that could be taken.
const makeTake = function (op) {
  // The index of the next component to take
  let idx = 0
  // The offset into the component
  let offset = 0

  // Take up to length n from the front of op. If n is null, take the next
  // op component. If indivisableField == 'd', delete components won't be separated.
  // If indivisableField == 'i', insert components won't be separated.
  const take = function (n, indivisableField) {
    let c
    if (idx === op.length) {
      return null
    }
    // assert.notStrictEqual op.length, i, 'The op is too short to traverse the document'

    if (typeof op[idx] === 'number') {
      if (n == null || op[idx] - offset <= n) {
        c = op[idx] - offset
        ++idx
        offset = 0
        return c
      } else {
        offset += n
        return n
      }
    } else {
      // Take from the string
      const field = op[idx].i ? 'i' : 'd'
      c = {}
      if (
        n == null ||
        op[idx][field].length - offset <= n ||
        field === indivisableField
      ) {
        c[field] = op[idx][field].slice(offset)
        ++idx
        offset = 0
      } else {
        c[field] = op[idx][field].slice(offset, offset + n)
        offset += n
      }
      return c
    }
  }

  const peekType = () => op[idx]

  return [take, peekType]
}

// Find and return the length of an op component
const componentLength = function (component) {
  if (typeof component === 'number') {
    return component
  } else if (component.i != null) {
    return component.i.length
  } else {
    return component.d.length
  }
}

// Normalize an op, removing all empty skips and empty inserts / deletes. Concatenate
// adjacent inserts and deletes.
exports.normalize = function (op) {
  const newOp = []
  const append = makeAppend(newOp)
  for (const component of Array.from(op)) {
    append(component)
  }
  return newOp
}

// Apply the op to the string. Returns the new string.
exports.apply = function (str, op) {
  p(`Applying ${i(op)} to '${str}'`)
  if (typeof str !== 'string') {
    throw new Error('Snapshot should be a string')
  }
  checkOp(op)

  const pos = 0
  const newDoc = []

  for (const component of Array.from(op)) {
    if (typeof component === 'number') {
      if (component > str.length) {
        throw new Error('The op is too long for this document')
      }
      newDoc.push(str.slice(0, component))
      str = str.slice(component)
    } else if (component.i != null) {
      newDoc.push(component.i)
    } else {
      if (component.d !== str.slice(0, component.d.length)) {
        throw new Error(
          `The deleted text '${
            component.d
          }' doesn't match the next characters in the document '${str.slice(
            0,
            component.d.length
          )}'`
        )
      }
      str = str.slice(component.d.length)
    }
  }

  if (str !== '') {
    throw new Error("The applied op doesn't traverse the entire document")
  }

  return newDoc.join('')
}

// transform op1 by op2. Return transformed version of op1.
// op1 and op2 are unchanged by transform.
exports.transform = function (op, otherOp, side) {
  let component
  if (side !== 'left' && side !== 'right') {
    throw new Error(`side (${side}) must be 'left' or 'right'`)
  }

  checkOp(op)
  checkOp(otherOp)
  const newOp = []

  const append = makeAppend(newOp)
  const [take, peek] = Array.from(makeTake(op))

  for (component of Array.from(otherOp)) {
    var chunk, length
    if (typeof component === 'number') {
      // Skip
      length = component
      while (length > 0) {
        chunk = take(length, 'i')
        if (chunk === null) {
          throw new Error(
            'The op traverses more elements than the document has'
          )
        }

        append(chunk)
        if (typeof chunk !== 'object' || chunk.i == null) {
          length -= componentLength(chunk)
        }
      }
    } else if (component.i != null) {
      // Insert
      if (side === 'left') {
        // The left insert should go first.
        const o = peek()
        if (o != null ? o.i : undefined) {
          append(take())
        }
      }

      // Otherwise, skip the inserted text.
      append(component.i.length)
    } else {
      // Delete.
      // assert.ok component.d
      ;({ length } = component.d)
      while (length > 0) {
        chunk = take(length, 'i')
        if (chunk === null) {
          throw new Error(
            'The op traverses more elements than the document has'
          )
        }

        if (typeof chunk === 'number') {
          length -= chunk
        } else if (chunk.i != null) {
          append(chunk)
        } else {
          // assert.ok chunk.d
          // The delete is unnecessary now.
          length -= chunk.d.length
        }
      }
    }
  }

  // Append extras from op1
  while ((component = take())) {
    if ((component != null ? component.i : undefined) == null) {
      throw new Error(`Remaining fragments in the op: ${i(component)}`)
    }
    append(component)
  }

  return newOp
}

// Compose 2 ops into 1 op.
exports.compose = function (op1, op2) {
  let component
  p(`COMPOSE ${i(op1)} + ${i(op2)}`)
  checkOp(op1)
  checkOp(op2)

  const result = []

  const append = makeAppend(result)
  const [take, _] = Array.from(makeTake(op1))

  for (component of Array.from(op2)) {
    var chunk, length
    if (typeof component === 'number') {
      // Skip
      length = component
      while (length > 0) {
        chunk = take(length, 'd')
        if (chunk === null) {
          throw new Error(
            'The op traverses more elements than the document has'
          )
        }

        append(chunk)
        if (typeof chunk !== 'object' || chunk.d == null) {
          length -= componentLength(chunk)
        }
      }
    } else if (component.i != null) {
      // Insert
      append({ i: component.i })
    } else {
      // Delete
      let offset = 0
      while (offset < component.d.length) {
        chunk = take(component.d.length - offset, 'd')
        if (chunk === null) {
          throw new Error(
            'The op traverses more elements than the document has'
          )
        }

        // If it's a delete, append it. If it's a skip, drop it and decrease length.
        // If it's an insert, check the strings match, drop it and decrease length.
        if (typeof chunk === 'number') {
          append({ d: component.d.slice(offset, offset + chunk) })
          offset += chunk
        } else if (chunk.i != null) {
          if (component.d.slice(offset, offset + chunk.i.length) !== chunk.i) {
            throw new Error("The deleted text doesn't match the inserted text")
          }
          offset += chunk.i.length
          // The ops cancel each other out.
        } else {
          // Delete
          append(chunk)
        }
      }
    }
  }

  // Append extras from op1
  while ((component = take())) {
    if ((component != null ? component.d : undefined) == null) {
      throw new Error(`Trailing stuff in op1 ${i(component)}`)
    }
    append(component)
  }

  return result
}

const invertComponent = function (c) {
  if (typeof c === 'number') {
    return c
  } else if (c.i != null) {
    return { d: c.i }
  } else {
    return { i: c.d }
  }
}

// Invert an op
exports.invert = function (op) {
  const result = []
  const append = makeAppend(result)

  for (const component of Array.from(op)) {
    append(invertComponent(component))
  }

  return result
}

if (typeof window !== 'undefined' && window !== null) {
  if (!window.ot) {
    window.ot = {}
  }
  if (!window.ot.types) {
    window.ot.types = {}
  }
  window.ot.types.text = exports
}
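
To make the component format concrete, a small worked example (a sketch using the module above):

const tc = require('./text-composable')

// Skip 3 chars, insert 'hi', skip 5, delete 'internet'.
const op = [3, { i: 'hi' }, 5, { d: 'internet' }]
tc.apply('abcdefghinternet', op) // => 'abchidefgh'

// Inverting the op turns inserts into deletes and back.
tc.apply('abchidefgh', tc.invert(op)) // => 'abcdefghinternet'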
133
services/document-updater/app/js/sharejs/text-tp2-api.js
Normal file
@@ -0,0 +1,133 @@
/* eslint-disable
    no-undef,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * DS205: Consider reworking code to avoid use of IIFEs
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
// Text document API for text-tp2

let type
if (typeof WEB !== 'undefined' && WEB !== null) {
  type = exports.types['text-tp2']
} else {
  type = require('./text-tp2')
}

const { _takeDoc: takeDoc, _append: append } = type

const appendSkipChars = (op, doc, pos, maxlength) =>
  (() => {
    const result = []
    while (
      (maxlength === undefined || maxlength > 0) &&
      pos.index < doc.data.length
    ) {
      const part = takeDoc(doc, pos, maxlength, true)
      if (maxlength !== undefined && typeof part === 'string') {
        maxlength -= part.length
      }
      result.push(append(op, part.length || part))
    }
    return result
  })()

type.api = {
  provides: { text: true },

  // The number of characters in the string
  getLength() {
    return this.snapshot.charLength
  },

  // Flatten a document into a string
  getText() {
    const strings = Array.from(this.snapshot.data).filter(
      elem => typeof elem === 'string'
    )
    return strings.join('')
  },

  insert(pos, text, callback) {
    if (pos === undefined) {
      pos = 0
    }

    const op = []
    const docPos = { index: 0, offset: 0 }

    appendSkipChars(op, this.snapshot, docPos, pos)
    append(op, { i: text })
    appendSkipChars(op, this.snapshot, docPos)

    this.submitOp(op, callback)
    return op
  },

  del(pos, length, callback) {
    const op = []
    const docPos = { index: 0, offset: 0 }

    appendSkipChars(op, this.snapshot, docPos, pos)

    while (length > 0) {
      const part = takeDoc(this.snapshot, docPos, length, true)
      if (typeof part === 'string') {
        append(op, { d: part.length })
        length -= part.length
      } else {
        append(op, part)
      }
    }

    appendSkipChars(op, this.snapshot, docPos)

    this.submitOp(op, callback)
    return op
  },

  _register() {
    // Interpret received ops + generate more detailed events for them
    return this.on('remoteop', function (op, snapshot) {
      let textPos = 0
      const docPos = { index: 0, offset: 0 }

      for (const component of Array.from(op)) {
        var part, remainder
        if (typeof component === 'number') {
          // Skip
          remainder = component
          while (remainder > 0) {
            part = takeDoc(snapshot, docPos, remainder)
            if (typeof part === 'string') {
              textPos += part.length
            }
            remainder -= part.length || part
          }
        } else if (component.i !== undefined) {
          // Insert
          if (typeof component.i === 'string') {
            this.emit('insert', textPos, component.i)
            textPos += component.i.length
          }
        } else {
          // Delete
          remainder = component.d
          while (remainder > 0) {
            part = takeDoc(snapshot, docPos, remainder)
            if (typeof part === 'string') {
              this.emit('delete', textPos, part)
            }
            remainder -= part.length || part
          }
        }
      }
    })
  },
}
497
services/document-updater/app/js/sharejs/text-tp2.js
Normal file
@@ -0,0 +1,497 @@
/* eslint-disable
    no-cond-assign,
    no-return-assign,
    no-undef,
    no-unused-vars,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * DS103: Rewrite code to no longer use __guard__
 * DS205: Consider reworking code to avoid use of IIFEs
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
// A TP2 implementation of text, following this spec:
// http://code.google.com/p/lightwave/source/browse/trunk/experimental/ot/README
//
// A document is made up of a string and a set of tombstones inserted throughout
// the string. For example, 'some ', (2 tombstones), 'string'.
//
// This is encoded in a document as: {s:'some string', t:[5, -2, 6]}
//
// Ops are lists of components which iterate over the whole document.
// Components are either:
//   N: Skip N characters in the original document
//   {i:'str'}: Insert 'str' at the current position in the document
//   {i:N}: Insert N tombstones at the current position in the document
//   {d:N}: Delete (tombstone) N characters at the current position in the document
//
// Eg: [3, {i:'hi'}, 5, {d:8}]
//
// Snapshots are lists with characters and tombstones. Characters are stored in strings
// and adjacent tombstones are flattened into numbers.
//
// Eg, the document: 'Hello .....world' ('.' denotes tombstoned (deleted) characters)
// would be represented by a document snapshot of ['Hello ', 5, 'world']

let append, appendDoc, takeDoc
var type = {
  name: 'text-tp2',
  tp2: true,
  create() {
    return { charLength: 0, totalLength: 0, positionCache: [], data: [] }
  },
  serialize(doc) {
    if (!doc.data) {
      throw new Error('invalid doc snapshot')
    }
    return doc.data
  },
  deserialize(data) {
    const doc = type.create()
    doc.data = data

    for (const component of Array.from(data)) {
      if (typeof component === 'string') {
        doc.charLength += component.length
        doc.totalLength += component.length
      } else {
        doc.totalLength += component
      }
    }

    return doc
  },
}

const checkOp = function (op) {
  if (!Array.isArray(op)) {
    throw new Error('Op must be an array of components')
  }
  let last = null
  return (() => {
    const result = []
    for (const c of Array.from(op)) {
      if (typeof c === 'object') {
        if (c.i !== undefined) {
          if (
            (typeof c.i !== 'string' || !(c.i.length > 0)) &&
            (typeof c.i !== 'number' || !(c.i > 0))
          ) {
            throw new Error('Inserts must insert a string or a +ive number')
          }
        } else if (c.d !== undefined) {
          if (typeof c.d !== 'number' || !(c.d > 0)) {
            throw new Error('Deletes must be a +ive number')
          }
        } else {
          throw new Error('Operation component must define .i or .d')
        }
      } else {
        if (typeof c !== 'number') {
          throw new Error('Op components must be objects or numbers')
        }
        if (!(c > 0)) {
          throw new Error('Skip components must be a positive number')
        }
        if (typeof last === 'number') {
          throw new Error('Adjacent skip components should be combined')
        }
      }

      result.push((last = c))
    }
    return result
  })()
}

// Take the next part from the specified position in a document snapshot.
// position = {index, offset}. It will be updated.
type._takeDoc = takeDoc = function (
  doc,
  position,
  maxlength,
  tombsIndivisible
) {
  if (position.index >= doc.data.length) {
    throw new Error('Operation goes past the end of the document')
  }

  const part = doc.data[position.index]
  // peel off data[0]
  const result =
    typeof part === 'string'
      ? maxlength !== undefined
        ? part.slice(position.offset, position.offset + maxlength)
        : part.slice(position.offset)
      : maxlength === undefined || tombsIndivisible
      ? part - position.offset
      : Math.min(maxlength, part - position.offset)

  const resultLen = result.length || result

  if ((part.length || part) - position.offset > resultLen) {
    position.offset += resultLen
  } else {
    position.index++
    position.offset = 0
  }

  return result
}

// Append a part to the end of a document
type._appendDoc = appendDoc = function (doc, p) {
  if (p === 0 || p === '') {
    return
  }

  if (typeof p === 'string') {
    doc.charLength += p.length
    doc.totalLength += p.length
  } else {
    doc.totalLength += p
  }

  const { data } = doc
  if (data.length === 0) {
    data.push(p)
  } else if (typeof data[data.length - 1] === typeof p) {
    data[data.length - 1] += p
  } else {
    data.push(p)
  }
}

// Apply the op to the document. The document is not modified in the process.
type.apply = function (doc, op) {
  if (
    doc.totalLength === undefined ||
    doc.charLength === undefined ||
    doc.data.length === undefined
  ) {
    throw new Error('Snapshot is invalid')
  }

  checkOp(op)

  const newDoc = type.create()
  const position = { index: 0, offset: 0 }

  for (const component of Array.from(op)) {
    var part, remainder
    if (typeof component === 'number') {
      remainder = component
      while (remainder > 0) {
        part = takeDoc(doc, position, remainder)

        appendDoc(newDoc, part)
        remainder -= part.length || part
      }
    } else if (component.i !== undefined) {
      appendDoc(newDoc, component.i)
    } else if (component.d !== undefined) {
      remainder = component.d
      while (remainder > 0) {
        part = takeDoc(doc, position, remainder)
        remainder -= part.length || part
      }
      appendDoc(newDoc, component.d)
    }
  }

  return newDoc
}

// Append an op component to the end of the specified op.
// Exported for the randomOpGenerator.
type._append = append = function (op, component) {
  if (
    component === 0 ||
    component.i === '' ||
    component.i === 0 ||
    component.d === 0
  ) {
  } else if (op.length === 0) {
    return op.push(component)
  } else {
    const last = op[op.length - 1]
    if (typeof component === 'number' && typeof last === 'number') {
      return (op[op.length - 1] += component)
    } else if (
      component.i !== undefined &&
      last.i != null &&
      typeof last.i === typeof component.i
    ) {
      return (last.i += component.i)
    } else if (component.d !== undefined && last.d != null) {
      return (last.d += component.d)
    } else {
      return op.push(component)
    }
  }
}

// Makes 2 functions for taking components from the start of an op, and for peeking
// at the next op that could be taken.
const makeTake = function (op) {
  // The index of the next component to take
  let index = 0
  // The offset into the component
  let offset = 0

  // Take up to length maxlength from the op. If maxlength is not defined, there is no max.
  // If insertsIndivisible is true, inserts (& insert tombstones) won't be separated.
  //
  // Returns null when op is fully consumed.
  const take = function (maxlength, insertsIndivisible) {
    let current
    if (index === op.length) {
      return null
    }

    const e = op[index]
    if (
      typeof (current = e) === 'number' ||
      typeof (current = e.i) === 'number' ||
      (current = e.d) !== undefined
    ) {
      let c
      if (
        maxlength == null ||
        current - offset <= maxlength ||
        (insertsIndivisible && e.i !== undefined)
      ) {
        // Return the rest of the current element.
        c = current - offset
        ++index
        offset = 0
      } else {
        offset += maxlength
        c = maxlength
      }
      if (e.i !== undefined) {
        return { i: c }
      } else if (e.d !== undefined) {
        return { d: c }
      } else {
        return c
      }
    } else {
      // Take from the inserted string
      let result
      if (
        maxlength == null ||
        e.i.length - offset <= maxlength ||
        insertsIndivisible
      ) {
        result = { i: e.i.slice(offset) }
        ++index
        offset = 0
      } else {
        result = { i: e.i.slice(offset, offset + maxlength) }
        offset += maxlength
      }
      return result
    }
  }

  const peekType = () => op[index]

  return [take, peekType]
}

// Find and return the length of an op component
const componentLength = function (component) {
  if (typeof component === 'number') {
    return component
  } else if (typeof component.i === 'string') {
    return component.i.length
  } else {
    // This should work because c.d and c.i must be positive.
    return component.d || component.i
  }
}

// Normalize an op, removing all empty skips and empty inserts / deletes. Concatenate
// adjacent inserts and deletes.
type.normalize = function (op) {
  const newOp = []
  for (const component of Array.from(op)) {
    append(newOp, component)
  }
  return newOp
}

// This is a helper method to transform and prune. goForwards is true for transform, false for prune.
const transformer = function (op, otherOp, goForwards, side) {
  let component
  checkOp(op)
  checkOp(otherOp)
  const newOp = []

  const [take, peek] = Array.from(makeTake(op))

  for (component of Array.from(otherOp)) {
    var chunk
    let length = componentLength(component)

    if (component.i !== undefined) {
      // Insert text or tombs
      if (goForwards) {
        // transform - insert skips over inserted parts
        if (side === 'left') {
          // The left insert should go first.
          while (__guard__(peek(), x => x.i) !== undefined) {
            append(newOp, take())
          }
        }

        // In any case, skip the inserted text.
        append(newOp, length)
      } else {
        // Prune. Remove skips for inserts.
        while (length > 0) {
          chunk = take(length, true)

          if (chunk === null) {
            throw new Error('The transformed op is invalid')
          }
          if (chunk.d !== undefined) {
            throw new Error(
              'The transformed op deletes locally inserted characters - it cannot be purged of the insert.'
            )
          }

          if (typeof chunk === 'number') {
            length -= chunk
          } else {
            append(newOp, chunk)
          }
        }
      }
    } else {
      // Skip or delete
      while (length > 0) {
        chunk = take(length, true)
        if (chunk === null) {
          throw new Error(
            'The op traverses more elements than the document has'
          )
        }

        append(newOp, chunk)
        if (!chunk.i) {
          length -= componentLength(chunk)
        }
      }
    }
  }

  // Append extras from op1
  while ((component = take())) {
    if (component.i === undefined) {
      throw new Error(`Remaining fragments in the op: ${component}`)
    }
    append(newOp, component)
  }

  return newOp
}

// transform op1 by op2. Return transformed version of op1.
// op1 and op2 are unchanged by transform.
// side should be 'left' or 'right', depending on if op1.id <> op2.id. 'left' == client op.
type.transform = function (op, otherOp, side) {
  if (side !== 'left' && side !== 'right') {
    throw new Error(`side (${side}) should be 'left' or 'right'`)
  }
  return transformer(op, otherOp, true, side)
}

// Prune is the inverse of transform.
type.prune = (op, otherOp) => transformer(op, otherOp, false)

// Compose 2 ops into 1 op.
type.compose = function (op1, op2) {
  let component
  if (op1 === null || op1 === undefined) {
    return op2
  }

  checkOp(op1)
  checkOp(op2)

  const result = []

  const [take, _] = Array.from(makeTake(op1))

  for (component of Array.from(op2)) {
    var chunk, length
    if (typeof component === 'number') {
      // Skip
      // Just copy from op1.
      length = component
      while (length > 0) {
        chunk = take(length)
        if (chunk === null) {
          throw new Error(
            'The op traverses more elements than the document has'
          )
        }

        append(result, chunk)
        length -= componentLength(chunk)
      }
    } else if (component.i !== undefined) {
      // Insert
      append(result, { i: component.i })
    } else {
      // Delete
      length = component.d
      while (length > 0) {
        chunk = take(length)
        if (chunk === null) {
          throw new Error(
            'The op traverses more elements than the document has'
          )
        }

        const chunkLength = componentLength(chunk)
        if (chunk.i !== undefined) {
          append(result, { i: chunkLength })
        } else {
          append(result, { d: chunkLength })
        }

        length -= chunkLength
      }
    }
  }

  // Append extras from op1
  while ((component = take())) {
    if (component.i === undefined) {
      throw new Error(`Remaining fragments in op1: ${component}`)
    }
    append(result, component)
  }

  return result
}

if (typeof WEB !== 'undefined' && WEB !== null) {
  exports.types['text-tp2'] = type
} else {
  module.exports = type
}

function __guard__(value, transform) {
  return typeof value !== 'undefined' && value !== null
    ? transform(value)
    : undefined
}
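
A worked example of the tombstone snapshot format (a sketch using the type above):

const tp2 = require('./text-tp2')

// 'Hello world' with 5 tombstones between the words:
let doc = tp2.deserialize(['Hello ', 5, 'world'])
// doc.charLength === 11, doc.totalLength === 16

// Skip over 'Hello ' plus the 5 tombstones, then tombstone the 5 chars of 'world'.
doc = tp2.apply(doc, [11, { d: 5 }])
// doc.data is now ['Hello ', 10] - the deleted text leaves tombstones behind.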
314
services/document-updater/app/js/sharejs/text.js
Normal file
@@ -0,0 +1,314 @@
/* eslint-disable
    camelcase,
    no-return-assign,
    no-undef,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
// A simple text implementation
//
// Operations are lists of components.
// Each component either inserts or deletes at a specified position in the document.
//
// Components are either:
//   {i:'str', p:100}: Insert 'str' at position 100 in the document
//   {d:'str', p:100}: Delete 'str' at position 100 in the document
//
// Components in an operation are executed sequentially, so the position of components
// assumes previous components have already executed.
//
// Eg: This op:
//   [{i:'abc', p:0}]
// is equivalent to this op:
//   [{i:'a', p:0}, {i:'b', p:1}, {i:'c', p:2}]

// NOTE: The global scope here is shared with other sharejs files when built with closure.
// Be careful what ends up in your namespace.

let append, transformComponent
const text = {}

text.name = 'text'

text.create = () => ''

const strInject = (s1, pos, s2) => s1.slice(0, pos) + s2 + s1.slice(pos)

const checkValidComponent = function (c) {
  if (typeof c.p !== 'number') {
    throw new Error('component missing position field')
  }

  const i_type = typeof c.i
  const d_type = typeof c.d
  if (!((i_type === 'string') ^ (d_type === 'string'))) {
    throw new Error('component needs an i or d field')
  }

  if (!(c.p >= 0)) {
    throw new Error('position cannot be negative')
  }
}

const checkValidOp = function (op) {
  for (const c of Array.from(op)) {
    checkValidComponent(c)
  }
  return true
}

text.apply = function (snapshot, op) {
  checkValidOp(op)
  for (const component of Array.from(op)) {
    if (component.i != null) {
      snapshot = strInject(snapshot, component.p, component.i)
    } else {
      const deleted = snapshot.slice(
        component.p,
        component.p + component.d.length
      )
      if (component.d !== deleted) {
        throw new Error(
          `Delete component '${component.d}' does not match deleted text '${deleted}'`
        )
      }
      snapshot =
        snapshot.slice(0, component.p) +
        snapshot.slice(component.p + component.d.length)
    }
  }

  return snapshot
}

// Exported for use by the random op generator.
//
// For simplicity, this version of append does not compress adjacent inserts and deletes of
// the same text. It would be nice to change that at some stage.
text._append = append = function (newOp, c) {
  if (c.i === '' || c.d === '') {
    return
  }
  if (newOp.length === 0) {
    return newOp.push(c)
  } else {
    const last = newOp[newOp.length - 1]

    // Compose the insert into the previous insert if possible
    if (
      last.i != null &&
      c.i != null &&
      last.p <= c.p &&
      c.p <= last.p + last.i.length
    ) {
      return (newOp[newOp.length - 1] = {
        i: strInject(last.i, c.p - last.p, c.i),
        p: last.p,
      })
    } else if (
      last.d != null &&
      c.d != null &&
      c.p <= last.p &&
      last.p <= c.p + c.d.length
    ) {
      return (newOp[newOp.length - 1] = {
        d: strInject(c.d, last.p - c.p, last.d),
        p: c.p,
      })
    } else {
      return newOp.push(c)
    }
  }
}

text.compose = function (op1, op2) {
  checkValidOp(op1)
  checkValidOp(op2)

  const newOp = op1.slice()
  for (const c of Array.from(op2)) {
    append(newOp, c)
  }

  return newOp
}

// Attempt to compress the op components together 'as much as possible'.
// This implementation preserves order and preserves create/delete pairs.
text.compress = op => text.compose([], op)

text.normalize = function (op) {
  const newOp = []

  // Normalize should allow ops which are a single (unwrapped) component:
  // {i:'asdf', p:23}.
  // There's no good way to test if something is an array:
  // http://perfectionkills.com/instanceof-considered-harmful-or-how-to-write-a-robust-isarray/
  // so this is probably the least bad solution.
  if (op.i != null || op.p != null) {
    op = [op]
  }

  for (const c of Array.from(op)) {
    if (c.p == null) {
      c.p = 0
    }
    append(newOp, c)
  }

  return newOp
}

// This helper method transforms a position by an op component.
//
// If c is an insert, insertAfter specifies whether the transform
// is pushed after the insert (true) or before it (false).
//
// insertAfter is optional for deletes.
const transformPosition = function (pos, c, insertAfter) {
  if (c.i != null) {
    if (c.p < pos || (c.p === pos && insertAfter)) {
      return pos + c.i.length
    } else {
      return pos
    }
  } else {
    // I think this could also be written as: Math.min(c.p, Math.min(c.p - otherC.p, otherC.d.length))
    // but I think it's harder to read that way, and it compiles using ternary operators anyway
    // so it's no slower written like this.
    if (pos <= c.p) {
      return pos
    } else if (pos <= c.p + c.d.length) {
      return c.p
    } else {
      return pos - c.d.length
    }
  }
}

// Helper method to transform a cursor position as a result of an op.
//
// Like transformPosition above, if c is an insert, insertAfter specifies whether the cursor position
// is pushed after an insert (true) or before it (false).
text.transformCursor = function (position, op, side) {
  const insertAfter = side === 'right'
  for (const c of Array.from(op)) {
    position = transformPosition(position, c, insertAfter)
  }
  return position
}

// Transform an op component by another op component. Asymmetric.
// The result will be appended to destination.
//
// exported for use in JSON type
text._tc = transformComponent = function (dest, c, otherC, side) {
  checkValidOp([c])
  checkValidOp([otherC])

  if (c.i != null) {
    append(dest, {
      i: c.i,
      p: transformPosition(c.p, otherC, side === 'right'),
    })
  } else {
    // Delete
    if (otherC.i != null) {
      // delete vs insert
      let s = c.d
      if (c.p < otherC.p) {
        append(dest, { d: s.slice(0, otherC.p - c.p), p: c.p })
        s = s.slice(otherC.p - c.p)
      }
      if (s !== '') {
        append(dest, { d: s, p: c.p + otherC.i.length })
      }
    } else {
      // Delete vs delete
      if (c.p >= otherC.p + otherC.d.length) {
        append(dest, { d: c.d, p: c.p - otherC.d.length })
      } else if (c.p + c.d.length <= otherC.p) {
        append(dest, c)
      } else {
        // They overlap somewhere.
        const newC = { d: '', p: c.p }
        if (c.p < otherC.p) {
          newC.d = c.d.slice(0, otherC.p - c.p)
        }
        if (c.p + c.d.length > otherC.p + otherC.d.length) {
          newC.d += c.d.slice(otherC.p + otherC.d.length - c.p)
        }

        // This is entirely optional - just for a check that the deleted
        // text in the two ops matches
        const intersectStart = Math.max(c.p, otherC.p)
        const intersectEnd = Math.min(
          c.p + c.d.length,
          otherC.p + otherC.d.length
        )
        const cIntersect = c.d.slice(intersectStart - c.p, intersectEnd - c.p)
        const otherIntersect = otherC.d.slice(
          intersectStart - otherC.p,
          intersectEnd - otherC.p
        )
        if (cIntersect !== otherIntersect) {
          throw new Error(
            'Delete ops delete different text in the same region of the document'
          )
        }

        if (newC.d !== '') {
          // This could be rewritten similarly to insert v delete, above.
          newC.p = transformPosition(newC.p, otherC)
          append(dest, newC)
        }
      }
    }
  }

  return dest
}

const invertComponent = function (c) {
  if (c.i != null) {
    return { d: c.i, p: c.p }
  } else {
    return { i: c.d, p: c.p }
  }
}

// No need to use append for invert, because the components won't be able to
// cancel with one another.
text.invert = op =>
  Array.from(op.slice().reverse()).map(c => invertComponent(c))

if (typeof WEB !== 'undefined' && WEB !== null) {
  if (!exports.types) {
    exports.types = {}
  }

  // This is kind of awful - come up with a better way to hook this helper code up.
  bootstrapTransform(text, transformComponent, checkValidOp, append)

  // [] is used to prevent closure from renaming types.text
  exports.types.text = text
} else {
  module.exports = text

  // The text type really shouldn't need this - it should be possible to define
  // an efficient transform function by making a sort of transform map and passing each
  // op component through it.
  require('./helpers').bootstrapTransform(
    text,
    transformComponent,
    checkValidOp,
    append
  )
}
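
A short worked example of the position-based component format (a sketch against the module above):

const text = require('./text')

// Sequential components: positions assume earlier components already ran.
text.apply('', [{ i: 'abc', p: 0 }, { i: 'd', p: 3 }]) // => 'abcd'

// Transforming a delete past a concurrent insert just shifts its position.
const op = [{ d: 'cd', p: 2 }]
const other = [{ i: 'X', p: 0 }]
text.transform(op, other, 'left') // => [{ d: 'cd', p: 3 }]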
37
services/document-updater/app/js/sharejs/types/count.js
Normal file
@@ -0,0 +1,37 @@
// TODO: This file was created by bulk-decaffeinate.
// Sanity-check the conversion and remove this comment.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
// This is a simple type used for testing other OT code. Each op is [expectedSnapshot, increment]

exports.name = 'count'
exports.create = () => 1

exports.apply = function (snapshot, op) {
  const [v, inc] = Array.from(op)
  if (snapshot !== v) {
    throw new Error(`Op ${v} != snapshot ${snapshot}`)
  }
  return snapshot + inc
}

// transform op1 by op2. Return transformed version of op1.
exports.transform = function (op1, op2) {
  if (op1[0] !== op2[0]) {
    throw new Error(`Op1 ${op1[0]} != op2 ${op2[0]}`)
  }
  return [op1[0] + op2[1], op1[1]]
}

exports.compose = function (op1, op2) {
  if (op1[0] + op1[1] !== op2[0]) {
    throw new Error(`Op1 ${op1} + 1 != op2 ${op2}`)
  }
  return [op1[0], op1[1] + op2[1]]
}

exports.generateRandomOp = doc => [[doc, 1], doc + 1]
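
Each op carries the snapshot it expects to apply to, which makes mismatches easy to spot in tests. A quick sketch:

const count = require('./count')

let snapshot = count.create()            // 1
snapshot = count.apply(snapshot, [1, 2]) // 3

// Two concurrent [1, 1] ops: transforming one by the other bumps its expected base.
count.transform([1, 1], [1, 1])          // => [2, 1]
count.compose([1, 2], [3, 4])            // => [1, 6]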
116
services/document-updater/app/js/sharejs/types/helpers.js
Normal file
@@ -0,0 +1,116 @@
/* eslint-disable
    no-return-assign,
    no-unused-vars,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
// These methods let you build a transform function from a transformComponent function
// for OT types like text and JSON in which operations are lists of components
// and transforming them requires N^2 work.

// Add transform and transformX functions for an OT type which has transformComponent defined.
// transformComponent(destination array, component, other component, side)
let bootstrapTransform
exports._bt = bootstrapTransform = function (
  type,
  transformComponent,
  checkValidOp,
  append
) {
  let transformX
  const transformComponentX = function (left, right, destLeft, destRight) {
    transformComponent(destLeft, left, right, 'left')
    return transformComponent(destRight, right, left, 'right')
  }

  // Transforms rightOp by leftOp. Returns [leftOp', rightOp']
  type.transformX = transformX = function (leftOp, rightOp) {
    checkValidOp(leftOp)
    checkValidOp(rightOp)

    const newRightOp = []

    for (let rightComponent of Array.from(rightOp)) {
      // Generate newLeftOp by composing leftOp by rightComponent
      const newLeftOp = []

      let k = 0
      while (k < leftOp.length) {
        var l
        const nextC = []
        transformComponentX(leftOp[k], rightComponent, newLeftOp, nextC)
        k++

        if (nextC.length === 1) {
          rightComponent = nextC[0]
        } else if (nextC.length === 0) {
          for (l of Array.from(leftOp.slice(k))) {
            append(newLeftOp, l)
          }
          rightComponent = null
          break
        } else {
          // Recurse.
          const [l_, r_] = Array.from(transformX(leftOp.slice(k), nextC))
          for (l of Array.from(l_)) {
            append(newLeftOp, l)
          }
          for (const r of Array.from(r_)) {
            append(newRightOp, r)
          }
          rightComponent = null
          break
        }
      }

      if (rightComponent != null) {
        append(newRightOp, rightComponent)
      }
      leftOp = newLeftOp
    }

    return [leftOp, newRightOp]
  }

  // Transforms op with specified type ('left' or 'right') by otherOp.
  return (type.transform = function (op, otherOp, type) {
    let _
    if (type !== 'left' && type !== 'right') {
      throw new Error("type must be 'left' or 'right'")
    }

    if (otherOp.length === 0) {
      return op
    }

    // TODO: Benchmark with and without this line. I _think_ it'll make a big difference...?
    if (op.length === 1 && otherOp.length === 1) {
      return transformComponent([], op[0], otherOp[0], type)
    }

    if (type === 'left') {
      let left
      ;[left, _] = Array.from(transformX(op, otherOp))
      return left
    } else {
      let right
      ;[_, right] = Array.from(transformX(otherOp, op))
      return right
    }
  })
}

if (typeof WEB === 'undefined') {
  exports.bootstrapTransform = bootstrapTransform
}
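
A sketch of what transformX gives you for the 'text' type (which wires itself up through bootstrapTransform above):

const text = require('./text')

const leftOp = [{ i: 'A', p: 0 }]
const rightOp = [{ i: 'B', p: 0 }]

// Both sides converge: apply leftOp then rightPrime, or rightOp then leftPrime.
const [leftPrime, rightPrime] = text.transformX(leftOp, rightOp)
// leftPrime  => [{ i: 'A', p: 0 }]   (the left insert wins the tie)
// rightPrime => [{ i: 'B', p: 1 }]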
25
services/document-updater/app/js/sharejs/types/index.js
Normal file
@@ -0,0 +1,25 @@
// TODO: This file was created by bulk-decaffeinate.
// Sanity-check the conversion and remove this comment.
/*
 * decaffeinate suggestions:
 * DS102: Remove unnecessary code created because of implicit returns
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */

const register = function (file) {
  const type = require(file)
  exports[type.name] = type
  try {
    return require(`${file}-api`)
  } catch (error) {
    // Not every type ships an -api file; ignore it if missing.
  }
}

// Import all the built-in types.
register('./simple')
register('./count')

register('./text')
register('./text-composable')
register('./text-tp2')

register('./json')
357
services/document-updater/app/js/sharejs/types/json-api.js
Normal file
@@ -0,0 +1,357 @@
|
|||
/* eslint-disable
    camelcase,
    no-undef,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * DS205: Consider reworking code to avoid use of IIFEs
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
// API for JSON OT

let json
if (typeof WEB === 'undefined') {
  json = require('./json')
}

if (typeof WEB !== 'undefined' && WEB !== null) {
  const { extendDoc } = exports
  exports.extendDoc = function (name, fn) {
    SubDoc.prototype[name] = fn
    return extendDoc(name, fn)
  }
}

const depath = function (path) {
  if (path.length === 1 && path[0].constructor === Array) {
    return path[0]
  } else {
    return path
  }
}

class SubDoc {
  constructor(doc, path) {
    this.doc = doc
    this.path = path
  }

  at(...path) {
    return this.doc.at(this.path.concat(depath(path)))
  }

  get() {
    return this.doc.getAt(this.path)
  }

  // for objects and lists
  set(value, cb) {
    return this.doc.setAt(this.path, value, cb)
  }

  // for strings and lists.
  insert(pos, value, cb) {
    return this.doc.insertAt(this.path, pos, value, cb)
  }

  // for strings
  del(pos, length, cb) {
    return this.doc.deleteTextAt(this.path, length, pos, cb)
  }

  // for objects and lists
  remove(cb) {
    return this.doc.removeAt(this.path, cb)
  }

  push(value, cb) {
    return this.insert(this.get().length, value, cb)
  }

  move(from, to, cb) {
    return this.doc.moveAt(this.path, from, to, cb)
  }

  add(amount, cb) {
    return this.doc.addAt(this.path, amount, cb)
  }

  on(event, cb) {
    return this.doc.addListener(this.path, event, cb)
  }

  removeListener(l) {
    return this.doc.removeListener(l)
  }

  // text API compatibility
  getLength() {
    return this.get().length
  }

  getText() {
    return this.get()
  }
}

const traverse = function (snapshot, path) {
  const container = { data: snapshot }
  let key = 'data'
  let elem = container
  for (const p of Array.from(path)) {
    elem = elem[key]
    key = p
    if (typeof elem === 'undefined') {
      throw new Error('bad path')
    }
  }
  return { elem, key }
}

const pathEquals = function (p1, p2) {
  if (p1.length !== p2.length) {
    return false
  }
  for (let i = 0; i < p1.length; i++) {
    const e = p1[i]
    if (e !== p2[i]) {
      return false
    }
  }
  return true
}
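// For example (illustrative values):
//   traverse({ list: ['a', 'b'] }, ['list', 1])
// returns { elem: ['a', 'b'], key: 1 }, so elem[key] addresses 'b' and
// assigning to elem[key] mutates the right spot in the wrapped snapshot.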

json.api = {
  provides: { json: true },

  at(...path) {
    return new SubDoc(this, depath(path))
  },

  get() {
    return this.snapshot
  },
  set(value, cb) {
    return this.setAt([], value, cb)
  },

  getAt(path) {
    const { elem, key } = traverse(this.snapshot, path)
    return elem[key]
  },

  setAt(path, value, cb) {
    const { elem, key } = traverse(this.snapshot, path)
    const op = { p: path }
    if (elem.constructor === Array) {
      op.li = value
      if (typeof elem[key] !== 'undefined') {
        op.ld = elem[key]
      }
    } else if (typeof elem === 'object') {
      op.oi = value
      if (typeof elem[key] !== 'undefined') {
        op.od = elem[key]
      }
    } else {
      throw new Error('bad path')
    }
    return this.submitOp([op], cb)
  },

  removeAt(path, cb) {
    const { elem, key } = traverse(this.snapshot, path)
    if (typeof elem[key] === 'undefined') {
      throw new Error('no element at that path')
    }
    const op = { p: path }
    if (elem.constructor === Array) {
      op.ld = elem[key]
    } else if (typeof elem === 'object') {
      op.od = elem[key]
    } else {
      throw new Error('bad path')
    }
    return this.submitOp([op], cb)
  },

  insertAt(path, pos, value, cb) {
    const { elem, key } = traverse(this.snapshot, path)
    const op = { p: path.concat(pos) }
    if (elem[key].constructor === Array) {
      op.li = value
    } else if (typeof elem[key] === 'string') {
      op.si = value
    }
    return this.submitOp([op], cb)
  },

  moveAt(path, from, to, cb) {
    const op = [{ p: path.concat(from), lm: to }]
    return this.submitOp(op, cb)
  },

  addAt(path, amount, cb) {
    const op = [{ p: path, na: amount }]
    return this.submitOp(op, cb)
  },

  deleteTextAt(path, length, pos, cb) {
    const { elem, key } = traverse(this.snapshot, path)
    const op = [{ p: path.concat(pos), sd: elem[key].slice(pos, pos + length) }]
    return this.submitOp(op, cb)
  },

  addListener(path, event, cb) {
    const l = { path, event, cb }
    this._listeners.push(l)
    return l
  },
  removeListener(l) {
    const i = this._listeners.indexOf(l)
    if (i < 0) {
      return false
    }
    this._listeners.splice(i, 1)
    return true
  },
  _register() {
    this._listeners = []
    this.on('change', function (op) {
      return (() => {
        const result = []
        for (const c of Array.from(op)) {
          var i
          if (c.na !== undefined || c.si !== undefined || c.sd !== undefined) {
            // no change to structure
            continue
          }
          var to_remove = []
          for (i = 0; i < this._listeners.length; i++) {
            // Transform a dummy op by the incoming op to work out what
            // should happen to the listener.
            const l = this._listeners[i]
            const dummy = { p: l.path, na: 0 }
            const xformed = this.type.transformComponent([], dummy, c, 'left')
            if (xformed.length === 0) {
              // The op was transformed to noop, so we should delete the listener.
              to_remove.push(i)
            } else if (xformed.length === 1) {
              // The op remained, so grab its new path into the listener.
              l.path = xformed[0].p
            } else {
              throw new Error(
                "Bad assumption in json-api: xforming an 'si' op will always result in 0 or 1 components."
              )
            }
          }
          to_remove.sort((a, b) => b - a)
          result.push(
            (() => {
              const result1 = []
              for (i of Array.from(to_remove)) {
                result1.push(this._listeners.splice(i, 1))
              }
              return result1
            })()
          )
        }
        return result
      })()
    })
    return this.on('remoteop', function (op) {
      return (() => {
        const result = []
        for (var c of Array.from(op)) {
          var match_path =
            c.na === undefined ? c.p.slice(0, c.p.length - 1) : c.p
          result.push(
            (() => {
              const result1 = []
              for (const { path, event, cb } of Array.from(this._listeners)) {
                var common
                if (pathEquals(path, match_path)) {
                  switch (event) {
                    case 'insert':
                      if (c.li !== undefined && c.ld === undefined) {
                        result1.push(cb(c.p[c.p.length - 1], c.li))
                      } else if (c.oi !== undefined && c.od === undefined) {
                        result1.push(cb(c.p[c.p.length - 1], c.oi))
                      } else if (c.si !== undefined) {
                        result1.push(cb(c.p[c.p.length - 1], c.si))
                      } else {
                        result1.push(undefined)
                      }
                      break
                    case 'delete':
                      if (c.li === undefined && c.ld !== undefined) {
                        result1.push(cb(c.p[c.p.length - 1], c.ld))
                      } else if (c.oi === undefined && c.od !== undefined) {
                        result1.push(cb(c.p[c.p.length - 1], c.od))
                      } else if (c.sd !== undefined) {
                        result1.push(cb(c.p[c.p.length - 1], c.sd))
                      } else {
                        result1.push(undefined)
                      }
                      break
                    case 'replace':
                      if (c.li !== undefined && c.ld !== undefined) {
                        result1.push(cb(c.p[c.p.length - 1], c.ld, c.li))
                      } else if (c.oi !== undefined && c.od !== undefined) {
                        result1.push(cb(c.p[c.p.length - 1], c.od, c.oi))
                      } else {
                        result1.push(undefined)
                      }
                      break
                    case 'move':
                      if (c.lm !== undefined) {
                        result1.push(cb(c.p[c.p.length - 1], c.lm))
                      } else {
                        result1.push(undefined)
                      }
                      break
                    case 'add':
                      if (c.na !== undefined) {
                        result1.push(cb(c.na))
                      } else {
                        result1.push(undefined)
                      }
                      break
                    default:
                      result1.push(undefined)
                  }
                } else if (
                  (common = this.type.commonPath(match_path, path)) != null
                ) {
                  if (event === 'child op') {
                    if (
                      match_path.length === path.length &&
                      path.length === common
                    ) {
                      throw new Error(
                        "paths match length and have commonality, but aren't equal?"
                      )
                    }
                    const child_path = c.p.slice(common + 1)
                    result1.push(cb(child_path, c))
                  } else {
                    result1.push(undefined)
                  }
                } else {
                  result1.push(undefined)
                }
              }
              return result1
            })()
          )
        }
        return result
      })()
    })
  },
}
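A rough sketch of this API in use, assuming `doc` is a client document of type json with _register() already called (names and values illustrative):

// Wrap a sub-tree of the snapshot and edit it through the SubDoc helpers.
const todos = doc.at('todos') // SubDoc over path ['todos']
todos.push('write tests', err => {
  if (err) throw err
})
// Structural listeners survive concurrent edits: the listener's path is
// re-transformed against every incoming op in the 'change' handler above.
const l = todos.on('insert', (pos, value) => console.log(pos, value))
// ...later:
doc.removeListener(l)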
630
services/document-updater/app/js/sharejs/types/json.js
Normal file
@@ -0,0 +1,630 @@
/* eslint-disable
    no-return-assign,
    no-undef,
    no-useless-catch,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
// This is the implementation of the JSON OT type.
//
// Spec is here: https://github.com/josephg/ShareJS/wiki/JSON-Operations

let text
if (typeof WEB !== 'undefined' && WEB !== null) {
  ;({ text } = exports.types)
} else {
  text = require('./text')
}

const json = {}

json.name = 'json'

json.create = () => null

json.invertComponent = function (c) {
  const c_ = { p: c.p }
  if (c.si !== undefined) {
    c_.sd = c.si
  }
  if (c.sd !== undefined) {
    c_.si = c.sd
  }
  if (c.oi !== undefined) {
    c_.od = c.oi
  }
  if (c.od !== undefined) {
    c_.oi = c.od
  }
  if (c.li !== undefined) {
    c_.ld = c.li
  }
  if (c.ld !== undefined) {
    c_.li = c.ld
  }
  if (c.na !== undefined) {
    c_.na = -c.na
  }
  if (c.lm !== undefined) {
    c_.lm = c.p[c.p.length - 1]
    c_.p = c.p.slice(0, c.p.length - 1).concat([c.lm])
  }
  return c_
}

json.invert = op =>
  Array.from(op.slice().reverse()).map(c => json.invertComponent(c))

json.checkValidOp = function (op) {}

const isArray = o => Object.prototype.toString.call(o) === '[object Array]'
json.checkList = function (elem) {
  if (!isArray(elem)) {
    throw new Error('Referenced element not a list')
  }
}

json.checkObj = function (elem) {
  if (elem.constructor !== Object) {
    throw new Error(
      `Referenced element not an object (it was ${JSON.stringify(elem)})`
    )
  }
}

json.apply = function (snapshot, op) {
  json.checkValidOp(op)
  op = clone(op)

  const container = { data: clone(snapshot) }

  try {
    for (let i = 0; i < op.length; i++) {
      const c = op[i]
      let parent = null
      let parentkey = null
      let elem = container
      let key = 'data'

      for (const p of Array.from(c.p)) {
        parent = elem
        parentkey = key
        elem = elem[key]
        key = p

        if (parent == null) {
          throw new Error('Path invalid')
        }
      }

      if (c.na !== undefined) {
        // Number add
        if (typeof elem[key] !== 'number') {
          throw new Error('Referenced element not a number')
        }
        elem[key] += c.na
      } else if (c.si !== undefined) {
        // String insert
        if (typeof elem !== 'string') {
          throw new Error(
            `Referenced element not a string (it was ${JSON.stringify(elem)})`
          )
        }
        parent[parentkey] = elem.slice(0, key) + c.si + elem.slice(key)
      } else if (c.sd !== undefined) {
        // String delete
        if (typeof elem !== 'string') {
          throw new Error('Referenced element not a string')
        }
        if (elem.slice(key, key + c.sd.length) !== c.sd) {
          throw new Error('Deleted string does not match')
        }
        parent[parentkey] = elem.slice(0, key) + elem.slice(key + c.sd.length)
      } else if (c.li !== undefined && c.ld !== undefined) {
        // List replace
        json.checkList(elem)

        // Should check the list element matches c.ld
        elem[key] = c.li
      } else if (c.li !== undefined) {
        // List insert
        json.checkList(elem)

        elem.splice(key, 0, c.li)
      } else if (c.ld !== undefined) {
        // List delete
        json.checkList(elem)

        // Should check the list element matches c.ld here too.
        elem.splice(key, 1)
      } else if (c.lm !== undefined) {
        // List move
        json.checkList(elem)
        if (c.lm !== key) {
          const e = elem[key]
          // Remove it...
          elem.splice(key, 1)
          // And insert it back.
          elem.splice(c.lm, 0, e)
        }
      } else if (c.oi !== undefined) {
        // Object insert / replace
        json.checkObj(elem)

        // Should check that elem[key] == c.od
        elem[key] = c.oi
      } else if (c.od !== undefined) {
        // Object delete
        json.checkObj(elem)

        // Should check that elem[key] == c.od
        delete elem[key]
      } else {
        throw new Error('invalid / missing instruction in op')
      }
    }
  } catch (error) {
    // TODO: Roll back all already applied changes. Write tests before implementing this code.
    throw error
  }

  return container.data
}

// Checks if two paths, p1 and p2, match.
json.pathMatches = function (p1, p2, ignoreLast) {
  if (p1.length !== p2.length) {
    return false
  }

  for (let i = 0; i < p1.length; i++) {
    const p = p1[i]
    if (p !== p2[i] && (!ignoreLast || i !== p1.length - 1)) {
      return false
    }
  }

  return true
}

json.append = function (dest, c) {
  let last
  c = clone(c)
  if (
    dest.length !== 0 &&
    json.pathMatches(c.p, (last = dest[dest.length - 1]).p)
  ) {
    if (last.na !== undefined && c.na !== undefined) {
      return (dest[dest.length - 1] = { p: last.p, na: last.na + c.na })
    } else if (
      last.li !== undefined &&
      c.li === undefined &&
      c.ld === last.li
    ) {
      // insert immediately followed by delete becomes a noop.
      if (last.ld !== undefined) {
        // leave the delete part of the replace
        return delete last.li
      } else {
        return dest.pop()
      }
    } else if (
      last.od !== undefined &&
      last.oi === undefined &&
      c.oi !== undefined &&
      c.od === undefined
    ) {
      return (last.oi = c.oi)
    } else if (c.lm !== undefined && c.p[c.p.length - 1] === c.lm) {
      return null // don't do anything
    } else {
      return dest.push(c)
    }
  } else {
    return dest.push(c)
  }
}
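// For example (illustrative values): appending { p: ['n'], na: 2 } after a
// trailing { p: ['n'], na: 3 } coalesces them into { p: ['n'], na: 5 }, and
// a list insert immediately followed by a matching delete cancels to a noop.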

json.compose = function (op1, op2) {
  json.checkValidOp(op1)
  json.checkValidOp(op2)

  const newOp = clone(op1)
  for (const c of Array.from(op2)) {
    json.append(newOp, c)
  }

  return newOp
}

json.normalize = function (op) {
  const newOp = []

  if (!isArray(op)) {
    op = [op]
  }

  for (const c of Array.from(op)) {
    if (c.p == null) {
      c.p = []
    }
    json.append(newOp, c)
  }

  return newOp
}

// hax, copied from test/types/json. Apparently this is still the fastest way to deep clone an object, assuming
// we have browser support for JSON.
// http://jsperf.com/cloning-an-object/12
var clone = o => JSON.parse(JSON.stringify(o))

json.commonPath = function (p1, p2) {
  p1 = p1.slice()
  p2 = p2.slice()
  p1.unshift('data')
  p2.unshift('data')
  p1 = p1.slice(0, p1.length - 1)
  p2 = p2.slice(0, p2.length - 1)
  if (p2.length === 0) {
    return -1
  }
  let i = 0
  while (p1[i] === p2[i] && i < p1.length) {
    i++
    if (i === p2.length) {
      return i - 1
    }
  }
}
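// For example (illustrative values): commonPath(['a', 1, 'x'], ['a', 1, 'y'])
// compares the parent paths and returns 2, the index of the deepest common
// parent; it returns undefined once the paths diverge above that level.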

// transform c so it applies to a document with otherC applied.
json.transformComponent = function (dest, c, otherC, type) {
  let oc
  c = clone(c)
  if (c.na !== undefined) {
    c.p.push(0)
  }
  if (otherC.na !== undefined) {
    otherC.p.push(0)
  }

  const common = json.commonPath(c.p, otherC.p)
  const common2 = json.commonPath(otherC.p, c.p)

  const cplength = c.p.length
  const otherCplength = otherC.p.length

  if (c.na !== undefined) {
    c.p.pop()
  } // hax
  if (otherC.na !== undefined) {
    otherC.p.pop()
  }

  if (otherC.na) {
    if (
      common2 != null &&
      otherCplength >= cplength &&
      otherC.p[common2] === c.p[common2]
    ) {
      if (c.ld !== undefined) {
        oc = clone(otherC)
        oc.p = oc.p.slice(cplength)
        c.ld = json.apply(clone(c.ld), [oc])
      } else if (c.od !== undefined) {
        oc = clone(otherC)
        oc.p = oc.p.slice(cplength)
        c.od = json.apply(clone(c.od), [oc])
      }
    }
    json.append(dest, c)
    return dest
  }

  if (
    common2 != null &&
    otherCplength > cplength &&
    c.p[common2] === otherC.p[common2]
  ) {
    // transform based on c
    if (c.ld !== undefined) {
      oc = clone(otherC)
      oc.p = oc.p.slice(cplength)
      c.ld = json.apply(clone(c.ld), [oc])
    } else if (c.od !== undefined) {
      oc = clone(otherC)
      oc.p = oc.p.slice(cplength)
      c.od = json.apply(clone(c.od), [oc])
    }
  }

  if (common != null) {
    let from, p, to
    const commonOperand = cplength === otherCplength
    // transform based on otherC
    if (otherC.na !== undefined) {
      // this case is handled above due to icky path hax
    } else if (otherC.si !== undefined || otherC.sd !== undefined) {
      // String op vs string op - pass through to text type
      if (c.si !== undefined || c.sd !== undefined) {
        if (!commonOperand) {
          throw new Error('must be a string?')
        }

        // Convert an op component to a text op component
        const convert = function (component) {
          const newC = { p: component.p[component.p.length - 1] }
          if (component.si) {
            newC.i = component.si
          } else {
            newC.d = component.sd
          }
          return newC
        }

        const tc1 = convert(c)
        const tc2 = convert(otherC)

        const res = []
        text._tc(res, tc1, tc2, type)
        for (const tc of Array.from(res)) {
          const jc = { p: c.p.slice(0, common) }
          jc.p.push(tc.p)
          if (tc.i != null) {
            jc.si = tc.i
          }
          if (tc.d != null) {
            jc.sd = tc.d
          }
          json.append(dest, jc)
        }
        return dest
      }
    } else if (otherC.li !== undefined && otherC.ld !== undefined) {
      if (otherC.p[common] === c.p[common]) {
        // noop
        if (!commonOperand) {
          // we're below the deleted element, so -> noop
          return dest
        } else if (c.ld !== undefined) {
          // we're trying to delete the same element, -> noop
          if (c.li !== undefined && type === 'left') {
            // we're both replacing one element with another. only one can
            // survive!
            c.ld = clone(otherC.li)
          } else {
            return dest
          }
        }
      }
    } else if (otherC.li !== undefined) {
      if (
        c.li !== undefined &&
        c.ld === undefined &&
        commonOperand &&
        c.p[common] === otherC.p[common]
      ) {
        // in li vs. li, left wins.
        if (type === 'right') {
          c.p[common]++
        }
      } else if (otherC.p[common] <= c.p[common]) {
        c.p[common]++
      }

      if (c.lm !== undefined) {
        if (commonOperand) {
          // otherC edits the same list we edit
          if (otherC.p[common] <= c.lm) {
            c.lm++
          }
        }
      }
      // changing c.from is handled above.
    } else if (otherC.ld !== undefined) {
      if (c.lm !== undefined) {
        if (commonOperand) {
          if (otherC.p[common] === c.p[common]) {
            // they deleted the thing we're trying to move
            return dest
          }
          // otherC edits the same list we edit
          p = otherC.p[common]
          from = c.p[common]
          to = c.lm
          if (p < to || (p === to && from < to)) {
            c.lm--
          }
        }
      }

      if (otherC.p[common] < c.p[common]) {
        c.p[common]--
      } else if (otherC.p[common] === c.p[common]) {
        if (otherCplength < cplength) {
          // we're below the deleted element, so -> noop
          return dest
        } else if (c.ld !== undefined) {
          if (c.li !== undefined) {
            // we're replacing, they're deleting. we become an insert.
            delete c.ld
          } else {
            // we're trying to delete the same element, -> noop
            return dest
          }
        }
      }
    } else if (otherC.lm !== undefined) {
      if (c.lm !== undefined && cplength === otherCplength) {
        // lm vs lm, here we go!
        from = c.p[common]
        to = c.lm
        const otherFrom = otherC.p[common]
        const otherTo = otherC.lm
        if (otherFrom !== otherTo) {
          // if otherFrom == otherTo, we don't need to change our op.

          // where did my thing go?
          if (from === otherFrom) {
            // they moved it! tie break.
            if (type === 'left') {
              c.p[common] = otherTo
              if (from === to) {
                // ugh
                c.lm = otherTo
              }
            } else {
              return dest
            }
          } else {
            // they moved around it
            if (from > otherFrom) {
              c.p[common]--
            }
            if (from > otherTo) {
              c.p[common]++
            } else if (from === otherTo) {
              if (otherFrom > otherTo) {
                c.p[common]++
                if (from === to) {
                  // ugh, again
                  c.lm++
                }
              }
            }

            // step 2: where am i going to put it?
            if (to > otherFrom) {
              c.lm--
            } else if (to === otherFrom) {
              if (to > from) {
                c.lm--
              }
            }
            if (to > otherTo) {
              c.lm++
            } else if (to === otherTo) {
              // if we're both moving in the same direction, tie break
              if (
                (otherTo > otherFrom && to > from) ||
                (otherTo < otherFrom && to < from)
              ) {
                if (type === 'right') {
                  c.lm++
                }
              } else {
                if (to > from) {
                  c.lm++
                } else if (to === otherFrom) {
                  c.lm--
                }
              }
            }
          }
        }
      } else if (c.li !== undefined && c.ld === undefined && commonOperand) {
        // li
        from = otherC.p[common]
        to = otherC.lm
        p = c.p[common]
        if (p > from) {
          c.p[common]--
        }
        if (p > to) {
          c.p[common]++
        }
      } else {
        // ld, ld+li, si, sd, na, oi, od, oi+od, any li on an element beneath
        // the lm
        //
        // i.e. things care about where their item is after the move.
        from = otherC.p[common]
        to = otherC.lm
        p = c.p[common]
        if (p === from) {
          c.p[common] = to
        } else {
          if (p > from) {
            c.p[common]--
          }
          if (p > to) {
            c.p[common]++
          } else if (p === to) {
            if (from > to) {
              c.p[common]++
            }
          }
        }
      }
    } else if (otherC.oi !== undefined && otherC.od !== undefined) {
      if (c.p[common] === otherC.p[common]) {
        if (c.oi !== undefined && commonOperand) {
          // we inserted where someone else replaced
          if (type === 'right') {
            // left wins
            return dest
          } else {
            // we win, make our op replace what they inserted
            c.od = otherC.oi
          }
        } else {
          // -> noop if the other component is deleting the same object (or any
          // parent)
          return dest
        }
      }
    } else if (otherC.oi !== undefined) {
      if (c.oi !== undefined && c.p[common] === otherC.p[common]) {
        // left wins if we try to insert at the same place
        if (type === 'left') {
          json.append(dest, { p: c.p, od: otherC.oi })
        } else {
          return dest
        }
      }
    } else if (otherC.od !== undefined) {
      if (c.p[common] === otherC.p[common]) {
        if (!commonOperand) {
          return dest
        }
        if (c.oi !== undefined) {
          delete c.od
        } else {
          return dest
        }
      }
    }
  }

  json.append(dest, c)
  return dest
}

if (typeof WEB !== 'undefined' && WEB !== null) {
  if (!exports.types) {
    exports.types = {}
  }

  // This is kind of awful - come up with a better way to hook this helper code up.
  exports._bt(json, json.transformComponent, json.checkValidOp, json.append)

  // [] is used to prevent closure from renaming types.text
  exports.types.json = json
} else {
  module.exports = json

  require('./helpers').bootstrapTransform(
    json,
    json.transformComponent,
    json.checkValidOp,
    json.append
  )
}
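A rough sketch of the type in use, standalone outside the web build (require path and values illustrative):

const json = require('./app/js/sharejs/types/json')

let snapshot = { tags: ['a', 'c'] }
// Insert 'b' at index 1 of the list at path ['tags'].
snapshot = json.apply(snapshot, [{ p: ['tags', 1], li: 'b' }])
// snapshot is now { tags: ['a', 'b', 'c'] }

// Transform a concurrent insert at the same index; 'left' wins the tie,
// so the 'right' op is shifted one slot down the list.
const left = [{ p: ['tags', 1], li: 'x' }]
const right = json.transform([{ p: ['tags', 1], li: 'y' }], left, 'right')
// right is [{ p: ['tags', 2], li: 'y' }]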
883
services/document-updater/app/js/sharejs/types/model.js
Normal file
@@ -0,0 +1,883 @@
/* eslint-disable
    no-console,
    no-return-assign,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * DS103: Rewrite code to no longer use __guard__
 * DS104: Avoid inline assignments
 * DS204: Change includes calls to have a more natural evaluation order
 * DS205: Consider reworking code to avoid use of IIFEs
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
// The model of all the ops. Responsible for applying & transforming remote deltas
// and managing the storage layer.
//
// Actual storage is handled by the database wrappers in db/*, wrapped by DocCache

let Model
const { EventEmitter } = require('events')

const queue = require('./syncqueue')
const types = require('../types')

const isArray = o => Object.prototype.toString.call(o) === '[object Array]'

// This constructor creates a new Model object. There will be one model object
// per server context.
//
// The model object is responsible for a lot of things:
//
// - It manages the interactions with the database
// - It maintains (in memory) a set of all active documents
// - It calls out to the OT functions when necessary
//
// The model is an event emitter. It emits the following events:
//
// create(docName, data): A document has been created with the specified name & data
module.exports = Model = function (db, options) {
  // db can be null if the user doesn't want persistence.

  let getOps
  if (!(this instanceof Model)) {
    return new Model(db, options)
  }

  const model = this

  if (options == null) {
    options = {}
  }

  // This is a cache of 'live' documents.
  //
  // The cache is a map from docName -> {
  //   ops: [{op, meta}]
  //   snapshot
  //   type
  //   v
  //   meta
  //   eventEmitter
  //   reapTimer
  //   committedVersion: v
  //   snapshotWriteLock: bool to make sure writeSnapshot isn't re-entrant
  //   dbMeta: database specific data
  //   opQueue: syncQueue for processing ops
  // }
  //
  // The ops list contains the document's last options.numCachedOps ops. (Or all
  // of them if we're using a memory store).
  //
  // Documents are stored in this set so long as the document has been accessed in
  // the last few seconds (options.reapTime) OR at least one client has the document
  // open. I don't know if I should keep open (but not being edited) documents live -
  // maybe if a client has a document open but the document isn't being edited, I should
  // flush it from the cache.
  //
  // In any case, the API to model is designed such that if we want to change that later
  // it should be pretty easy to do so without any external-to-the-model code changes.
  const docs = {}

  // This is a map from docName -> [callback]. It is used when a document hasn't been
  // cached and multiple getSnapshot() / getVersion() requests come in. All requests
  // are added to the callback list and called when db.getSnapshot() returns.
  //
  // callback(error, snapshot data)
  const awaitingGetSnapshot = {}

  // The time that documents which no clients have open will stay in the cache.
  // Should be > 0.
  if (options.reapTime == null) {
    options.reapTime = 3000
  }

  // The number of operations the cache holds before reusing the space
  if (options.numCachedOps == null) {
    options.numCachedOps = 10
  }

  // This option forces documents to be reaped, even when there's no database backend.
  // This is useful when you don't care about persistence and don't want to gradually
  // fill memory.
  //
  // You might want to set reapTime to a day or something.
  if (options.forceReaping == null) {
    options.forceReaping = false
  }

  // Until I come up with a better strategy, we'll save a copy of the document snapshot
  // to the database every ~20 submitted ops.
  if (options.opsBeforeCommit == null) {
    options.opsBeforeCommit = 20
  }

  // It takes some processing time to transform client ops. The server will punt ops back to the
  // client to transform if they're too old.
  if (options.maximumAge == null) {
    options.maximumAge = 40
  }

  // **** Cache API methods

  // It's important that all ops are applied in order. This helper method creates the op submission queue
  // for a single document. This contains the logic for transforming & applying ops.
  const makeOpQueue = (docName, doc) =>
    queue(function (opData, callback) {
      if (!(opData.v >= 0)) {
        return callback('Version missing')
      }
      if (opData.v > doc.v) {
        return callback('Op at future version')
      }

      // Punt the transforming work back to the client if the op is too old.
      if (opData.v + options.maximumAge < doc.v) {
        return callback('Op too old')
      }

      if (!opData.meta) {
        opData.meta = {}
      }
      opData.meta.ts = Date.now()

      // We'll need to transform the op to the current version of the document. This
      // calls the callback immediately if opVersion == doc.v.
      return getOps(docName, opData.v, doc.v, function (error, ops) {
        let snapshot
        if (error) {
          return callback(error)
        }

        if (doc.v - opData.v !== ops.length) {
          // This should never happen. It indicates that we didn't get all the ops we
          // asked for. It's important that the submitted op is correctly transformed.
          console.error(
            `Could not get old ops in model for document ${docName}`
          )
          console.error(
            `Expected ops ${opData.v} to ${doc.v} and got ${ops.length} ops`
          )
          return callback('Internal error')
        }

        if (ops.length > 0) {
          try {
            // If there's enough ops, it might be worth spinning this out into a webworker thread.
            for (const oldOp of Array.from(ops)) {
              // Dup detection works by sending the id(s) the op has been submitted with previously.
              // If the id matches, we reject it. The client can also detect the op has been submitted
              // already if it sees its own previous id in the ops it sees when it does catchup.
              if (
                oldOp.meta.source &&
                opData.dupIfSource &&
                Array.from(opData.dupIfSource).includes(oldOp.meta.source)
              ) {
                return callback('Op already submitted')
              }

              opData.op = doc.type.transform(opData.op, oldOp.op, 'left')
              opData.v++
            }
          } catch (error1) {
            error = error1
            console.error(error.stack)
            return callback(error.message)
          }
        }

        try {
          snapshot = doc.type.apply(doc.snapshot, opData.op)
        } catch (error2) {
          error = error2
          console.error(error.stack)
          return callback(error.message)
        }

        // The op data should be at the current version, and the new document data should be at
        // the next version.
        //
        // This should never happen in practice, but it's a nice little check to make sure everything
        // is hunky-dory.
        if (opData.v !== doc.v) {
          // This should never happen.
          console.error(
            'Version mismatch detected in model. File a ticket - this is a bug.'
          )
          console.error(`Expecting ${opData.v} == ${doc.v}`)
          return callback('Internal error')
        }

        // newDocData = {snapshot, type:type.name, v:opVersion + 1, meta:docData.meta}
        const writeOp =
          (db != null ? db.writeOp : undefined) ||
          ((docName, newOpData, callback) => callback())

        return writeOp(docName, opData, function (error) {
          if (error) {
            // The user should probably know about this.
            console.warn(`Error writing ops to database: ${error}`)
            return callback(error)
          }

          __guardMethod__(options.stats, 'writeOp', o => o.writeOp())

          // This is needed when we emit the 'change' event, below.
          const oldSnapshot = doc.snapshot

          // All the heavy lifting is now done. Finally, we'll update the cache with the new data
          // and (maybe!) save a new document snapshot to the database.

          doc.v = opData.v + 1
          doc.snapshot = snapshot

          doc.ops.push(opData)
          if (db && doc.ops.length > options.numCachedOps) {
            doc.ops.shift()
          }

          model.emit('applyOp', docName, opData, snapshot, oldSnapshot)
          doc.eventEmitter.emit('op', opData, snapshot, oldSnapshot)

          // The callback is called with the version of the document at which the op was applied.
          // This is the op.v after transformation, and it's doc.v - 1.
          callback(null, opData.v)

          // I need a decent strategy here for deciding whether or not to save the snapshot.
          //
          // The 'right' strategy looks something like "Store the snapshot whenever the snapshot
          // is smaller than the accumulated op data". For now, I'll just store it every 20
          // ops or something. (Configurable with doc.committedVersion)
          if (
            !doc.snapshotWriteLock &&
            doc.committedVersion + options.opsBeforeCommit <= doc.v
          ) {
            return tryWriteSnapshot(docName, function (error) {
              if (error) {
                return console.warn(
                  `Error writing snapshot ${error}. This is nonfatal`
                )
              }
            })
          }
        })
      })
    })

  // Add the data for the given docName to the cache. The named document shouldn't already
  // exist in the doc set.
  //
  // Returns the new doc.
  const add = function (docName, error, data, committedVersion, ops, dbMeta) {
    let callback, doc
    const callbacks = awaitingGetSnapshot[docName]
    delete awaitingGetSnapshot[docName]

    if (error) {
      if (callbacks) {
        for (callback of Array.from(callbacks)) {
          callback(error)
        }
      }
    } else {
      doc = docs[docName] = {
        snapshot: data.snapshot,
        v: data.v,
        type: data.type,
        meta: data.meta,

        // Cache of ops
        ops: ops || [],

        eventEmitter: new EventEmitter(),

        // Timer before the document will be invalidated from the cache (if the document has no
        // listeners)
        reapTimer: null,

        // Version of the snapshot that's in the database
        committedVersion: committedVersion != null ? committedVersion : data.v,
        snapshotWriteLock: false,
        dbMeta,
      }

      doc.opQueue = makeOpQueue(docName, doc)

      refreshReapingTimeout(docName)
      model.emit('add', docName, data)
      if (callbacks) {
        for (callback of Array.from(callbacks)) {
          callback(null, doc)
        }
      }
    }

    return doc
  }

  // This is a little helper wrapper around db.getOps. It does two things:
  //
  // - If there's no database set, it returns an error to the callback
  // - It adds version numbers to each op returned from the database
  //   (These can be inferred from context so the DB doesn't store them, but it's useful to have them).
  const getOpsInternal = function (docName, start, end, callback) {
    if (!db) {
      return typeof callback === 'function'
        ? callback('Document does not exist')
        : undefined
    }

    return db.getOps(docName, start, end, function (error, ops) {
      if (error) {
        return typeof callback === 'function' ? callback(error) : undefined
      }

      let v = start
      for (const op of Array.from(ops)) {
        op.v = v++
      }

      return typeof callback === 'function' ? callback(null, ops) : undefined
    })
  }

  // Load the named document into the cache. This function is re-entrant.
  //
  // The callback is called with (error, doc)
  const load = function (docName, callback) {
    if (docs[docName]) {
      // The document is already loaded. Return immediately.
      __guardMethod__(options.stats, 'cacheHit', o => o.cacheHit('getSnapshot'))
      return callback(null, docs[docName])
    }

    // We're a memory store. If we don't have it, nobody does.
    if (!db) {
      return callback('Document does not exist')
    }

    const callbacks = awaitingGetSnapshot[docName]

    // The document is being loaded already. Add ourselves as a callback.
    if (callbacks) {
      return callbacks.push(callback)
    }

    __guardMethod__(options.stats, 'cacheMiss', o1 =>
      o1.cacheMiss('getSnapshot')
    )

    // The document isn't loaded and isn't being loaded. Load it.
    awaitingGetSnapshot[docName] = [callback]
    return db.getSnapshot(docName, function (error, data, dbMeta) {
      if (error) {
        return add(docName, error)
      }

      const type = types[data.type]
      if (!type) {
        console.warn(`Type '${data.type}' missing`)
        return callback('Type not found')
      }
      data.type = type

      const committedVersion = data.v

      // The server can close without saving the most recent document snapshot.
      // In this case, there are extra ops which need to be applied before
      // returning the snapshot.
      return getOpsInternal(docName, data.v, null, function (error, ops) {
        if (error) {
          return callback(error)
        }

        if (ops.length > 0) {
          console.log(`Catchup ${docName} ${data.v} -> ${data.v + ops.length}`)

          try {
            for (const op of Array.from(ops)) {
              data.snapshot = type.apply(data.snapshot, op.op)
              data.v++
            }
          } catch (e) {
            // This should never happen - it indicates that what's in the
            // database is invalid.
            console.error(`Op data invalid for ${docName}: ${e.stack}`)
            return callback('Op data invalid')
          }
        }

        model.emit('load', docName, data)
        return add(docName, error, data, committedVersion, ops, dbMeta)
      })
    })
  }

  // This makes sure the cache contains a document. If the doc cache doesn't contain
  // a document, it is loaded from the database and stored.
  //
  // Documents are stored so long as either:
  // - They have been accessed within the past #{PERIOD}
  // - At least one client has the document open
  var refreshReapingTimeout = function (docName) {
    const doc = docs[docName]
    if (!doc) {
      return
    }

    // I want to let the clients list be updated before this is called.
    return process.nextTick(function () {
      // This is an awkward way to find out the number of clients on a document. If this
      // causes performance issues, add a numClients field to the document.
      //
      // The first check is because it's possible that between refreshReapingTimeout being called and this
      // event being fired, someone called delete() on the document and hence the doc is something else now.
      if (
        doc === docs[docName] &&
        doc.eventEmitter.listeners('op').length === 0 &&
        (db || options.forceReaping) &&
        doc.opQueue.busy === false
      ) {
        let reapTimer
        clearTimeout(doc.reapTimer)
        return (doc.reapTimer = reapTimer =
          setTimeout(
            () =>
              tryWriteSnapshot(docName, function () {
                // If the reaping timeout has been refreshed while we're writing the snapshot, or if we're
                // in the middle of applying an operation, don't reap.
                if (
                  docs[docName].reapTimer === reapTimer &&
                  doc.opQueue.busy === false
                ) {
                  return delete docs[docName]
                }
              }),
            options.reapTime
          ))
      }
    })
  }

  var tryWriteSnapshot = function (docName, callback) {
    if (!db) {
      return typeof callback === 'function' ? callback() : undefined
    }

    const doc = docs[docName]

    // The doc is closed
    if (!doc) {
      return typeof callback === 'function' ? callback() : undefined
    }

    // The document is already saved.
    if (doc.committedVersion === doc.v) {
      return typeof callback === 'function' ? callback() : undefined
    }

    if (doc.snapshotWriteLock) {
      return typeof callback === 'function'
        ? callback('Another snapshot write is in progress')
        : undefined
    }

    doc.snapshotWriteLock = true

    __guardMethod__(options.stats, 'writeSnapshot', o => o.writeSnapshot())

    const writeSnapshot =
      (db != null ? db.writeSnapshot : undefined) ||
      ((docName, docData, dbMeta, callback) => callback())

    const data = {
      v: doc.v,
      meta: doc.meta,
      snapshot: doc.snapshot,
      // The database doesn't know about object types.
      type: doc.type.name,
    }

    // Commit snapshot.
    return writeSnapshot(docName, data, doc.dbMeta, function (error, dbMeta) {
      doc.snapshotWriteLock = false

      // We have to use data.v here because the version in the doc could
      // have been updated between the call to writeSnapshot() and now.
      doc.committedVersion = data.v
      doc.dbMeta = dbMeta

      return typeof callback === 'function' ? callback(error) : undefined
    })
  }

  // *** Model interface methods

  // Create a new document.
  //
  // data should be {snapshot, type, [meta]}. The version of a new document is 0.
  this.create = function (docName, type, meta, callback) {
    if (typeof meta === 'function') {
      ;[meta, callback] = Array.from([{}, meta])
    }

    if (docName.match(/\//)) {
      return typeof callback === 'function'
        ? callback('Invalid document name')
        : undefined
    }
    if (docs[docName]) {
      return typeof callback === 'function'
        ? callback('Document already exists')
        : undefined
    }

    if (typeof type === 'string') {
      type = types[type]
    }
    if (!type) {
      return typeof callback === 'function'
        ? callback('Type not found')
        : undefined
    }

    const data = {
      snapshot: type.create(),
      type: type.name,
      meta: meta || {},
      v: 0,
    }

    const done = function (error, dbMeta) {
      // dbMeta can be used to cache extra state needed by the database to access the document, like an ID or something.
      if (error) {
        return typeof callback === 'function' ? callback(error) : undefined
      }

      // From here on we'll store the object version of the type name.
      data.type = type
      add(docName, null, data, 0, [], dbMeta)
      model.emit('create', docName, data)
      return typeof callback === 'function' ? callback() : undefined
    }

    if (db) {
      return db.create(docName, data, done)
    } else {
      return done()
    }
  }

  // Permanently deletes the specified document.
  // If listeners are attached, they are removed.
  //
  // The callback is called with (error) if there was an error. If error is null / undefined, the
  // document was deleted.
  //
  // WARNING: This isn't well supported throughout the code. (Eg, streaming clients aren't told about the
  // deletion. Subsequent op submissions will fail).
  this.delete = function (docName, callback) {
    const doc = docs[docName]

    if (doc) {
      clearTimeout(doc.reapTimer)
      delete docs[docName]
    }

    const done = function (error) {
      if (!error) {
        model.emit('delete', docName)
      }
      return typeof callback === 'function' ? callback(error) : undefined
    }

    if (db) {
      return db.delete(docName, doc != null ? doc.dbMeta : undefined, done)
    } else {
      return done(!doc ? 'Document does not exist' : undefined)
    }
  }

  // This gets all operations from [start...end]. (That is, it's not inclusive.)
  //
  // end can be null. This means 'get me all ops from start'.
  //
  // Each op returned is in the form {op:o, meta:m, v:version}.
  //
  // Callback is called with (error, [ops])
  //
  // If the document does not exist, getOps doesn't necessarily return an error. This is because
  // it's awkward to figure out whether or not the document exists for things
  // like the redis database backend. I guess it's a bit gross having this inconsistent
  // with the other DB calls, but it's certainly convenient.
  //
  // Use getVersion() to determine if a document actually exists, if that's what you're
  // after.
  this.getOps = getOps = function (docName, start, end, callback) {
    // getOps will only use the op cache if it's there. It won't fill the op cache in.
    if (!(start >= 0)) {
      throw new Error('start must be 0+')
    }

    if (typeof end === 'function') {
      ;[end, callback] = Array.from([null, end])
    }

    const ops = docs[docName] != null ? docs[docName].ops : undefined

    if (ops) {
      const version = docs[docName].v

      // Ops contains an array of ops. The last op in the list is the last op applied
      if (end == null) {
        end = version
      }
      start = Math.min(start, end)

      if (start === end) {
        return callback(null, [])
      }

      // Base is the version number of the oldest op we have cached
      const base = version - ops.length

      // If the database is null, we'll trim to the ops we do have and hope that's enough.
      if (start >= base || db === null) {
        refreshReapingTimeout(docName)
        if (options.stats != null) {
          options.stats.cacheHit('getOps')
        }

        return callback(null, ops.slice(start - base, end - base))
      }
    }

    if (options.stats != null) {
      options.stats.cacheMiss('getOps')
    }

    return getOpsInternal(docName, start, end, callback)
  }

  // Gets the snapshot data for the specified document.
  // getSnapshot(docName, callback)
  // Callback is called with (error, {v: <version>, type: <type>, snapshot: <snapshot>, meta: <meta>})
  this.getSnapshot = (docName, callback) =>
    load(docName, (error, doc) =>
      callback(
        error,
        doc
          ? { v: doc.v, type: doc.type, snapshot: doc.snapshot, meta: doc.meta }
          : undefined
      )
    )

  // Gets the latest version # of the document.
  // getVersion(docName, callback)
  // callback is called with (error, version).
  this.getVersion = (docName, callback) =>
    load(docName, (error, doc) =>
      callback(error, doc != null ? doc.v : undefined)
    )

  // Apply an op to the specified document.
  // The callback is passed (error, applied version #)
  // opData = {op:op, v:v, meta:metadata}
  //
  // Ops are queued before being applied so that the following code applies op C before op B:
  //   model.applyOp 'doc', OPA, -> model.applyOp 'doc', OPB
  //   model.applyOp 'doc', OPC
  this.applyOp = (
    docName,
    opData,
    callback // All the logic for this is in makeOpQueue, above.
  ) =>
    load(docName, function (error, doc) {
      if (error) {
        return callback(error)
      }

      return process.nextTick(() =>
        doc.opQueue(opData, function (error, newVersion) {
          refreshReapingTimeout(docName)
          return typeof callback === 'function'
            ? callback(error, newVersion)
            : undefined
        })
      )
    })

  // TODO: store (some) metadata in DB
  // TODO: op and meta should be combinable in the op that gets sent
  this.applyMetaOp = function (docName, metaOpData, callback) {
    const { path, value } = metaOpData.meta

    if (!isArray(path)) {
      return typeof callback === 'function'
        ? callback('path should be an array')
        : undefined
    }

    return load(docName, function (error, doc) {
      if (error != null) {
        return typeof callback === 'function' ? callback(error) : undefined
      } else {
        let applied = false
        switch (path[0]) {
          case 'shout':
            doc.eventEmitter.emit('op', metaOpData)
            applied = true
            break
        }

        if (applied) {
          model.emit('applyMetaOp', docName, path, value)
        }
        return typeof callback === 'function'
          ? callback(null, doc.v)
          : undefined
      }
    })
  }

  // Listen to all ops from the specified version. If version is in the past, all
  // ops since that version are sent immediately to the listener.
  //
  // The callback is called once the listener is attached, but before any ops have been passed
  // to the listener.
  //
  // This will _not_ edit the document metadata.
  //
  // If there are any listeners, we don't purge the document from the cache. But be aware, this behaviour
  // might change in a future version.
  //
  // version is the document version at which the document is opened. It can be left out if you want to open
  // the document at the most recent version.
  //
  // listener is called with (opData) each time an op is applied.
  //
  // callback(error, openedVersion)
  this.listen = function (docName, version, listener, callback) {
    if (typeof version === 'function') {
      ;[version, listener, callback] = Array.from([null, version, listener])
    }

    return load(docName, function (error, doc) {
      if (error) {
        return typeof callback === 'function' ? callback(error) : undefined
      }

      clearTimeout(doc.reapTimer)

      if (version != null) {
        return getOps(docName, version, null, function (error, data) {
          if (error) {
            return typeof callback === 'function' ? callback(error) : undefined
          }

          doc.eventEmitter.on('op', listener)
          if (typeof callback === 'function') {
            callback(null, version)
          }
          return (() => {
            const result = []
            for (const op of Array.from(data)) {
              var needle
              listener(op)

              // The listener may well remove itself during the catchup phase. If this happens, break early.
              // This is done in a quite inefficient way. (O(n) where n = #listeners on doc)
              if (
                ((needle = listener),
                !Array.from(doc.eventEmitter.listeners('op')).includes(needle))
              ) {
                break
              } else {
                result.push(undefined)
              }
            }
            return result
          })()
        })
      } else {
        // Version is null / undefined. Just add the listener.
        doc.eventEmitter.on('op', listener)
        return typeof callback === 'function'
          ? callback(null, doc.v)
          : undefined
      }
    })
  }

  // Remove a listener for a particular document.
  //
  // removeListener(docName, listener)
  //
  // This is synchronous.
  this.removeListener = function (docName, listener) {
    // The document should already be loaded.
    const doc = docs[docName]
    if (!doc) {
      throw new Error('removeListener called but document not loaded')
    }

    doc.eventEmitter.removeListener('op', listener)
    return refreshReapingTimeout(docName)
  }

  // Flush saves all snapshot data to the database. I'm not sure whether or not this is actually needed -
  // sharejs will happily replay uncommitted ops when documents are re-opened anyway.
  this.flush = function (callback) {
    if (!db) {
      return typeof callback === 'function' ? callback() : undefined
    }

    let pendingWrites = 0

    for (const docName in docs) {
      const doc = docs[docName]
      if (doc.committedVersion < doc.v) {
        pendingWrites++
        // I'm hoping writeSnapshot will always happen in another thread.
        tryWriteSnapshot(docName, () =>
          process.nextTick(function () {
            pendingWrites--
            if (pendingWrites === 0) {
              return typeof callback === 'function' ? callback() : undefined
            }
          })
        )
      }
    }

    // If nothing was queued, terminate immediately.
    if (pendingWrites === 0) {
      return typeof callback === 'function' ? callback() : undefined
    }
  }

  // Close the database connection. This is needed so nodejs can shut down cleanly.
  this.closeDb = function () {
    __guardMethod__(db, 'close', o => o.close())
    return (db = null)
  }
}

// Model inherits from EventEmitter.
Model.prototype = new EventEmitter()

function __guardMethod__(obj, methodName, transform) {
  if (
    typeof obj !== 'undefined' &&
    obj !== null &&
    typeof obj[methodName] === 'function'
  ) {
    return transform(obj, methodName)
  } else {
    return undefined
  }
}
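A rough usage sketch of the model (in-memory, no database; names and values illustrative):

const Model = require('./app/js/sharejs/types/model')
const model = new Model(null) // null db => pure in-memory store

model.create('doc1', 'text', err => {
  if (err) throw err
  // Submit an insert at version 0; the callback receives the applied version.
  model.applyOp('doc1', { op: [{ i: 'hi', p: 0 }], v: 0 }, (err, v) => {
    if (err) throw err
    model.getSnapshot('doc1', (err, data) => console.log(data.snapshot, v))
  })
})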
54
services/document-updater/app/js/sharejs/types/simple.js
Normal file
@@ -0,0 +1,54 @@
// TODO: This file was created by bulk-decaffeinate.
// Sanity-check the conversion and remove this comment.
/*
 * decaffeinate suggestions:
 * DS102: Remove unnecessary code created because of implicit returns
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
// This is a really simple OT type. It's not compiled with the web client, but it could be.
//
// It's mostly included for demonstration purposes and it's used in a lot of unit tests.
//
// This defines a really simple text OT type which only allows inserts. (No deletes).
//
// Ops look like:
//   {position:#, text:"asdf"}
//
// Document snapshots look like:
//   {str:string}

module.exports = {
  // The name of the OT type. The type is stored in types[type.name]. The name can be
  // used in place of the actual type in all the API methods.
  name: 'simple',

  // Create a new document snapshot
  create() {
    return { str: '' }
  },

  // Apply the given op to the document snapshot. Returns the new snapshot.
  //
  // The original snapshot should not be modified.
  apply(snapshot, op) {
    if (!(op.position >= 0 && op.position <= snapshot.str.length)) {
      throw new Error('Invalid position')
    }

    let { str } = snapshot
    str = str.slice(0, op.position) + op.text + str.slice(op.position)
    return { str }
  },

  // transform op1 by op2. Return transformed version of op1.
  // sym describes the symmetry of the op. It's 'left' or 'right' depending on whether the
  // op being transformed comes from the client or the server.
  transform(op1, op2, sym) {
    let pos = op1.position
    if (op2.position < pos || (op2.position === pos && sym === 'left')) {
      pos += op2.text.length
    }

    return { position: pos, text: op1.text }
  },
}
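A brief usage sketch of the 'simple' type above (an illustration, not part of the original diff; it assumes the module is saved as simple.js next to this snippet):

// simple-demo.js - exercising create/apply/transform of the 'simple' OT type
const simple = require('./simple')

let snapshot = simple.create() // { str: '' }
snapshot = simple.apply(snapshot, { position: 0, text: 'hello' })
snapshot = simple.apply(snapshot, { position: 5, text: ' world' })

// Two concurrent inserts at the same position. With sym === 'left' the
// transformed op is shifted past the other insert, so both survive.
const clientOp = { position: 5, text: '!' }
const serverOp = { position: 5, text: '?' }
const clientOpT = simple.transform(clientOp, serverOp, 'left') // position 6

const afterServer = simple.apply(snapshot, serverOp) // 'hello? world'
console.log(simple.apply(afterServer, clientOpT).str) // 'hello?! world'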
60
services/document-updater/app/js/sharejs/types/syncqueue.js
Normal file
@ -0,0 +1,60 @@
// TODO: This file was created by bulk-decaffeinate.
// Sanity-check the conversion and remove this comment.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
// A synchronous processing queue. The queue calls process on the arguments,
// ensuring that process() is only executing once at a time.
//
// process(data, callback) _MUST_ eventually call its callback.
//
// Example:
//
//     queue = require 'syncqueue'
//
//     fn = queue (data, callback) ->
//         asyncthing data, ->
//             callback(321)
//
//     fn(1)
//     fn(2)
//     fn(3, (result) -> console.log(result))
//
//     ^--- async thing will only be running once at any time.

module.exports = function (process) {
  if (typeof process !== 'function') {
    throw new Error('process is not a function')
  }
  const queue = []

  const enqueue = function (data, callback) {
    queue.push([data, callback])
    return flush()
  }

  enqueue.busy = false

  var flush = function () {
    if (enqueue.busy || queue.length === 0) {
      return
    }

    enqueue.busy = true
    const [data, callback] = Array.from(queue.shift())
    return process(data, function (...result) {
      // TODO: Make this not use varargs - varargs are really slow.
      enqueue.busy = false
      // This is called after busy = false so a user can check if enqueue.busy is set in the callback.
      if (callback) {
        callback.apply(null, result)
      }
      return flush()
    })
  }

  return enqueue
}
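A minimal sketch of the queue above in use (an illustration, not part of the original diff; it assumes the module is saved as syncqueue.js, and setTimeout stands in for any async work):

const makeQueue = require('./syncqueue')

// Only one invocation of the worker runs at a time, in FIFO order.
const enqueue = makeQueue(function (data, callback) {
  setTimeout(() => callback(data * 2), 10)
})

enqueue(1)
enqueue(2)
enqueue(3, result => console.log('last result:', result)) // last result: 6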
52
services/document-updater/app/js/sharejs/types/text-api.js
Normal file
@ -0,0 +1,52 @@
// TODO: This file was created by bulk-decaffeinate.
// Sanity-check the conversion and remove this comment.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
// Text document API for text

let text
if (typeof WEB === 'undefined') {
  text = require('./text')
}

text.api = {
  provides: { text: true },

  // The number of characters in the string
  getLength() {
    return this.snapshot.length
  },

  // Get the text contents of a document
  getText() {
    return this.snapshot
  },

  insert(pos, text, callback) {
    const op = [{ p: pos, i: text }]

    this.submitOp(op, callback)
    return op
  },

  del(pos, length, callback) {
    const op = [{ p: pos, d: this.snapshot.slice(pos, pos + length) }]

    this.submitOp(op, callback)
    return op
  },

  _register() {
    return this.on('remoteop', function (op) {
      return Array.from(op).map(component =>
        component.i !== undefined
          ? this.emit('insert', component.p, component.i)
          : this.emit('delete', component.p, component.d)
      )
    })
  },
}
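A sketch of driving the mixin above from a stub document object (an illustration, not part of the original diff; the stub mimics the few fields the mixin expects - snapshot, submitOp and the EventEmitter interface - and assumes ./text and ./helpers from this directory are available):

const { EventEmitter } = require('events')
require('./text-api') // attaches the api mixin to the text type
const text = require('./text')

const doc = new EventEmitter()
doc.snapshot = 'hello world'
doc.submitOp = function (op, callback) {
  this.snapshot = text.apply(this.snapshot, op)
  if (callback) callback(null, op)
}
Object.assign(doc, text.api)

console.log(doc.getLength()) // 11
doc.insert(5, ',') // snapshot is now 'hello, world'
doc.del(0, 7) // snapshot is now 'world'
console.log(doc.getText()) // 'world'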
76
services/document-updater/app/js/sharejs/types/text-composable-api.js
Normal file
@ -0,0 +1,76 @@
/* eslint-disable
    no-undef,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * DS205: Consider reworking code to avoid use of IIFEs
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
// Text document API for text-composable

let type
if (typeof WEB !== 'undefined' && WEB !== null) {
  type = exports.types['text-composable']
} else {
  type = require('./text-composable')
}

type.api = {
  provides: { text: true },

  // The number of characters in the string
  getLength() {
    return this.snapshot.length
  },

  // Get the text contents of a document
  getText() {
    return this.snapshot
  },

  insert(pos, text, callback) {
    const op = type.normalize([pos, { i: text }, this.snapshot.length - pos])

    this.submitOp(op, callback)
    return op
  },

  del(pos, length, callback) {
    const op = type.normalize([
      pos,
      { d: this.snapshot.slice(pos, pos + length) },
      this.snapshot.length - pos - length,
    ])

    this.submitOp(op, callback)
    return op
  },

  _register() {
    return this.on('remoteop', function (op) {
      let pos = 0
      return (() => {
        const result = []
        for (const component of Array.from(op)) {
          if (typeof component === 'number') {
            result.push((pos += component))
          } else if (component.i !== undefined) {
            this.emit('insert', pos, component.i)
            result.push((pos += component.i.length))
          } else {
            // delete
            // We don't increment pos, because the position
            // specified is after the delete has happened.
            result.push(this.emit('delete', pos, component.d))
          }
        }
        return result
      })()
    })
  },
}
398
services/document-updater/app/js/sharejs/types/text-composable.js
Normal file
@ -0,0 +1,398 @@
/* eslint-disable
    no-cond-assign,
    no-return-assign,
    no-undef,
    no-unused-vars,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * DS205: Consider reworking code to avoid use of IIFEs
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
// An alternate composable implementation for text. This is much closer
// to the implementation used by google wave.
//
// Ops are lists of components which iterate over the whole document.
// Components are either:
//   A number N: Skip N characters in the original document
//   {i:'str'}: Insert 'str' at the current position in the document
//   {d:'str'}: Delete 'str', which appears at the current position in the document
//
// Eg: [3, {i:'hi'}, 5, {d:'internet'}]
//
// Snapshots are strings.

let makeAppend
const p = function () {} // require('util').debug
const i = function () {} // require('util').inspect

const moduleExport =
  typeof WEB !== 'undefined' && WEB !== null ? {} : module.exports

moduleExport.name = 'text-composable'

moduleExport.create = () => ''

// -------- Utility methods

const checkOp = function (op) {
  if (!Array.isArray(op)) {
    throw new Error('Op must be an array of components')
  }
  let last = null
  return (() => {
    const result = []
    for (const c of Array.from(op)) {
      if (typeof c === 'object') {
        if (
          (c.i == null || !(c.i.length > 0)) &&
          (c.d == null || !(c.d.length > 0))
        ) {
          throw new Error(`Invalid op component: ${i(c)}`)
        }
      } else {
        if (typeof c !== 'number') {
          throw new Error('Op components must be objects or numbers')
        }
        if (!(c > 0)) {
          throw new Error('Skip components must be a positive number')
        }
        if (typeof last === 'number') {
          throw new Error('Adjacent skip components should be added')
        }
      }

      result.push((last = c))
    }
    return result
  })()
}

// Makes a function for appending components to a given op.
// Exported for the randomOpGenerator.
moduleExport._makeAppend = makeAppend = op =>
  function (component) {
    if (component === 0 || component.i === '' || component.d === '') {
    } else if (op.length === 0) {
      return op.push(component)
    } else if (
      typeof component === 'number' &&
      typeof op[op.length - 1] === 'number'
    ) {
      return (op[op.length - 1] += component)
    } else if (component.i != null && op[op.length - 1].i != null) {
      return (op[op.length - 1].i += component.i)
    } else if (component.d != null && op[op.length - 1].d != null) {
      return (op[op.length - 1].d += component.d)
    } else {
      return op.push(component)
    }
  }

// checkOp op

// Makes 2 functions for taking components from the start of an op, and for peeking
// at the next op that could be taken.
const makeTake = function (op) {
  // The index of the next component to take
  let idx = 0
  // The offset into the component
  let offset = 0

  // Take up to length n from the front of op. If n is null, take the next
  // op component. If indivisableField == 'd', delete components won't be separated.
  // If indivisableField == 'i', insert components won't be separated.
  const take = function (n, indivisableField) {
    let c
    if (idx === op.length) {
      return null
    }
    // assert.notStrictEqual op.length, i, 'The op is too short to traverse the document'

    if (typeof op[idx] === 'number') {
      if (n == null || op[idx] - offset <= n) {
        c = op[idx] - offset
        ++idx
        offset = 0
        return c
      } else {
        offset += n
        return n
      }
    } else {
      // Take from the string
      const field = op[idx].i ? 'i' : 'd'
      c = {}
      if (
        n == null ||
        op[idx][field].length - offset <= n ||
        field === indivisableField
      ) {
        c[field] = op[idx][field].slice(offset)
        ++idx
        offset = 0
      } else {
        c[field] = op[idx][field].slice(offset, offset + n)
        offset += n
      }
      return c
    }
  }

  const peekType = () => op[idx]

  return [take, peekType]
}

// Find and return the length of an op component
const componentLength = function (component) {
  if (typeof component === 'number') {
    return component
  } else if (component.i != null) {
    return component.i.length
  } else {
    return component.d.length
  }
}

// Normalize an op, removing all empty skips and empty inserts / deletes. Concatenate
// adjacent inserts and deletes.
moduleExport.normalize = function (op) {
  const newOp = []
  const append = makeAppend(newOp)
  for (const component of Array.from(op)) {
    append(component)
  }
  return newOp
}

// Apply the op to the string. Returns the new string.
moduleExport.apply = function (str, op) {
  p(`Applying ${i(op)} to '${str}'`)
  if (typeof str !== 'string') {
    throw new Error('Snapshot should be a string')
  }
  checkOp(op)

  const pos = 0
  const newDoc = []

  for (const component of Array.from(op)) {
    if (typeof component === 'number') {
      if (component > str.length) {
        throw new Error('The op is too long for this document')
      }
      newDoc.push(str.slice(0, component))
      str = str.slice(component)
    } else if (component.i != null) {
      newDoc.push(component.i)
    } else {
      if (component.d !== str.slice(0, component.d.length)) {
        throw new Error(
          `The deleted text '${
            component.d
          }' doesn't match the next characters in the document '${str.slice(
            0,
            component.d.length
          )}'`
        )
      }
      str = str.slice(component.d.length)
    }
  }

  if (str !== '') {
    throw new Error("The applied op doesn't traverse the entire document")
  }

  return newDoc.join('')
}

// transform op1 by op2. Return transformed version of op1.
// op1 and op2 are unchanged by transform.
moduleExport.transform = function (op, otherOp, side) {
  if (side !== 'left' && side !== 'right') {
    throw new Error(`side (${side}) must be 'left' or 'right'`)
  }

  let component
  checkOp(op)
  checkOp(otherOp)
  const newOp = []

  const append = makeAppend(newOp)
  const [take, peek] = Array.from(makeTake(op))

  for (component of Array.from(otherOp)) {
    var chunk, length
    if (typeof component === 'number') {
      // Skip
      length = component
      while (length > 0) {
        chunk = take(length, 'i')
        if (chunk === null) {
          throw new Error(
            'The op traverses more elements than the document has'
          )
        }

        append(chunk)
        if (typeof chunk !== 'object' || chunk.i == null) {
          length -= componentLength(chunk)
        }
      }
    } else if (component.i != null) {
      // Insert
      if (side === 'left') {
        // The left insert should go first.
        const o = peek()
        if (o != null ? o.i : undefined) {
          append(take())
        }
      }

      // Otherwise, skip the inserted text.
      append(component.i.length)
    } else {
      // Delete.
      // assert.ok component.d
      ;({ length } = component.d)
      while (length > 0) {
        chunk = take(length, 'i')
        if (chunk === null) {
          throw new Error(
            'The op traverses more elements than the document has'
          )
        }

        if (typeof chunk === 'number') {
          length -= chunk
        } else if (chunk.i != null) {
          append(chunk)
        } else {
          // assert.ok chunk.d
          // The delete is unnecessary now.
          length -= chunk.d.length
        }
      }
    }
  }

  // Append extras from op1
  while ((component = take())) {
    if ((component != null ? component.i : undefined) == null) {
      throw new Error(`Remaining fragments in the op: ${i(component)}`)
    }
    append(component)
  }

  return newOp
}

// Compose 2 ops into 1 op.
moduleExport.compose = function (op1, op2) {
  p(`COMPOSE ${i(op1)} + ${i(op2)}`)
  checkOp(op1)
  checkOp(op2)

  let component
  const result = []

  const append = makeAppend(result)
  const [take, _] = Array.from(makeTake(op1))

  for (component of Array.from(op2)) {
    var chunk, length
    if (typeof component === 'number') {
      // Skip
      length = component
      while (length > 0) {
        chunk = take(length, 'd')
        if (chunk === null) {
          throw new Error(
            'The op traverses more elements than the document has'
          )
        }

        append(chunk)
        if (typeof chunk !== 'object' || chunk.d == null) {
          length -= componentLength(chunk)
        }
      }
    } else if (component.i != null) {
      // Insert
      append({ i: component.i })
    } else {
      // Delete
      let offset = 0
      while (offset < component.d.length) {
        chunk = take(component.d.length - offset, 'd')
        if (chunk === null) {
          throw new Error(
            'The op traverses more elements than the document has'
          )
        }

        // If it's a delete, append it. If it's a skip, drop it and decrease length.
        // If it's an insert, check the strings match, drop it and decrease length.
        if (typeof chunk === 'number') {
          append({ d: component.d.slice(offset, offset + chunk) })
          offset += chunk
        } else if (chunk.i != null) {
          if (component.d.slice(offset, offset + chunk.i.length) !== chunk.i) {
            throw new Error("The deleted text doesn't match the inserted text")
          }
          offset += chunk.i.length
          // The ops cancel each other out.
        } else {
          // Delete
          append(chunk)
        }
      }
    }
  }

  // Append extras from op1
  while ((component = take())) {
    if ((component != null ? component.d : undefined) == null) {
      throw new Error(`Trailing stuff in op1 ${i(component)}`)
    }
    append(component)
  }

  return result
}

const invertComponent = function (c) {
  if (typeof c === 'number') {
    return c
  } else if (c.i != null) {
    return { d: c.i }
  } else {
    return { i: c.d }
  }
}

// Invert an op
moduleExport.invert = function (op) {
  const result = []
  const append = makeAppend(result)

  for (const component of Array.from(op)) {
    append(invertComponent(component))
  }

  return result
}

if (typeof window !== 'undefined' && window !== null) {
  if (!window.ot) {
    window.ot = {}
  }
  if (!window.ot.types) {
    window.ot.types = {}
  }
  window.ot.types.text = moduleExport
}
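A short sketch of the composable ops above (an illustration, not part of the original diff; it assumes the module is saved as text-composable.js):

const type = require('./text-composable')

// Skip 3, insert 'hi', then skip the remaining 5 characters.
const op1 = [3, { i: 'hi' }, 5]
console.log(type.apply('abcdefgh', op1)) // 'abchidefgh'

// A concurrent op that deletes 'cd' (skip 2, delete, skip 4).
const op2 = [2, { d: 'cd' }, 4]

// Transform op1 past op2, then apply both to converge.
const op1t = type.transform(op1, op2, 'left') // [2, { i: 'hi' }, 4]
console.log(type.apply(type.apply('abcdefgh', op2), op1t)) // 'abhiefgh'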
133
services/document-updater/app/js/sharejs/types/text-tp2-api.js
Normal file
@ -0,0 +1,133 @@
/* eslint-disable
    no-undef,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * DS205: Consider reworking code to avoid use of IIFEs
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
// Text document API for text-tp2

let type
if (typeof WEB !== 'undefined' && WEB !== null) {
  type = exports.types['text-tp2']
} else {
  type = require('./text-tp2')
}

const { _takeDoc: takeDoc, _append: append } = type

const appendSkipChars = (op, doc, pos, maxlength) =>
  (() => {
    const result = []
    while (
      (maxlength === undefined || maxlength > 0) &&
      pos.index < doc.data.length
    ) {
      const part = takeDoc(doc, pos, maxlength, true)
      if (maxlength !== undefined && typeof part === 'string') {
        maxlength -= part.length
      }
      result.push(append(op, part.length || part))
    }
    return result
  })()

type.api = {
  provides: { text: true },

  // The number of characters in the string
  getLength() {
    return this.snapshot.charLength
  },

  // Flatten a document into a string
  getText() {
    const strings = Array.from(this.snapshot.data).filter(
      elem => typeof elem === 'string'
    )
    return strings.join('')
  },

  insert(pos, text, callback) {
    if (pos === undefined) {
      pos = 0
    }

    const op = []
    const docPos = { index: 0, offset: 0 }

    appendSkipChars(op, this.snapshot, docPos, pos)
    append(op, { i: text })
    appendSkipChars(op, this.snapshot, docPos)

    this.submitOp(op, callback)
    return op
  },

  del(pos, length, callback) {
    const op = []
    const docPos = { index: 0, offset: 0 }

    appendSkipChars(op, this.snapshot, docPos, pos)

    while (length > 0) {
      const part = takeDoc(this.snapshot, docPos, length, true)
      if (typeof part === 'string') {
        append(op, { d: part.length })
        length -= part.length
      } else {
        append(op, part)
      }
    }

    appendSkipChars(op, this.snapshot, docPos)

    this.submitOp(op, callback)
    return op
  },

  _register() {
    // Interpret received ops + generate more detailed events for them
    return this.on('remoteop', function (op, snapshot) {
      let textPos = 0
      const docPos = { index: 0, offset: 0 }

      for (const component of Array.from(op)) {
        var part, remainder
        if (typeof component === 'number') {
          // Skip
          remainder = component
          while (remainder > 0) {
            part = takeDoc(snapshot, docPos, remainder)
            if (typeof part === 'string') {
              textPos += part.length
            }
            remainder -= part.length || part
          }
        } else if (component.i !== undefined) {
          // Insert
          if (typeof component.i === 'string') {
            this.emit('insert', textPos, component.i)
            textPos += component.i.length
          }
        } else {
          // Delete
          remainder = component.d
          while (remainder > 0) {
            part = takeDoc(snapshot, docPos, remainder)
            if (typeof part === 'string') {
              this.emit('delete', textPos, part)
            }
            remainder -= part.length || part
          }
        }
      }
    })
  },
}
497
services/document-updater/app/js/sharejs/types/text-tp2.js
Normal file
@ -0,0 +1,497 @@
/* eslint-disable
    no-cond-assign,
    no-return-assign,
    no-undef,
    no-unused-vars,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * DS103: Rewrite code to no longer use __guard__
 * DS205: Consider reworking code to avoid use of IIFEs
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
// A TP2 implementation of text, following this spec:
// http://code.google.com/p/lightwave/source/browse/trunk/experimental/ot/README
//
// A document is made up of a string and a set of tombstones inserted throughout
// the string. For example, 'some ', (2 tombstones), 'string'.
//
// This is encoded in a document as: {s:'some string', t:[5, -2, 6]}
//
// Ops are lists of components which iterate over the whole document.
// Components are either:
//   N: Skip N characters in the original document
//   {i:'str'}: Insert 'str' at the current position in the document
//   {i:N}: Insert N tombstones at the current position in the document
//   {d:N}: Delete (tombstone) N characters at the current position in the document
//
// Eg: [3, {i:'hi'}, 5, {d:8}]
//
// Snapshots are lists with characters and tombstones. Characters are stored in strings
// and adjacent tombstones are flattened into numbers.
//
// Eg, the document: 'Hello .....world' ('.' denotes tombstoned (deleted) characters)
// would be represented by a document snapshot of ['Hello ', 5, 'world']

let append, appendDoc, takeDoc
var type = {
  name: 'text-tp2',
  tp2: true,
  create() {
    return { charLength: 0, totalLength: 0, positionCache: [], data: [] }
  },
  serialize(doc) {
    if (!doc.data) {
      throw new Error('invalid doc snapshot')
    }
    return doc.data
  },
  deserialize(data) {
    const doc = type.create()
    doc.data = data

    for (const component of Array.from(data)) {
      if (typeof component === 'string') {
        doc.charLength += component.length
        doc.totalLength += component.length
      } else {
        doc.totalLength += component
      }
    }

    return doc
  },
}

const checkOp = function (op) {
  if (!Array.isArray(op)) {
    throw new Error('Op must be an array of components')
  }
  let last = null
  return (() => {
    const result = []
    for (const c of Array.from(op)) {
      if (typeof c === 'object') {
        if (c.i !== undefined) {
          if (
            (typeof c.i !== 'string' || !(c.i.length > 0)) &&
            (typeof c.i !== 'number' || !(c.i > 0))
          ) {
            throw new Error('Inserts must insert a string or a +ive number')
          }
        } else if (c.d !== undefined) {
          if (typeof c.d !== 'number' || !(c.d > 0)) {
            throw new Error('Deletes must be a +ive number')
          }
        } else {
          throw new Error('Operation component must define .i or .d')
        }
      } else {
        if (typeof c !== 'number') {
          throw new Error('Op components must be objects or numbers')
        }
        if (!(c > 0)) {
          throw new Error('Skip components must be a positive number')
        }
        if (typeof last === 'number') {
          throw new Error('Adjacent skip components should be combined')
        }
      }

      result.push((last = c))
    }
    return result
  })()
}

// Take the next part from the specified position in a document snapshot.
// position = {index, offset}. It will be updated.
type._takeDoc = takeDoc = function (
  doc,
  position,
  maxlength,
  tombsIndivisible
) {
  if (position.index >= doc.data.length) {
    throw new Error('Operation goes past the end of the document')
  }

  const part = doc.data[position.index]
  // peel off data[0]
  const result =
    typeof part === 'string'
      ? maxlength !== undefined
        ? part.slice(position.offset, position.offset + maxlength)
        : part.slice(position.offset)
      : maxlength === undefined || tombsIndivisible
      ? part - position.offset
      : Math.min(maxlength, part - position.offset)

  const resultLen = result.length || result

  if ((part.length || part) - position.offset > resultLen) {
    position.offset += resultLen
  } else {
    position.index++
    position.offset = 0
  }

  return result
}

// Append a part to the end of a document
type._appendDoc = appendDoc = function (doc, p) {
  if (p === 0 || p === '') {
    return
  }

  if (typeof p === 'string') {
    doc.charLength += p.length
    doc.totalLength += p.length
  } else {
    doc.totalLength += p
  }

  const { data } = doc
  if (data.length === 0) {
    data.push(p)
  } else if (typeof data[data.length - 1] === typeof p) {
    data[data.length - 1] += p
  } else {
    data.push(p)
  }
}

// Apply the op to the document. The document is not modified in the process.
type.apply = function (doc, op) {
  if (
    doc.totalLength === undefined ||
    doc.charLength === undefined ||
    doc.data.length === undefined
  ) {
    throw new Error('Snapshot is invalid')
  }

  checkOp(op)

  const newDoc = type.create()
  const position = { index: 0, offset: 0 }

  for (const component of Array.from(op)) {
    var part, remainder
    if (typeof component === 'number') {
      remainder = component
      while (remainder > 0) {
        part = takeDoc(doc, position, remainder)

        appendDoc(newDoc, part)
        remainder -= part.length || part
      }
    } else if (component.i !== undefined) {
      appendDoc(newDoc, component.i)
    } else if (component.d !== undefined) {
      remainder = component.d
      while (remainder > 0) {
        part = takeDoc(doc, position, remainder)
        remainder -= part.length || part
      }
      appendDoc(newDoc, component.d)
    }
  }

  return newDoc
}

// Append an op component to the end of the specified op.
// Exported for the randomOpGenerator.
type._append = append = function (op, component) {
  if (
    component === 0 ||
    component.i === '' ||
    component.i === 0 ||
    component.d === 0
  ) {
  } else if (op.length === 0) {
    return op.push(component)
  } else {
    const last = op[op.length - 1]
    if (typeof component === 'number' && typeof last === 'number') {
      return (op[op.length - 1] += component)
    } else if (
      component.i !== undefined &&
      last.i != null &&
      typeof last.i === typeof component.i
    ) {
      return (last.i += component.i)
    } else if (component.d !== undefined && last.d != null) {
      return (last.d += component.d)
    } else {
      return op.push(component)
    }
  }
}

// Makes 2 functions for taking components from the start of an op, and for peeking
// at the next op that could be taken.
const makeTake = function (op) {
  // The index of the next component to take
  let index = 0
  // The offset into the component
  let offset = 0

  // Take up to length maxlength from the op. If maxlength is not defined, there is no max.
  // If insertsIndivisible is true, inserts (& insert tombstones) won't be separated.
  //
  // Returns null when op is fully consumed.
  const take = function (maxlength, insertsIndivisible) {
    let current
    if (index === op.length) {
      return null
    }

    const e = op[index]
    if (
      typeof (current = e) === 'number' ||
      typeof (current = e.i) === 'number' ||
      (current = e.d) !== undefined
    ) {
      let c
      if (
        maxlength == null ||
        current - offset <= maxlength ||
        (insertsIndivisible && e.i !== undefined)
      ) {
        // Return the rest of the current element.
        c = current - offset
        ++index
        offset = 0
      } else {
        offset += maxlength
        c = maxlength
      }
      if (e.i !== undefined) {
        return { i: c }
      } else if (e.d !== undefined) {
        return { d: c }
      } else {
        return c
      }
    } else {
      // Take from the inserted string
      let result
      if (
        maxlength == null ||
        e.i.length - offset <= maxlength ||
        insertsIndivisible
      ) {
        result = { i: e.i.slice(offset) }
        ++index
        offset = 0
      } else {
        result = { i: e.i.slice(offset, offset + maxlength) }
        offset += maxlength
      }
      return result
    }
  }

  const peekType = () => op[index]

  return [take, peekType]
}

// Find and return the length of an op component
const componentLength = function (component) {
  if (typeof component === 'number') {
    return component
  } else if (typeof component.i === 'string') {
    return component.i.length
  } else {
    // This should work because c.d and c.i must be +ive.
    return component.d || component.i
  }
}

// Normalize an op, removing all empty skips and empty inserts / deletes. Concatenate
// adjacent inserts and deletes.
type.normalize = function (op) {
  const newOp = []
  for (const component of Array.from(op)) {
    append(newOp, component)
  }
  return newOp
}

// This is a helper method to transform and prune. goForwards is true for transform, false for prune.
const transformer = function (op, otherOp, goForwards, side) {
  let component
  checkOp(op)
  checkOp(otherOp)
  const newOp = []

  const [take, peek] = Array.from(makeTake(op))

  for (component of Array.from(otherOp)) {
    var chunk
    let length = componentLength(component)

    if (component.i !== undefined) {
      // Insert text or tombs
      if (goForwards) {
        // transform - insert skips over inserted parts
        if (side === 'left') {
          // The left insert should go first.
          while (__guard__(peek(), x => x.i) !== undefined) {
            append(newOp, take())
          }
        }

        // In any case, skip the inserted text.
        append(newOp, length)
      } else {
        // Prune. Remove skips for inserts.
        while (length > 0) {
          chunk = take(length, true)

          if (chunk === null) {
            throw new Error('The transformed op is invalid')
          }
          if (chunk.d !== undefined) {
            throw new Error(
              'The transformed op deletes locally inserted characters - it cannot be purged of the insert.'
            )
          }

          if (typeof chunk === 'number') {
            length -= chunk
          } else {
            append(newOp, chunk)
          }
        }
      }
    } else {
      // Skip or delete
      while (length > 0) {
        chunk = take(length, true)
        if (chunk === null) {
          throw new Error(
            'The op traverses more elements than the document has'
          )
        }

        append(newOp, chunk)
        if (!chunk.i) {
          length -= componentLength(chunk)
        }
      }
    }
  }

  // Append extras from op1
  while ((component = take())) {
    if (component.i === undefined) {
      throw new Error(`Remaining fragments in the op: ${component}`)
    }
    append(newOp, component)
  }

  return newOp
}

// transform op1 by op2. Return transformed version of op1.
// op1 and op2 are unchanged by transform.
// side should be 'left' or 'right', depending on if op1.id <> op2.id. 'left' == client op.
type.transform = function (op, otherOp, side) {
  if (side !== 'left' && side !== 'right') {
    throw new Error(`side (${side}) should be 'left' or 'right'`)
  }
  return transformer(op, otherOp, true, side)
}

// Prune is the inverse of transform.
type.prune = (op, otherOp) => transformer(op, otherOp, false)

// Compose 2 ops into 1 op.
type.compose = function (op1, op2) {
  let component
  if (op1 === null || op1 === undefined) {
    return op2
  }

  checkOp(op1)
  checkOp(op2)

  const result = []

  const [take, _] = Array.from(makeTake(op1))

  for (component of Array.from(op2)) {
    var chunk, length
    if (typeof component === 'number') {
      // Skip
      // Just copy from op1.
      length = component
      while (length > 0) {
        chunk = take(length)
        if (chunk === null) {
          throw new Error(
            'The op traverses more elements than the document has'
          )
        }

        append(result, chunk)
        length -= componentLength(chunk)
      }
    } else if (component.i !== undefined) {
      // Insert
      append(result, { i: component.i })
    } else {
      // Delete
      length = component.d
      while (length > 0) {
        chunk = take(length)
        if (chunk === null) {
          throw new Error(
            'The op traverses more elements than the document has'
          )
        }

        const chunkLength = componentLength(chunk)
        if (chunk.i !== undefined) {
          append(result, { i: chunkLength })
        } else {
          append(result, { d: chunkLength })
        }

        length -= chunkLength
      }
    }
  }

  // Append extras from op1
  while ((component = take())) {
    if (component.i === undefined) {
      throw new Error(`Remaining fragments in op1: ${component}`)
    }
    append(result, component)
  }

  return result
}

if (typeof WEB !== 'undefined' && WEB !== null) {
  exports.types['text-tp2'] = type
} else {
  module.exports = type
}

function __guard__(value, transform) {
  return typeof value !== 'undefined' && value !== null
    ? transform(value)
    : undefined
}
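A small sketch of the TP2 type above (an illustration, not part of the original diff; it assumes the module is saved as text-tp2.js):

const type = require('./text-tp2')

// Deserialize the snapshot ['Hello world'] and tombstone the word 'world'
// (skip 6 characters, then delete 5).
let doc = type.deserialize(['Hello world'])
doc = type.apply(doc, [6, { d: 5 }])

console.log(doc.data) // [ 'Hello ', 5 ]  (5 tombstones where 'world' was)
console.log(doc.charLength, doc.totalLength) // 6 11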
390
services/document-updater/app/js/sharejs/types/text.js
Normal file
@ -0,0 +1,390 @@
/* eslint-disable
    camelcase,
    no-return-assign,
    no-undef,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
// A simple text implementation
//
// Operations are lists of components.
// Each component either inserts or deletes at a specified position in the document.
//
// Components are either:
//   {i:'str', p:100}: Insert 'str' at position 100 in the document
//   {d:'str', p:100}: Delete 'str' at position 100 in the document
//
// Components in an operation are executed sequentially, so the position of components
// assumes previous components have already executed.
//
// Eg: This op:
//   [{i:'abc', p:0}]
// is equivalent to this op:
//   [{i:'a', p:0}, {i:'b', p:1}, {i:'c', p:2}]

// NOTE: The global scope here is shared with other sharejs files when built with closure.
// Be careful what ends up in your namespace.

let append, transformComponent
const text = {}

text.name = 'text'

text.create = () => ''

const strInject = (s1, pos, s2) => s1.slice(0, pos) + s2 + s1.slice(pos)

const checkValidComponent = function (c) {
  if (typeof c.p !== 'number') {
    throw new Error('component missing position field')
  }

  const i_type = typeof c.i
  const d_type = typeof c.d
  const c_type = typeof c.c
  if (
    !((i_type === 'string') ^ (d_type === 'string') ^ (c_type === 'string'))
  ) {
    throw new Error('component needs an i, d or c field')
  }

  if (!(c.p >= 0)) {
    throw new Error('position cannot be negative')
  }
}

const checkValidOp = function (op) {
  for (const c of Array.from(op)) {
    checkValidComponent(c)
  }
  return true
}

text.apply = function (snapshot, op) {
  checkValidOp(op)
  for (const component of Array.from(op)) {
    if (component.i != null) {
      snapshot = strInject(snapshot, component.p, component.i)
    } else if (component.d != null) {
      const deleted = snapshot.slice(
        component.p,
        component.p + component.d.length
      )
      if (component.d !== deleted) {
        throw new Error(
          `Delete component '${component.d}' does not match deleted text '${deleted}'`
        )
      }
      snapshot =
        snapshot.slice(0, component.p) +
        snapshot.slice(component.p + component.d.length)
    } else if (component.c != null) {
      const comment = snapshot.slice(
        component.p,
        component.p + component.c.length
      )
      if (component.c !== comment) {
        throw new Error(
          `Comment component '${component.c}' does not match commented text '${comment}'`
        )
      }
    } else {
      throw new Error('Unknown op type')
    }
  }
  return snapshot
}

// Exported for use by the random op generator.
//
// For simplicity, this version of append does not compress adjacent inserts and deletes of
// the same text. It would be nice to change that at some stage.
text._append = append = function (newOp, c) {
  if (c.i === '' || c.d === '') {
    return
  }
  if (newOp.length === 0) {
    return newOp.push(c)
  } else {
    const last = newOp[newOp.length - 1]

    // Compose the insert into the previous insert if possible
    if (
      last.i != null &&
      c.i != null &&
      last.p <= c.p &&
      c.p <= last.p + last.i.length
    ) {
      return (newOp[newOp.length - 1] = {
        i: strInject(last.i, c.p - last.p, c.i),
        p: last.p,
      })
    } else if (
      last.d != null &&
      c.d != null &&
      c.p <= last.p &&
      last.p <= c.p + c.d.length
    ) {
      return (newOp[newOp.length - 1] = {
        d: strInject(c.d, last.p - c.p, last.d),
        p: c.p,
      })
    } else {
      return newOp.push(c)
    }
  }
}

text.compose = function (op1, op2) {
  checkValidOp(op1)
  checkValidOp(op2)

  const newOp = op1.slice()
  for (const c of Array.from(op2)) {
    append(newOp, c)
  }

  return newOp
}

// Attempt to compress the op components together 'as much as possible'.
// This implementation preserves order and preserves create/delete pairs.
text.compress = op => text.compose([], op)

text.normalize = function (op) {
  const newOp = []

  // Normalize should allow ops which are a single (unwrapped) component:
  // {i:'asdf', p:23}.
  // There's no good way to test if something is an array:
  // http://perfectionkills.com/instanceof-considered-harmful-or-how-to-write-a-robust-isarray/
  // so this is probably the least bad solution.
  if (op.i != null || op.p != null) {
    op = [op]
  }

  for (const c of Array.from(op)) {
    if (c.p == null) {
      c.p = 0
    }
    append(newOp, c)
  }

  return newOp
}

// This helper method transforms a position by an op component.
//
// If c is an insert, insertAfter specifies whether the transform
// is pushed after the insert (true) or before it (false).
//
// insertAfter is optional for deletes.
const transformPosition = function (pos, c, insertAfter) {
  if (c.i != null) {
    if (c.p < pos || (c.p === pos && insertAfter)) {
      return pos + c.i.length
    } else {
      return pos
    }
  } else if (c.d != null) {
    // I think this could also be written as: Math.min(c.p, Math.min(c.p - otherC.p, otherC.d.length))
    // but I think it's harder to read that way, and it compiles using ternary operators anyway
    // so it's no slower written like this.
    if (pos <= c.p) {
      return pos
    } else if (pos <= c.p + c.d.length) {
      return c.p
    } else {
      return pos - c.d.length
    }
  } else if (c.c != null) {
    return pos
  } else {
    throw new Error('unknown op type')
  }
}

// Helper method to transform a cursor position as a result of an op.
//
// Like transformPosition above, if c is an insert, insertAfter specifies whether the cursor position
// is pushed after an insert (true) or before it (false).
text.transformCursor = function (position, op, side) {
  const insertAfter = side === 'right'
  for (const c of Array.from(op)) {
    position = transformPosition(position, c, insertAfter)
  }
  return position
}

// Transform an op component by another op component. Asymmetric.
// The result will be appended to destination.
//
// exported for use in JSON type
text._tc = transformComponent = function (dest, c, otherC, side) {
  let cIntersect, intersectEnd, intersectStart, newC, otherIntersect
  checkValidOp([c])
  checkValidOp([otherC])

  if (c.i != null) {
    append(dest, {
      i: c.i,
      p: transformPosition(c.p, otherC, side === 'right'),
    })
  } else if (c.d != null) {
    // Delete
    if (otherC.i != null) {
      // delete vs insert
      let s = c.d
      if (c.p < otherC.p) {
        append(dest, { d: s.slice(0, otherC.p - c.p), p: c.p })
        s = s.slice(otherC.p - c.p)
      }
      if (s !== '') {
        append(dest, { d: s, p: c.p + otherC.i.length })
      }
    } else if (otherC.d != null) {
      // Delete vs delete
      if (c.p >= otherC.p + otherC.d.length) {
        append(dest, { d: c.d, p: c.p - otherC.d.length })
      } else if (c.p + c.d.length <= otherC.p) {
        append(dest, c)
      } else {
        // They overlap somewhere.
        newC = { d: '', p: c.p }
        if (c.p < otherC.p) {
          newC.d = c.d.slice(0, otherC.p - c.p)
        }
        if (c.p + c.d.length > otherC.p + otherC.d.length) {
          newC.d += c.d.slice(otherC.p + otherC.d.length - c.p)
        }

        // This is entirely optional - just for a check that the deleted
        // text in the two ops matches
        intersectStart = Math.max(c.p, otherC.p)
        intersectEnd = Math.min(c.p + c.d.length, otherC.p + otherC.d.length)
        cIntersect = c.d.slice(intersectStart - c.p, intersectEnd - c.p)
        otherIntersect = otherC.d.slice(
          intersectStart - otherC.p,
          intersectEnd - otherC.p
        )
        if (cIntersect !== otherIntersect) {
          throw new Error(
            'Delete ops delete different text in the same region of the document'
          )
        }

        if (newC.d !== '') {
          // This could be rewritten similarly to insert v delete, above.
          newC.p = transformPosition(newC.p, otherC)
          append(dest, newC)
        }
      }
    } else if (otherC.c != null) {
      append(dest, c)
    } else {
      throw new Error('unknown op type')
    }
  } else if (c.c != null) {
    // Comment
    if (otherC.i != null) {
      if (c.p < otherC.p && otherC.p < c.p + c.c.length) {
        const offset = otherC.p - c.p
        const new_c =
          c.c.slice(0, +(offset - 1) + 1 || undefined) +
          otherC.i +
          c.c.slice(offset)
        append(dest, { c: new_c, p: c.p, t: c.t })
      } else {
        append(dest, {
          c: c.c,
          p: transformPosition(c.p, otherC, true),
          t: c.t,
        })
      }
    } else if (otherC.d != null) {
      if (c.p >= otherC.p + otherC.d.length) {
        append(dest, { c: c.c, p: c.p - otherC.d.length, t: c.t })
      } else if (c.p + c.c.length <= otherC.p) {
        append(dest, c)
      } else {
        // Delete overlaps comment
        // They overlap somewhere.
        newC = { c: '', p: c.p, t: c.t }
        if (c.p < otherC.p) {
          newC.c = c.c.slice(0, otherC.p - c.p)
        }
        if (c.p + c.c.length > otherC.p + otherC.d.length) {
          newC.c += c.c.slice(otherC.p + otherC.d.length - c.p)
        }

        // This is entirely optional - just for a check that the deleted
        // text in the two ops matches
        intersectStart = Math.max(c.p, otherC.p)
        intersectEnd = Math.min(c.p + c.c.length, otherC.p + otherC.d.length)
        cIntersect = c.c.slice(intersectStart - c.p, intersectEnd - c.p)
        otherIntersect = otherC.d.slice(
          intersectStart - otherC.p,
          intersectEnd - otherC.p
        )
        if (cIntersect !== otherIntersect) {
          throw new Error(
            'Delete ops delete different text in the same region of the document'
          )
        }

        newC.p = transformPosition(newC.p, otherC)
        append(dest, newC)
      }
    } else if (otherC.c != null) {
      append(dest, c)
    } else {
      throw new Error('unknown op type')
    }
  }

  return dest
}

const invertComponent = function (c) {
  if (c.i != null) {
    return { d: c.i, p: c.p }
  } else {
    return { i: c.d, p: c.p }
  }
}

// No need to use append for invert, because the components won't be able to
// cancel with one another.
text.invert = op =>
  Array.from(op.slice().reverse()).map(c => invertComponent(c))

if (typeof WEB !== 'undefined' && WEB !== null) {
  if (!exports.types) {
    exports.types = {}
  }

  // This is kind of awful - come up with a better way to hook this helper code up.
  bootstrapTransform(text, transformComponent, checkValidOp, append)

  // [] is used to prevent closure from renaming types.text
  exports.types.text = text
} else {
  module.exports = text

  // The text type really shouldn't need this - it should be possible to define
  // an efficient transform function by making a sort of transform map and passing each
  // op component through it.
  require('./helpers').bootstrapTransform(
    text,
    transformComponent,
    checkValidOp,
    append
  )
}
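A sketch of the plain text type above under concurrent edits (an illustration, not part of the original diff; it assumes the module is saved as text.js alongside ./helpers, which supplies the generic transform wrapper used below):

const text = require('./text')

let snapshot = text.apply(text.create(), [{ i: 'hello world', p: 0 }])

// Two ops made concurrently against the same snapshot:
const ours = [{ i: '!', p: 11 }]
const theirs = [{ d: 'world', p: 6 }]

// Transform our op past theirs; the insert position collapses onto the
// start of the deleted region.
const oursT = text.transform(ours, theirs, 'left') // [ { i: '!', p: 6 } ]
console.log(text.apply(text.apply(snapshot, theirs), oursT)) // 'hello !'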
@ -0,0 +1,14 @@
/* eslint-disable
    no-unused-vars,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
// This is included at the top of each compiled type file for the web.

/**
  @const
  @type {boolean}
*/
const WEB = true

const exports = window.sharejs
14
services/document-updater/app/js/sharejs/web-prelude.js
Normal file
@ -0,0 +1,14 @@
/* eslint-disable
    no-unused-vars,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
// This is included at the top of each compiled type file for the web.

/**
  @const
  @type {boolean}
*/
const WEB = true

const exports = window.sharejs
188
services/document-updater/benchmarks/multi_vs_mget_mset.rb
Normal file
@ -0,0 +1,188 @@
require "benchmark"
|
||||
require "redis"
|
||||
|
||||
N = (ARGV.first || 1).to_i
|
||||
DOC_ID = (ARGV.last || "606072b20bb4d3109fb5b122")
|
||||
|
||||
@r = Redis.new
|
||||
|
||||
|
||||
def get
|
||||
@r.get("doclines:{#{DOC_ID}}")
|
||||
@r.get("DocVersion:{#{DOC_ID}}")
|
||||
@r.get("DocHash:{#{DOC_ID}}")
|
||||
@r.get("ProjectId:{#{DOC_ID}}")
|
||||
@r.get("Ranges:{#{DOC_ID}}")
|
||||
@r.get("Pathname:{#{DOC_ID}}")
|
||||
@r.get("ProjectHistoryId:{#{DOC_ID}}")
|
||||
@r.get("UnflushedTime:{#{DOC_ID}}")
|
||||
@r.get("lastUpdatedAt:{#{DOC_ID}}")
|
||||
@r.get("lastUpdatedBy:{#{DOC_ID}}")
|
||||
end
|
||||
|
||||
def mget
|
||||
@r.mget(
|
||||
"doclines:{#{DOC_ID}}",
|
||||
"DocVersion:{#{DOC_ID}}",
|
||||
"DocHash:{#{DOC_ID}}",
|
||||
"ProjectId:{#{DOC_ID}}",
|
||||
"Ranges:{#{DOC_ID}}",
|
||||
"Pathname:{#{DOC_ID}}",
|
||||
"ProjectHistoryId:{#{DOC_ID}}",
|
||||
"UnflushedTime:{#{DOC_ID}}",
|
||||
"lastUpdatedAt:{#{DOC_ID}}",
|
||||
"lastUpdatedBy:{#{DOC_ID}}",
|
||||
)
|
||||
end
|
||||
|
||||
def set
|
||||
@r.set("doclines:{#{DOC_ID}}", "[\"@book{adams1995hitchhiker,\",\" title={The Hitchhiker's Guide to the Galaxy},\",\" author={Adams, D.},\",\" isbn={9781417642595},\",\" url={http://books.google.com/books?id=W-xMPgAACAAJ},\",\" year={1995},\",\" publisher={San Val}\",\"}\",\"\"]")
|
||||
@r.set("DocVersion:{#{DOC_ID}}", "0")
|
||||
@r.set("DocHash:{#{DOC_ID}}", "0075bb0629c6c13d0d68918443648bbfe7d98869")
|
||||
@r.set("ProjectId:{#{DOC_ID}}", "606072b20bb4d3109fb5b11e")
|
||||
@r.set("Ranges:{#{DOC_ID}}", "")
|
||||
@r.set("Pathname:{#{DOC_ID}}", "/references.bib")
|
||||
@r.set("ProjectHistoryId:{#{DOC_ID}}", "")
|
||||
@r.set("UnflushedTime:{#{DOC_ID}}", "")
|
||||
@r.set("lastUpdatedAt:{#{DOC_ID}}", "")
|
||||
@r.set("lastUpdatedBy:{#{DOC_ID}}", "")
|
||||
end
|
||||
|
||||
def mset
|
||||
@r.mset(
|
||||
"doclines:{#{DOC_ID}}", "[\"@book{adams1995hitchhiker,\",\" title={The Hitchhiker's Guide to the Galaxy},\",\" author={Adams, D.},\",\" isbn={9781417642595},\",\" url={http://books.google.com/books?id=W-xMPgAACAAJ},\",\" year={1995},\",\" publisher={San Val}\",\"}\",\"\"]",
|
||||
"DocVersion:{#{DOC_ID}}", "0",
|
||||
"DocHash:{#{DOC_ID}}", "0075bb0629c6c13d0d68918443648bbfe7d98869",
|
||||
"ProjectId:{#{DOC_ID}}", "606072b20bb4d3109fb5b11e",
|
||||
"Ranges:{#{DOC_ID}}", "",
|
||||
"Pathname:{#{DOC_ID}}", "/references.bib",
|
||||
"ProjectHistoryId:{#{DOC_ID}}", "",
|
||||
"UnflushedTime:{#{DOC_ID}}", "",
|
||||
"lastUpdatedAt:{#{DOC_ID}}", "",
|
||||
"lastUpdatedBy:{#{DOC_ID}}", "",
|
||||
)
|
||||
end
|
||||
|
||||
|
||||
def benchmark_multi_get(benchmark, i)
|
||||
benchmark.report("#{i}: multi get") do
|
||||
N.times do
|
||||
@r.multi do
|
||||
get
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
def benchmark_mget(benchmark, i)
|
||||
benchmark.report("#{i}: mget") do
|
||||
N.times do
|
||||
mget
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
def benchmark_multi_set(benchmark, i)
|
||||
benchmark.report("#{i}: multi set") do
|
||||
N.times do
|
||||
@r.multi do
|
||||
set
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
def benchmark_mset(benchmark, i)
|
||||
benchmark.report("#{i}: mset") do
|
||||
N.times do
|
||||
mset
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
|
||||
# init
|
||||
set
|
||||
|
||||
Benchmark.bmbm do |benchmark|
|
||||
3.times do |i|
|
||||
benchmark_multi_get(benchmark, i)
|
||||
benchmark_mget(benchmark, i)
|
||||
benchmark_multi_set(benchmark, i)
|
||||
benchmark_mset(benchmark, i)
|
||||
end
|
||||
end
|
||||
|
||||
|
||||
|
||||
=begin
|
||||
# Results
|
||||
|
||||
I could not max out the redis-server process with this benchmark.
|
||||
The ruby process hit 100% of a modern i7 CPU thread and the redis-server process
|
||||
barely hit 50% of a CPU thread.
|
||||
|
||||
Based on the timings below, mget is about 3 times faster and mset about 4 times
|
||||
faster than multiple get/set commands in a multi.
|
||||
=end
|
||||
|
||||
=begin
|
||||
$ redis-server --version
|
||||
Redis server v=5.0.7 sha=00000000:0 malloc=jemalloc-5.2.1 bits=64 build=636cde3b5c7a3923
|
||||
$ ruby multi_vs_mget_mset.rb 100000
|
||||
Rehearsal ------------------------------------------------
|
||||
0: multi get 12.132423 4.246689 16.379112 ( 16.420069)
|
||||
0: mget 4.499457 0.947556 5.447013 ( 6.274883)
|
||||
0: multi set 12.685936 4.495241 17.181177 ( 17.225984)
|
||||
0: mset 2.543401 0.913448 3.456849 ( 4.554799)
|
||||
1: multi get 13.397207 4.581881 17.979088 ( 18.027755)
|
||||
1: mget 4.551287 1.160531 5.711818 ( 6.579168)
|
||||
1: multi set 13.018957 4.927175 17.946132 ( 17.987502)
|
||||
1: mset 2.561096 1.048416 3.609512 ( 4.780087)
|
||||
2: multi get 13.224422 5.014475 18.238897 ( 18.284152)
|
||||
2: mget 4.664434 1.051083 5.715517 ( 6.592088)
|
||||
2: multi set 12.972284 4.600422 17.572706 ( 17.613185)
|
||||
2: mset 2.621344 0.984123 3.605467 ( 4.766855)
|
||||
------------------------------------- total: 132.843288sec
|
||||
|
||||
user system total real
|
||||
0: multi get 13.341552 4.900892 18.242444 ( 18.289912)
|
||||
0: mget 5.056534 0.960954 6.017488 ( 6.971189)
|
||||
0: multi set 12.989880 4.823793 17.813673 ( 17.858393)
|
||||
0: mset 2.543434 1.025352 3.568786 ( 4.723040)
|
||||
1: multi get 13.059379 4.674345 17.733724 ( 17.777859)
|
||||
1: mget 4.698754 0.915637 5.614391 ( 6.489614)
|
||||
1: multi set 12.608293 4.729163 17.337456 ( 17.372993)
|
||||
1: mset 2.645290 0.940584 3.585874 ( 4.744134)
|
||||
2: multi get 13.678224 4.732373 18.410597 ( 18.457525)
|
||||
2: mget 4.716749 1.072064 5.788813 ( 6.697683)
|
||||
2: multi set 13.058710 4.889801 17.948511 ( 17.988742)
|
||||
2: mset 2.311854 0.989166 3.301020 ( 4.346467)
|
||||
=end
|
||||
|
||||
=begin
|
||||
# multi get/set run at about O(65'000) operations per second
|
||||
$ redis-cli info | grep 'instantaneous_ops_per_sec'
|
||||
instantaneous_ops_per_sec:65557
|
||||
|
||||
# mget runs at about O(15'000) operations per second
|
||||
$ redis-cli info | grep 'instantaneous_ops_per_sec'
|
||||
instantaneous_ops_per_sec:14580
|
||||
|
||||
# mset runs at about O(20'000) operations per second
|
||||
$ redis-cli info | grep 'instantaneous_ops_per_sec'
|
||||
instantaneous_ops_per_sec:20792
|
||||
|
||||
These numbers are pretty reasonable:
|
||||
multi: 100'000 * 12 ops / 18s = 66'666 ops/s
|
||||
mget : 100'000 * 1 ops / 7s = 14'285 ops/s
|
||||
mset : 100'000 * 1 ops / 5s = 20'000 ops/s
|
||||
|
||||
|
||||
|
||||
Bonus: Running three benchmarks in parallel on different keys.
|
||||
multi get: O(125'000) ops/s and 80% CPU load of redis-server
|
||||
multi set: O(130'000) ops/s and 90% CPU load of redis-server
|
||||
mget : O( 30'000) ops/s and 70% CPU load of redis-server
|
||||
mset : O( 40'000) ops/s and 90% CPU load of redis-server
|
||||
=end
|
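A minimal Node.js sketch of the same comparison, assuming the ioredis client (which @overleaf/redis-wrapper wraps); the key names mirror the benchmark and the doc id is a placeholder:

const Redis = require('ioredis')

const redis = new Redis()
const docId = 'example-doc-id' // hypothetical id, for illustration only

// One round trip, but redis-server still parses and answers one GET per key.
function readWithMulti() {
  return redis
    .multi()
    .get(`doclines:{${docId}}`)
    .get(`DocVersion:{${docId}}`)
    .exec()
}

// A single MGET: one command to parse and one array reply, which is where
// the roughly 3x throughput difference measured above comes from.
function readWithMget() {
  return redis.mget(`doclines:{${docId}}`, `DocVersion:{${docId}}`)
}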
8
services/document-updater/buildscript.txt
Normal file
@@ -0,0 +1,8 @@
document-updater
--dependencies=mongo,redis
--docker-repos=gcr.io/overleaf-ops
--env-add=
--env-pass-through=
--node-version=12.22.3
--public-repo=True
--script-version=3.11.0
195
services/document-updater/config/settings.defaults.js
Executable file
@@ -0,0 +1,195 @@
module.exports = {
  internal: {
    documentupdater: {
      host: process.env.LISTEN_ADDRESS || 'localhost',
      port: 3003,
    },
  },

  apis: {
    web: {
      url: `http://${
        process.env.WEB_API_HOST || process.env.WEB_HOST || 'localhost'
      }:${process.env.WEB_API_PORT || process.env.WEB_PORT || 3000}`,
      user: process.env.WEB_API_USER || 'sharelatex',
      pass: process.env.WEB_API_PASSWORD || 'password',
    },
    trackchanges: {
      url: `http://${process.env.TRACK_CHANGES_HOST || 'localhost'}:3015`,
    },
    project_history: {
      enabled: true,
      url: `http://${process.env.PROJECT_HISTORY_HOST || 'localhost'}:3054`,
    },
  },

  redis: {
    pubsub: {
      host:
        process.env.PUBSUB_REDIS_HOST || process.env.REDIS_HOST || 'localhost',
      port: process.env.PUBSUB_REDIS_PORT || process.env.REDIS_PORT || '6379',
      password:
        process.env.PUBSUB_REDIS_PASSWORD || process.env.REDIS_PASSWORD || '',
      maxRetriesPerRequest: parseInt(
        process.env.REDIS_MAX_RETRIES_PER_REQUEST || '20'
      ),
    },

    history: {
      port: process.env.HISTORY_REDIS_PORT || process.env.REDIS_PORT || '6379',
      host:
        process.env.HISTORY_REDIS_HOST || process.env.REDIS_HOST || 'localhost',
      password:
        process.env.HISTORY_REDIS_PASSWORD || process.env.REDIS_PASSWORD || '',
      maxRetriesPerRequest: parseInt(
        process.env.REDIS_MAX_RETRIES_PER_REQUEST || '20'
      ),
      key_schema: {
        uncompressedHistoryOps({ doc_id: docId }) {
          return `UncompressedHistoryOps:{${docId}}`
        },
        docsWithHistoryOps({ project_id: projectId }) {
          return `DocsWithHistoryOps:{${projectId}}`
        },
      },
    },

    project_history: {
      port:
        process.env.NEW_HISTORY_REDIS_PORT || process.env.REDIS_PORT || '6379',
      host:
        process.env.NEW_HISTORY_REDIS_HOST ||
        process.env.REDIS_HOST ||
        'localhost',
      password:
        process.env.NEW_HISTORY_REDIS_PASSWORD ||
        process.env.REDIS_PASSWORD ||
        '',
      maxRetriesPerRequest: parseInt(
        process.env.REDIS_MAX_RETRIES_PER_REQUEST || '20'
      ),
      key_schema: {
        projectHistoryOps({ project_id: projectId }) {
          return `ProjectHistory:Ops:{${projectId}}`
        },
        projectHistoryFirstOpTimestamp({ project_id: projectId }) {
          return `ProjectHistory:FirstOpTimestamp:{${projectId}}`
        },
      },
    },

    lock: {
      port: process.env.LOCK_REDIS_PORT || process.env.REDIS_PORT || '6379',
      host:
        process.env.LOCK_REDIS_HOST || process.env.REDIS_HOST || 'localhost',
      password:
        process.env.LOCK_REDIS_PASSWORD || process.env.REDIS_PASSWORD || '',
      maxRetriesPerRequest: parseInt(
        process.env.REDIS_MAX_RETRIES_PER_REQUEST || '20'
      ),
      key_schema: {
        blockingKey({ doc_id: docId }) {
          return `Blocking:{${docId}}`
        },
      },
    },

    documentupdater: {
      port:
        process.env.DOC_UPDATER_REDIS_PORT || process.env.REDIS_PORT || '6379',
      host:
        process.env.DOC_UPDATER_REDIS_HOST ||
        process.env.REDIS_HOST ||
        'localhost',
      password:
        process.env.DOC_UPDATER_REDIS_PASSWORD ||
        process.env.REDIS_PASSWORD ||
        '',
      maxRetriesPerRequest: parseInt(
        process.env.REDIS_MAX_RETRIES_PER_REQUEST || '20'
      ),
      key_schema: {
        blockingKey({ doc_id: docId }) {
          return `Blocking:{${docId}}`
        },
        docLines({ doc_id: docId }) {
          return `doclines:{${docId}}`
        },
        docOps({ doc_id: docId }) {
          return `DocOps:{${docId}}`
        },
        docVersion({ doc_id: docId }) {
          return `DocVersion:{${docId}}`
        },
        docHash({ doc_id: docId }) {
          return `DocHash:{${docId}}`
        },
        projectKey({ doc_id: docId }) {
          return `ProjectId:{${docId}}`
        },
        docsInProject({ project_id: projectId }) {
          return `DocsIn:{${projectId}}`
        },
        ranges({ doc_id: docId }) {
          return `Ranges:{${docId}}`
        },
        unflushedTime({ doc_id: docId }) {
          return `UnflushedTime:{${docId}}`
        },
        pathname({ doc_id: docId }) {
          return `Pathname:{${docId}}`
        },
        projectHistoryId({ doc_id: docId }) {
          return `ProjectHistoryId:{${docId}}`
        },
        projectHistoryType({ doc_id: docId }) {
          return `ProjectHistoryType:{${docId}}`
        },
        projectState({ project_id: projectId }) {
          return `ProjectState:{${projectId}}`
        },
        pendingUpdates({ doc_id: docId }) {
          return `PendingUpdates:{${docId}}`
        },
        lastUpdatedBy({ doc_id: docId }) {
          return `lastUpdatedBy:{${docId}}`
        },
        lastUpdatedAt({ doc_id: docId }) {
          return `lastUpdatedAt:{${docId}}`
        },
        flushAndDeleteQueue() {
          return 'DocUpdaterFlushAndDeleteQueue'
        },
      },
    },
  },

  max_doc_length: 2 * 1024 * 1024, // 2mb
  maxJsonRequestSize:
    parseInt(process.env.MAX_JSON_REQUEST_SIZE, 10) || 8 * 1024 * 1024,

  dispatcherCount: parseInt(process.env.DISPATCHER_COUNT || 10, 10),

  mongo: {
    options: {
      useUnifiedTopology:
        (process.env.MONGO_USE_UNIFIED_TOPOLOGY || 'true') === 'true',
    },
    url:
      process.env.MONGO_CONNECTION_STRING ||
      `mongodb://${process.env.MONGO_HOST || '127.0.0.1'}/sharelatex`,
  },

  sentry: {
    dsn: process.env.SENTRY_DSN,
  },

  publishOnIndividualChannels:
    process.env.PUBLISH_ON_INDIVIDUAL_CHANNELS || false,

  continuousBackgroundFlush: process.env.CONTINUOUS_BACKGROUND_FLUSH || false,

  smoothingOffset: process.env.SMOOTHING_OFFSET || 1000, // milliseconds

  disableDoubleFlush: process.env.DISABLE_DOUBLE_FLUSH || false, // don't flush track-changes for projects using project-history
}
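Every per-doc key in the schema above embeds the doc id in a {...} hash tag, so Redis Cluster hashes only the tag and keeps all of a doc's keys in the same slot; that is what makes cross-key commands over them legal in cluster mode. A quick check, using the cluster-key-slot package from the service's devDependencies:

const calculateSlot = require('cluster-key-slot')

const docId = '606072b20bb4d3109fb5b11e' // example id reused from the benchmark above
// Only the text between '{' and '}' is hashed, so both keys map to one slot.
console.log(calculateSlot(`doclines:{${docId}}`))
console.log(calculateSlot(`DocVersion:{${docId}}`)) // same slot as the line above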
57
services/document-updater/docker-compose.ci.yml
Normal file
@@ -0,0 +1,57 @@
# This file was auto-generated, do not edit it directly.
# Instead run bin/update_build_scripts from
# https://github.com/sharelatex/sharelatex-dev-environment

version: "2.3"

services:
  test_unit:
    image: ci/$PROJECT_NAME:$BRANCH_NAME-$BUILD_NUMBER
    user: node
    command: npm run test:unit:_run
    environment:
      NODE_ENV: test
      NODE_OPTIONS: "--unhandled-rejections=strict"

  test_acceptance:
    build: .
    image: ci/$PROJECT_NAME:$BRANCH_NAME-$BUILD_NUMBER
    environment:
      ELASTIC_SEARCH_DSN: es:9200
      REDIS_HOST: redis
      QUEUES_REDIS_HOST: redis
      MONGO_HOST: mongo
      POSTGRES_HOST: postgres
      MOCHA_GREP: ${MOCHA_GREP}
      NODE_ENV: test
      NODE_OPTIONS: "--unhandled-rejections=strict"
    depends_on:
      mongo:
        condition: service_healthy
      redis:
        condition: service_healthy
    user: node
    command: npm run test:acceptance:_run

  tar:
    build: .
    image: ci/$PROJECT_NAME:$BRANCH_NAME-$BUILD_NUMBER
    volumes:
      - ./:/tmp/build/
    command: tar -czf /tmp/build/build.tar.gz --exclude=build.tar.gz --exclude-vcs .
    user: root

  redis:
    image: redis
    healthcheck:
      test: ping="$$(redis-cli ping)" && [ "$$ping" = 'PONG' ]
      interval: 1s
      retries: 20

  mongo:
    image: mongo:4.0
    healthcheck:
      test: "mongo --quiet localhost/test --eval 'quit(db.runCommand({ ping: 1 }).ok ? 0 : 1)'"
      interval: 1s
      retries: 20
56
services/document-updater/docker-compose.yml
Normal file
@@ -0,0 +1,56 @@
# This file was auto-generated, do not edit it directly.
# Instead run bin/update_build_scripts from
# https://github.com/sharelatex/sharelatex-dev-environment

version: "2.3"

services:
  test_unit:
    image: node:12.22.3
    volumes:
      - .:/app
    working_dir: /app
    environment:
      MOCHA_GREP: ${MOCHA_GREP}
      NODE_ENV: test
      NODE_OPTIONS: "--unhandled-rejections=strict"
    command: npm run --silent test:unit
    user: node

  test_acceptance:
    image: node:12.22.3
    volumes:
      - .:/app
    working_dir: /app
    environment:
      ELASTIC_SEARCH_DSN: es:9200
      REDIS_HOST: redis
      QUEUES_REDIS_HOST: redis
      MONGO_HOST: mongo
      POSTGRES_HOST: postgres
      MOCHA_GREP: ${MOCHA_GREP}
      LOG_LEVEL: ERROR
      NODE_ENV: test
      NODE_OPTIONS: "--unhandled-rejections=strict"
    user: node
    depends_on:
      mongo:
        condition: service_healthy
      redis:
        condition: service_healthy
    command: npm run --silent test:acceptance

  redis:
    image: redis
    healthcheck:
      test: ping=$$(redis-cli ping) && [ "$$ping" = 'PONG' ]
      interval: 1s
      retries: 20

  mongo:
    image: mongo:4.0
    healthcheck:
      test: "mongo --quiet localhost/test --eval 'quit(db.runCommand({ ping: 1 }).ok ? 0 : 1)'"
      interval: 1s
      retries: 20
65
services/document-updater/expire_docops.js
Normal file
@@ -0,0 +1,65 @@
const Settings = require('@overleaf/settings')
const rclient = require('@overleaf/redis-wrapper').createClient(
  Settings.redis.documentupdater
)
const keySchema = Settings.redis.documentupdater.key_schema
const async = require('async')
const RedisManager = require('./app/js/RedisManager')

const getKeysFromNode = function (node, pattern, callback) {
  let cursor = 0 // redis iterator
  const keySet = {} // use hash to avoid duplicate results
  // scan over all keys looking for pattern
  const doIteration = () =>
    node.scan(cursor, 'MATCH', pattern, 'COUNT', 1000, function (error, reply) {
      if (error) {
        return callback(error)
      }
      let batch
      ;[cursor, batch] = reply
      console.log('SCAN', batch.length)
      for (const key of batch) {
        keySet[key] = true
      }
      if (cursor === '0') {
        // note redis returns string result not numeric
        return callback(null, Object.keys(keySet))
      } else {
        return doIteration()
      }
    })
  return doIteration()
}

const getKeys = function (pattern, callback) {
  const nodes = (typeof rclient.nodes === 'function'
    ? rclient.nodes('master')
    : undefined) || [rclient]
  console.log('GOT NODES', nodes.length)
  const doKeyLookupForNode = (node, cb) => getKeysFromNode(node, pattern, cb)
  return async.concatSeries(nodes, doKeyLookupForNode, callback)
}

const expireDocOps = callback =>
  // eslint-disable-next-line handle-callback-err
  getKeys(keySchema.docOps({ doc_id: '*' }), (error, keys) =>
    async.mapSeries(
      keys,
      function (key, cb) {
        console.log(`EXPIRE ${key} ${RedisManager.DOC_OPS_TTL}`)
        return rclient.expire(key, RedisManager.DOC_OPS_TTL, cb)
      },
      callback
    )
  )

setTimeout(
  () =>
    // Give redis a chance to connect
    expireDocOps(function (error) {
      if (error) {
        throw error
      }
      return process.exit()
    }),
  1000
)
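The cursor loop in getKeysFromNode above can also be expressed with ioredis's scanStream helper, which drives the same SCAN/MATCH/COUNT iteration and finishes once the cursor returns to '0'; a minimal sketch, assuming direct ioredis access rather than the wrapper:

const Redis = require('ioredis')

const redis = new Redis()
const found = new Set() // de-duplicates keys, like the keySet hash above

const stream = redis.scanStream({ match: 'DocOps:{*}', count: 1000 })
stream.on('data', batch => {
  // each 'data' event carries one SCAN reply's worth of keys
  for (const key of batch) found.add(key)
})
stream.on('end', () => {
  console.log('found', found.size, 'keys')
  redis.quit()
})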
17
services/document-updater/nodemon.json
Normal file
@@ -0,0 +1,17 @@
{
  "ignore": [
    ".git",
    "node_modules/"
  ],
  "verbose": true,
  "legacyWatch": true,
  "execMap": {
    "js": "npm run start"
  },
  "watch": [
    "app/js/",
    "app.js",
    "config/"
  ],
  "ext": "js"
}
5043
services/document-updater/package-lock.json
generated
Normal file
File diff suppressed because it is too large
57
services/document-updater/package.json
Normal file
@@ -0,0 +1,57 @@
{
  "name": "document-updater-sharelatex",
  "version": "0.1.4",
  "description": "An API for applying incoming updates to documents in real-time",
  "repository": {
    "type": "git",
    "url": "https://github.com/sharelatex/document-updater-sharelatex.git"
  },
  "scripts": {
    "start": "node $NODE_APP_OPTIONS app.js",
    "test:acceptance:_run": "mocha --recursive --reporter spec --timeout 15000 --exit $@ test/acceptance/js",
    "test:acceptance": "npm run test:acceptance:_run -- --grep=$MOCHA_GREP",
    "test:unit:_run": "mocha --recursive --reporter spec $@ test/unit/js",
    "test:unit": "npm run test:unit:_run -- --grep=$MOCHA_GREP",
    "nodemon": "nodemon --config nodemon.json",
    "lint": "eslint --max-warnings 0 --format unix .",
    "format": "prettier --list-different $PWD/'**/*.js'",
    "format:fix": "prettier --write $PWD/'**/*.js'",
    "lint:fix": "eslint --fix ."
  },
  "dependencies": {
    "@overleaf/metrics": "^3.5.1",
    "@overleaf/o-error": "^3.3.1",
    "@overleaf/redis-wrapper": "^2.0.1",
    "@overleaf/settings": "^2.1.1",
    "async": "^2.5.0",
    "body-parser": "^1.19.0",
    "bunyan": "^1.8.15",
    "diff-match-patch": "https://github.com/overleaf/diff-match-patch/archive/89805f9c671a77a263fc53461acd62aa7498f688.tar.gz",
    "express": "4.17.1",
    "lodash": "^4.17.21",
    "logger-sharelatex": "^2.2.0",
    "mongodb": "^3.6.6",
    "request": "^2.88.2",
    "requestretry": "^4.1.2"
  },
  "devDependencies": {
    "chai": "^4.2.0",
    "chai-as-promised": "^7.1.1",
    "cluster-key-slot": "^1.0.5",
    "eslint": "^7.21.0",
    "eslint-config-prettier": "^8.1.0",
    "eslint-config-standard": "^16.0.2",
    "eslint-plugin-chai-expect": "^2.2.0",
    "eslint-plugin-chai-friendly": "^0.6.0",
    "eslint-plugin-import": "^2.22.1",
    "eslint-plugin-mocha": "^8.0.0",
    "eslint-plugin-node": "^11.1.0",
    "eslint-plugin-prettier": "^3.1.2",
    "eslint-plugin-promise": "^4.2.1",
    "mocha": "^8.3.2",
    "prettier": "^2.2.1",
    "sandboxed-module": "^2.0.4",
    "sinon": "^9.0.2",
    "timekeeper": "^2.0.0"
  }
}
5
services/document-updater/redis_cluster/7000/redis.conf
Normal file
@@ -0,0 +1,5 @@
port 7000
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
appendonly yes
5
services/document-updater/redis_cluster/7001/redis.conf
Normal file
@@ -0,0 +1,5 @@
port 7001
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
appendonly yes
5
services/document-updater/redis_cluster/7002/redis.conf
Normal file
@@ -0,0 +1,5 @@
port 7002
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
appendonly yes
5
services/document-updater/redis_cluster/7003/redis.conf
Normal file
@@ -0,0 +1,5 @@
port 7003
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
appendonly yes
5
services/document-updater/redis_cluster/7004/redis.conf
Normal file
@@ -0,0 +1,5 @@
port 7004
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
appendonly yes
5
services/document-updater/redis_cluster/7005/redis.conf
Normal file
@@ -0,0 +1,5 @@
port 7005
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
appendonly yes
23
services/document-updater/redis_cluster/redis-cluster.sh
Executable file
@@ -0,0 +1,23 @@
#!/bin/sh

(cd 7000 && redis-server redis.conf) &
PID1="$!"

(cd 7001 && redis-server redis.conf) &
PID2="$!"

(cd 7002 && redis-server redis.conf) &
PID3="$!"

(cd 7003 && redis-server redis.conf) &
PID4="$!"

(cd 7004 && redis-server redis.conf) &
PID5="$!"

(cd 7005 && redis-server redis.conf) &
PID6="$!"

trap "kill $PID1 $PID2 $PID3 $PID4 $PID5 $PID6" exit INT TERM

wait
1696
services/document-updater/redis_cluster/redis-trib.rb
Executable file
File diff suppressed because it is too large
@@ -0,0 +1,849 @@
/* eslint-disable
    camelcase,
    handle-callback-err,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
const sinon = require('sinon')
const { expect } = require('chai')
const async = require('async')
const Settings = require('@overleaf/settings')
const rclient_history = require('@overleaf/redis-wrapper').createClient(
  Settings.redis.history
) // note: this is track changes, not project-history
const rclient_project_history = require('@overleaf/redis-wrapper').createClient(
  Settings.redis.project_history
)
const rclient_du = require('@overleaf/redis-wrapper').createClient(
  Settings.redis.documentupdater
)
const Keys = Settings.redis.documentupdater.key_schema
const HistoryKeys = Settings.redis.history.key_schema
const ProjectHistoryKeys = Settings.redis.project_history.key_schema

const MockTrackChangesApi = require('./helpers/MockTrackChangesApi')
const MockWebApi = require('./helpers/MockWebApi')
const DocUpdaterClient = require('./helpers/DocUpdaterClient')
const DocUpdaterApp = require('./helpers/DocUpdaterApp')

describe('Applying updates to a doc', function () {
  before(function (done) {
    this.lines = ['one', 'two', 'three']
    this.version = 42
    this.update = {
      doc: this.doc_id,
      op: [
        {
          i: 'one and a half\n',
          p: 4,
        },
      ],
      v: this.version,
    }
    this.result = ['one', 'one and a half', 'two', 'three']
    return DocUpdaterApp.ensureRunning(done)
  })
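  // A small sketch (not the service's real code path) of how the insert op
  // above yields this.result: position p indexes into the doc text with the
  // lines joined by newlines, so p=4 lands just after the first '\n'.
  function applyInsert(lines, op) {
    const text = lines.join('\n')
    return (text.slice(0, op.p) + op.i + text.slice(op.p)).split('\n')
  }
  // applyInsert(['one', 'two', 'three'], { i: 'one and a half\n', p: 4 })
  // => ['one', 'one and a half', 'two', 'three']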
  describe('when the document is not loaded', function () {
    before(function (done) {
      ;[this.project_id, this.doc_id] = Array.from([
        DocUpdaterClient.randomId(),
        DocUpdaterClient.randomId(),
      ])
      sinon.spy(MockWebApi, 'getDocument')
      this.startTime = Date.now()
      MockWebApi.insertDoc(this.project_id, this.doc_id, {
        lines: this.lines,
        version: this.version,
      })
      DocUpdaterClient.sendUpdate(
        this.project_id,
        this.doc_id,
        this.update,
        error => {
          if (error != null) {
            throw error
          }
          return setTimeout(done, 200)
        }
      )
      return null
    })

    after(function () {
      return MockWebApi.getDocument.restore()
    })

    it('should load the document from the web API', function () {
      return MockWebApi.getDocument
        .calledWith(this.project_id, this.doc_id)
        .should.equal(true)
    })

    it('should update the doc', function (done) {
      DocUpdaterClient.getDoc(
        this.project_id,
        this.doc_id,
        (error, res, doc) => {
          doc.lines.should.deep.equal(this.result)
          return done()
        }
      )
      return null
    })

    it('should push the applied updates to the track changes api', function (done) {
      rclient_history.lrange(
        HistoryKeys.uncompressedHistoryOps({ doc_id: this.doc_id }),
        0,
        -1,
        (error, updates) => {
          if (error != null) {
            throw error
          }
          JSON.parse(updates[0]).op.should.deep.equal(this.update.op)
          return rclient_history.sismember(
            HistoryKeys.docsWithHistoryOps({ project_id: this.project_id }),
            this.doc_id,
            (error, result) => {
              if (error != null) {
                throw error
              }
              result.should.equal(1)
              return done()
            }
          )
        }
      )
      return null
    })

    it('should push the applied updates to the project history changes api', function (done) {
      rclient_project_history.lrange(
        ProjectHistoryKeys.projectHistoryOps({ project_id: this.project_id }),
        0,
        -1,
        (error, updates) => {
          if (error != null) {
            throw error
          }
          JSON.parse(updates[0]).op.should.deep.equal(this.update.op)
          return done()
        }
      )
      return null
    })

    it('should set the first op timestamp', function (done) {
      rclient_project_history.get(
        ProjectHistoryKeys.projectHistoryFirstOpTimestamp({
          project_id: this.project_id,
        }),
        (error, result) => {
          if (error != null) {
            throw error
          }
          result = parseInt(result, 10)
          result.should.be.within(this.startTime, Date.now())
          this.firstOpTimestamp = result
          return done()
        }
      )
      return null
    })

    return describe('when sending another update', function () {
      before(function (done) {
        this.timeout = 10000
        this.second_update = Object.create(this.update)
        this.second_update.v = this.version + 1
        DocUpdaterClient.sendUpdate(
          this.project_id,
          this.doc_id,
          this.second_update,
          error => {
            if (error != null) {
              throw error
            }
            return setTimeout(done, 200)
          }
        )
        return null
      })

      return it('should not change the first op timestamp', function (done) {
        rclient_project_history.get(
          ProjectHistoryKeys.projectHistoryFirstOpTimestamp({
            project_id: this.project_id,
          }),
          (error, result) => {
            if (error != null) {
              throw error
            }
            result = parseInt(result, 10)
            result.should.equal(this.firstOpTimestamp)
            return done()
          }
        )
        return null
      })
    })
  })
  describe('when the document is loaded', function () {
    before(function (done) {
      ;[this.project_id, this.doc_id] = Array.from([
        DocUpdaterClient.randomId(),
        DocUpdaterClient.randomId(),
      ])

      MockWebApi.insertDoc(this.project_id, this.doc_id, {
        lines: this.lines,
        version: this.version,
      })
      DocUpdaterClient.preloadDoc(this.project_id, this.doc_id, error => {
        if (error != null) {
          throw error
        }
        sinon.spy(MockWebApi, 'getDocument')
        return DocUpdaterClient.sendUpdate(
          this.project_id,
          this.doc_id,
          this.update,
          error => {
            if (error != null) {
              throw error
            }
            return setTimeout(done, 200)
          }
        )
      })
      return null
    })

    after(function () {
      return MockWebApi.getDocument.restore()
    })

    it('should not need to call the web api', function () {
      return MockWebApi.getDocument.called.should.equal(false)
    })

    it('should update the doc', function (done) {
      DocUpdaterClient.getDoc(
        this.project_id,
        this.doc_id,
        (error, res, doc) => {
          doc.lines.should.deep.equal(this.result)
          return done()
        }
      )
      return null
    })

    it('should push the applied updates to the track changes api', function (done) {
      rclient_history.lrange(
        HistoryKeys.uncompressedHistoryOps({ doc_id: this.doc_id }),
        0,
        -1,
        (error, updates) => {
          JSON.parse(updates[0]).op.should.deep.equal(this.update.op)
          return rclient_history.sismember(
            HistoryKeys.docsWithHistoryOps({ project_id: this.project_id }),
            this.doc_id,
            (error, result) => {
              result.should.equal(1)
              return done()
            }
          )
        }
      )
      return null
    })

    return it('should push the applied updates to the project history changes api', function (done) {
      rclient_project_history.lrange(
        ProjectHistoryKeys.projectHistoryOps({ project_id: this.project_id }),
        0,
        -1,
        (error, updates) => {
          JSON.parse(updates[0]).op.should.deep.equal(this.update.op)
          return done()
        }
      )
      return null
    })
  })
  describe('when the document is loaded and is using project-history only', function () {
    before(function (done) {
      ;[this.project_id, this.doc_id] = Array.from([
        DocUpdaterClient.randomId(),
        DocUpdaterClient.randomId(),
      ])

      MockWebApi.insertDoc(this.project_id, this.doc_id, {
        lines: this.lines,
        version: this.version,
        projectHistoryType: 'project-history',
      })
      DocUpdaterClient.preloadDoc(this.project_id, this.doc_id, error => {
        if (error != null) {
          throw error
        }
        sinon.spy(MockWebApi, 'getDocument')
        return DocUpdaterClient.sendUpdate(
          this.project_id,
          this.doc_id,
          this.update,
          error => {
            if (error != null) {
              throw error
            }
            return setTimeout(done, 200)
          }
        )
      })
      return null
    })

    after(function () {
      return MockWebApi.getDocument.restore()
    })

    it('should update the doc', function (done) {
      DocUpdaterClient.getDoc(
        this.project_id,
        this.doc_id,
        (error, res, doc) => {
          doc.lines.should.deep.equal(this.result)
          return done()
        }
      )
      return null
    })

    it('should not push any applied updates to the track changes api', function (done) {
      rclient_history.lrange(
        HistoryKeys.uncompressedHistoryOps({ doc_id: this.doc_id }),
        0,
        -1,
        (error, updates) => {
          updates.length.should.equal(0)
          return done()
        }
      )
      return null
    })

    return it('should push the applied updates to the project history changes api', function (done) {
      rclient_project_history.lrange(
        ProjectHistoryKeys.projectHistoryOps({ project_id: this.project_id }),
        0,
        -1,
        (error, updates) => {
          JSON.parse(updates[0]).op.should.deep.equal(this.update.op)
          return done()
        }
      )
      return null
    })
  })
  describe('when the document has been deleted', function () {
    describe('when the ops come in a single linear order', function () {
      before(function (done) {
        ;[this.project_id, this.doc_id] = Array.from([
          DocUpdaterClient.randomId(),
          DocUpdaterClient.randomId(),
        ])
        const lines = ['', '', '']
        MockWebApi.insertDoc(this.project_id, this.doc_id, {
          lines,
          version: 0,
        })
        this.updates = [
          { doc_id: this.doc_id, v: 0, op: [{ i: 'h', p: 0 }] },
          { doc_id: this.doc_id, v: 1, op: [{ i: 'e', p: 1 }] },
          { doc_id: this.doc_id, v: 2, op: [{ i: 'l', p: 2 }] },
          { doc_id: this.doc_id, v: 3, op: [{ i: 'l', p: 3 }] },
          { doc_id: this.doc_id, v: 4, op: [{ i: 'o', p: 4 }] },
          { doc_id: this.doc_id, v: 5, op: [{ i: ' ', p: 5 }] },
          { doc_id: this.doc_id, v: 6, op: [{ i: 'w', p: 6 }] },
          { doc_id: this.doc_id, v: 7, op: [{ i: 'o', p: 7 }] },
          { doc_id: this.doc_id, v: 8, op: [{ i: 'r', p: 8 }] },
          { doc_id: this.doc_id, v: 9, op: [{ i: 'l', p: 9 }] },
          { doc_id: this.doc_id, v: 10, op: [{ i: 'd', p: 10 }] },
        ]
        this.my_result = ['hello world', '', '']
        return done()
      })

      it('should be able to continue applying updates when the project has been deleted', function (done) {
        let update
        const actions = []
        for (update of Array.from(this.updates.slice(0, 6))) {
          ;(update => {
            return actions.push(callback =>
              DocUpdaterClient.sendUpdate(
                this.project_id,
                this.doc_id,
                update,
                callback
              )
            )
          })(update)
        }
        actions.push(callback =>
          DocUpdaterClient.deleteDoc(this.project_id, this.doc_id, callback)
        )
        for (update of Array.from(this.updates.slice(6))) {
          ;(update => {
            return actions.push(callback =>
              DocUpdaterClient.sendUpdate(
                this.project_id,
                this.doc_id,
                update,
                callback
              )
            )
          })(update)
        }

        async.series(actions, error => {
          if (error != null) {
            throw error
          }
          return DocUpdaterClient.getDoc(
            this.project_id,
            this.doc_id,
            (error, res, doc) => {
              doc.lines.should.deep.equal(this.my_result)
              return done()
            }
          )
        })
        return null
      })

      it('should push the applied updates to the track changes api', function (done) {
        rclient_history.lrange(
          HistoryKeys.uncompressedHistoryOps({ doc_id: this.doc_id }),
          0,
          -1,
          (error, updates) => {
            updates = Array.from(updates).map(u => JSON.parse(u))
            for (let i = 0; i < this.updates.length; i++) {
              const appliedUpdate = this.updates[i]
              appliedUpdate.op.should.deep.equal(updates[i].op)
            }

            return rclient_history.sismember(
              HistoryKeys.docsWithHistoryOps({ project_id: this.project_id }),
              this.doc_id,
              (error, result) => {
                result.should.equal(1)
                return done()
              }
            )
          }
        )
        return null
      })

      return it('should store the doc ops in the correct order', function (done) {
        rclient_du.lrange(
          Keys.docOps({ doc_id: this.doc_id }),
          0,
          -1,
          (error, updates) => {
            updates = Array.from(updates).map(u => JSON.parse(u))
            for (let i = 0; i < this.updates.length; i++) {
              const appliedUpdate = this.updates[i]
              appliedUpdate.op.should.deep.equal(updates[i].op)
            }
            return done()
          }
        )
        return null
      })
    })

    return describe('when older ops come in after the delete', function () {
      before(function (done) {
        ;[this.project_id, this.doc_id] = Array.from([
          DocUpdaterClient.randomId(),
          DocUpdaterClient.randomId(),
        ])
        const lines = ['', '', '']
        MockWebApi.insertDoc(this.project_id, this.doc_id, {
          lines,
          version: 0,
        })
        this.updates = [
          { doc_id: this.doc_id, v: 0, op: [{ i: 'h', p: 0 }] },
          { doc_id: this.doc_id, v: 1, op: [{ i: 'e', p: 1 }] },
          { doc_id: this.doc_id, v: 2, op: [{ i: 'l', p: 2 }] },
          { doc_id: this.doc_id, v: 3, op: [{ i: 'l', p: 3 }] },
          { doc_id: this.doc_id, v: 4, op: [{ i: 'o', p: 4 }] },
          { doc_id: this.doc_id, v: 0, op: [{ i: 'world', p: 1 }] },
        ]
        this.my_result = ['hello', 'world', '']
        return done()
      })

      return it('should be able to continue applying updates when the project has been deleted', function (done) {
        let update
        const actions = []
        for (update of Array.from(this.updates.slice(0, 5))) {
          ;(update => {
            return actions.push(callback =>
              DocUpdaterClient.sendUpdate(
                this.project_id,
                this.doc_id,
                update,
                callback
              )
            )
          })(update)
        }
        actions.push(callback =>
          DocUpdaterClient.deleteDoc(this.project_id, this.doc_id, callback)
        )
        for (update of Array.from(this.updates.slice(5))) {
          ;(update => {
            return actions.push(callback =>
              DocUpdaterClient.sendUpdate(
                this.project_id,
                this.doc_id,
                update,
                callback
              )
            )
          })(update)
        }

        async.series(actions, error => {
          if (error != null) {
            throw error
          }
          return DocUpdaterClient.getDoc(
            this.project_id,
            this.doc_id,
            (error, res, doc) => {
              doc.lines.should.deep.equal(this.my_result)
              return done()
            }
          )
        })
        return null
      })
    })
  })
  describe('with a broken update', function () {
    before(function (done) {
      ;[this.project_id, this.doc_id] = Array.from([
        DocUpdaterClient.randomId(),
        DocUpdaterClient.randomId(),
      ])
      this.broken_update = {
        doc_id: this.doc_id,
        v: this.version,
        op: [{ d: 'not the correct content', p: 0 }],
      }
      MockWebApi.insertDoc(this.project_id, this.doc_id, {
        lines: this.lines,
        version: this.version,
      })

      DocUpdaterClient.subscribeToAppliedOps(
        (this.messageCallback = sinon.stub())
      )

      DocUpdaterClient.sendUpdate(
        this.project_id,
        this.doc_id,
        this.broken_update,
        error => {
          if (error != null) {
            throw error
          }
          return setTimeout(done, 200)
        }
      )
      return null
    })

    it('should not update the doc', function (done) {
      DocUpdaterClient.getDoc(
        this.project_id,
        this.doc_id,
        (error, res, doc) => {
          doc.lines.should.deep.equal(this.lines)
          return done()
        }
      )
      return null
    })

    return it('should send a message with an error', function () {
      this.messageCallback.called.should.equal(true)
      const [channel, message] = Array.from(this.messageCallback.args[0])
      channel.should.equal('applied-ops')
      return JSON.parse(message).should.deep.include({
        project_id: this.project_id,
        doc_id: this.doc_id,
        error: 'Delete component does not match',
      })
    })
  })
  describe('with enough updates to flush to the track changes api', function () {
    before(function (done) {
      ;[this.project_id, this.doc_id] = Array.from([
        DocUpdaterClient.randomId(),
        DocUpdaterClient.randomId(),
      ])
      const updates = []
      for (let v = 0; v <= 199; v++) {
        // Should flush after 100 ops
        updates.push({
          doc_id: this.doc_id,
          op: [{ i: v.toString(), p: 0 }],
          v,
        })
      }

      sinon.spy(MockTrackChangesApi, 'flushDoc')

      MockWebApi.insertDoc(this.project_id, this.doc_id, {
        lines: this.lines,
        version: 0,
      })

      // Send updates in chunks to cause multiple flushes
      const actions = []
      for (let i = 0; i <= 19; i++) {
        ;(i => {
          return actions.push(cb => {
            return DocUpdaterClient.sendUpdates(
              this.project_id,
              this.doc_id,
              updates.slice(i * 10, (i + 1) * 10),
              cb
            )
          })
        })(i)
      }
      async.series(actions, error => {
        if (error != null) {
          throw error
        }
        return setTimeout(done, 2000)
      })
      return null
    })

    after(function () {
      return MockTrackChangesApi.flushDoc.restore()
    })

    return it('should flush the doc twice', function () {
      return MockTrackChangesApi.flushDoc.calledTwice.should.equal(true)
    })
  })
  describe('when there is no version in Mongo', function () {
    before(function (done) {
      ;[this.project_id, this.doc_id] = Array.from([
        DocUpdaterClient.randomId(),
        DocUpdaterClient.randomId(),
      ])
      MockWebApi.insertDoc(this.project_id, this.doc_id, {
        lines: this.lines,
      })

      const update = {
        doc: this.doc_id,
        op: this.update.op,
        v: 0,
      }
      DocUpdaterClient.sendUpdate(
        this.project_id,
        this.doc_id,
        update,
        error => {
          if (error != null) {
            throw error
          }
          return setTimeout(done, 200)
        }
      )
      return null
    })

    return it('should update the doc (using version = 0)', function (done) {
      DocUpdaterClient.getDoc(
        this.project_id,
        this.doc_id,
        (error, res, doc) => {
          doc.lines.should.deep.equal(this.result)
          return done()
        }
      )
      return null
    })
  })
  describe('when sending duplicate ops', function () {
    before(function (done) {
      ;[this.project_id, this.doc_id] = Array.from([
        DocUpdaterClient.randomId(),
        DocUpdaterClient.randomId(),
      ])
      MockWebApi.insertDoc(this.project_id, this.doc_id, {
        lines: this.lines,
        version: this.version,
      })

      DocUpdaterClient.subscribeToAppliedOps(
        (this.messageCallback = sinon.stub())
      )

      // The same op is sent twice from the same source; the second is
      // acknowledged as a duplicate (a NOP) rather than applied again.
      DocUpdaterClient.sendUpdate(
        this.project_id,
        this.doc_id,
        {
          doc: this.doc_id,
          op: [
            {
              i: 'one and a half\n',
              p: 4,
            },
          ],
          v: this.version,
          meta: {
            source: 'ikHceq3yfAdQYzBo4-xZ',
          },
        },
        error => {
          if (error != null) {
            throw error
          }
          return setTimeout(() => {
            return DocUpdaterClient.sendUpdate(
              this.project_id,
              this.doc_id,
              {
                doc: this.doc_id,
                op: [
                  {
                    i: 'one and a half\n',
                    p: 4,
                  },
                ],
                v: this.version,
                dupIfSource: ['ikHceq3yfAdQYzBo4-xZ'],
                meta: {
                  source: 'ikHceq3yfAdQYzBo4-xZ',
                },
              },
              error => {
                if (error != null) {
                  throw error
                }
                return setTimeout(done, 200)
              }
            )
          }, 200)
        }
      )
      return null
    })

    it('should update the doc', function (done) {
      DocUpdaterClient.getDoc(
        this.project_id,
        this.doc_id,
        (error, res, doc) => {
          doc.lines.should.deep.equal(this.result)
          return done()
        }
      )
      return null
    })

    return it('should return a message about duplicate ops', function () {
      this.messageCallback.calledTwice.should.equal(true)
      this.messageCallback.args[0][0].should.equal('applied-ops')
      expect(JSON.parse(this.messageCallback.args[0][1]).op.dup).to.be.undefined
      this.messageCallback.args[1][0].should.equal('applied-ops')
      return expect(
        JSON.parse(this.messageCallback.args[1][1]).op.dup
      ).to.equal(true)
    })
  })
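  // Sketch of the duplicate-detection contract exercised above: a resent op
  // carries the same meta.source together with a dupIfSource list, and the
  // service acknowledges it with op.dup === true instead of re-applying it.
  const resentUpdateShape = {
    doc: 'some-doc-id', // hypothetical id, for illustration only
    op: [{ i: 'one and a half\n', p: 4 }],
    v: 42,
    dupIfSource: ['ikHceq3yfAdQYzBo4-xZ'],
    meta: { source: 'ikHceq3yfAdQYzBo4-xZ' },
  }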
  return describe('when sending updates for a non-existing doc id', function () {
    before(function (done) {
      ;[this.project_id, this.doc_id] = Array.from([
        DocUpdaterClient.randomId(),
        DocUpdaterClient.randomId(),
      ])
      this.non_existing = {
        doc_id: this.doc_id,
        v: this.version,
        op: [{ d: 'content', p: 0 }],
      }

      DocUpdaterClient.subscribeToAppliedOps(
        (this.messageCallback = sinon.stub())
      )

      DocUpdaterClient.sendUpdate(
        this.project_id,
        this.doc_id,
        this.non_existing,
        error => {
          if (error != null) {
            throw error
          }
          return setTimeout(done, 200)
        }
      )
      return null
    })

    it('should not update or create a doc', function (done) {
      DocUpdaterClient.getDoc(
        this.project_id,
        this.doc_id,
        (error, res, doc) => {
          res.statusCode.should.equal(404)
          return done()
        }
      )
      return null
    })

    return it('should send a message with an error', function () {
      this.messageCallback.called.should.equal(true)
      const [channel, message] = Array.from(this.messageCallback.args[0])
      channel.should.equal('applied-ops')
      return JSON.parse(message).should.deep.include({
        project_id: this.project_id,
        doc_id: this.doc_id,
        error: `doc not not found: /project/${this.project_id}/doc/${this.doc_id}`,
      })
    })
  })
})
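The broken-update and duplicate-op tests above assert on messages broadcast over the applied-ops pub/sub channel; a minimal listener sketch, assuming direct ioredis access to the pubsub instance:

const Redis = require('ioredis')

const subscriber = new Redis()
subscriber.subscribe('applied-ops', err => {
  if (err) throw err
})
subscriber.on('message', (channel, message) => {
  // payloads carry either applied ops or error details, as the tests assert
  console.log(channel, JSON.parse(message))
})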
@@ -0,0 +1,503 @@
const sinon = require('sinon')
const Settings = require('@overleaf/settings')
const rclientProjectHistory = require('@overleaf/redis-wrapper').createClient(
  Settings.redis.project_history
)
const ProjectHistoryKeys = Settings.redis.project_history.key_schema

const MockProjectHistoryApi = require('./helpers/MockProjectHistoryApi')
const MockWebApi = require('./helpers/MockWebApi')
const DocUpdaterClient = require('./helpers/DocUpdaterClient')
const DocUpdaterApp = require('./helpers/DocUpdaterApp')

describe("Applying updates to a project's structure", function () {
  before(function () {
    this.user_id = 'user-id-123'
    this.version = 1234
  })
  describe('renaming a file', function () {
    before(function (done) {
      this.project_id = DocUpdaterClient.randomId()
      this.fileUpdate = {
        type: 'rename-file',
        id: DocUpdaterClient.randomId(),
        pathname: '/file-path',
        newPathname: '/new-file-path',
      }
      this.updates = [this.fileUpdate]
      DocUpdaterApp.ensureRunning(error => {
        if (error) {
          return done(error)
        }
        DocUpdaterClient.sendProjectUpdate(
          this.project_id,
          this.user_id,
          this.updates,
          this.version,
          error => {
            if (error) {
              return done(error)
            }
            setTimeout(done, 200)
          }
        )
      })
    })

    it('should push the applied file renames to the project history api', function (done) {
      rclientProjectHistory.lrange(
        ProjectHistoryKeys.projectHistoryOps({ project_id: this.project_id }),
        0,
        -1,
        (error, updates) => {
          if (error) {
            return done(error)
          }

          const update = JSON.parse(updates[0])
          update.file.should.equal(this.fileUpdate.id)
          update.pathname.should.equal('/file-path')
          update.new_pathname.should.equal('/new-file-path')
          update.meta.user_id.should.equal(this.user_id)
          update.meta.ts.should.be.a('string')
          update.version.should.equal(`${this.version}.0`)

          done()
        }
      )
    })
  })
  describe('renaming a document', function () {
    before(function () {
      this.update = {
        type: 'rename-doc',
        id: DocUpdaterClient.randomId(),
        pathname: '/doc-path',
        newPathname: '/new-doc-path',
      }
      this.updates = [this.update]
    })

    describe('when the document is not loaded', function () {
      before(function (done) {
        this.project_id = DocUpdaterClient.randomId()
        DocUpdaterClient.sendProjectUpdate(
          this.project_id,
          this.user_id,
          this.updates,
          this.version,
          error => {
            if (error) {
              return done(error)
            }
            setTimeout(done, 200)
          }
        )
      })

      it('should push the applied doc renames to the project history api', function (done) {
        rclientProjectHistory.lrange(
          ProjectHistoryKeys.projectHistoryOps({ project_id: this.project_id }),
          0,
          -1,
          (error, updates) => {
            if (error) {
              return done(error)
            }

            const update = JSON.parse(updates[0])
            update.doc.should.equal(this.update.id)
            update.pathname.should.equal('/doc-path')
            update.new_pathname.should.equal('/new-doc-path')
            update.meta.user_id.should.equal(this.user_id)
            update.meta.ts.should.be.a('string')
            update.version.should.equal(`${this.version}.0`)

            done()
          }
        )
      })
    })

    describe('when the document is loaded', function () {
      before(function (done) {
        this.project_id = DocUpdaterClient.randomId()
        MockWebApi.insertDoc(this.project_id, this.update.id, {})
        DocUpdaterClient.preloadDoc(this.project_id, this.update.id, error => {
          if (error) {
            return done(error)
          }
          sinon.spy(MockWebApi, 'getDocument')
          DocUpdaterClient.sendProjectUpdate(
            this.project_id,
            this.user_id,
            this.updates,
            this.version,
            error => {
              if (error) {
                return done(error)
              }
              setTimeout(done, 200)
            }
          )
        })
      })

      after(function () {
        MockWebApi.getDocument.restore()
      })

      it('should update the doc', function (done) {
        DocUpdaterClient.getDoc(
          this.project_id,
          this.update.id,
          (error, res, doc) => {
            if (error) {
              return done(error)
            }
            doc.pathname.should.equal(this.update.newPathname)
            done()
          }
        )
      })

      it('should push the applied doc renames to the project history api', function (done) {
        rclientProjectHistory.lrange(
          ProjectHistoryKeys.projectHistoryOps({ project_id: this.project_id }),
          0,
          -1,
          (error, updates) => {
            if (error) {
              return done(error)
            }

            const update = JSON.parse(updates[0])
            update.doc.should.equal(this.update.id)
            update.pathname.should.equal('/doc-path')
            update.new_pathname.should.equal('/new-doc-path')
            update.meta.user_id.should.equal(this.user_id)
            update.meta.ts.should.be.a('string')
            update.version.should.equal(`${this.version}.0`)

            done()
          }
        )
      })
    })
  })
  describe('renaming multiple documents and files', function () {
    before(function () {
      this.docUpdate0 = {
        type: 'rename-doc',
        id: DocUpdaterClient.randomId(),
        pathname: '/doc-path0',
        newPathname: '/new-doc-path0',
      }
      this.docUpdate1 = {
        type: 'rename-doc',
        id: DocUpdaterClient.randomId(),
        pathname: '/doc-path1',
        newPathname: '/new-doc-path1',
      }
      this.fileUpdate0 = {
        type: 'rename-file',
        id: DocUpdaterClient.randomId(),
        pathname: '/file-path0',
        newPathname: '/new-file-path0',
      }
      this.fileUpdate1 = {
        type: 'rename-file',
        id: DocUpdaterClient.randomId(),
        pathname: '/file-path1',
        newPathname: '/new-file-path1',
      }
      this.updates = [
        this.docUpdate0,
        this.docUpdate1,
        this.fileUpdate0,
        this.fileUpdate1,
      ]
    })

    describe('when the documents are not loaded', function () {
      before(function (done) {
        this.project_id = DocUpdaterClient.randomId()
        DocUpdaterClient.sendProjectUpdate(
          this.project_id,
          this.user_id,
          this.updates,
          this.version,
          error => {
            if (error) {
              return done(error)
            }
            setTimeout(done, 200)
          }
        )
      })

      it('should push the applied doc renames to the project history api', function (done) {
        rclientProjectHistory.lrange(
          ProjectHistoryKeys.projectHistoryOps({ project_id: this.project_id }),
          0,
          -1,
          (error, updates) => {
            if (error) {
              return done(error)
            }

            let update = JSON.parse(updates[0])
            update.doc.should.equal(this.docUpdate0.id)
            update.pathname.should.equal('/doc-path0')
            update.new_pathname.should.equal('/new-doc-path0')
            update.meta.user_id.should.equal(this.user_id)
            update.meta.ts.should.be.a('string')
            update.version.should.equal(`${this.version}.0`)

            update = JSON.parse(updates[1])
            update.doc.should.equal(this.docUpdate1.id)
            update.pathname.should.equal('/doc-path1')
            update.new_pathname.should.equal('/new-doc-path1')
            update.meta.user_id.should.equal(this.user_id)
            update.meta.ts.should.be.a('string')
            update.version.should.equal(`${this.version}.1`)

            update = JSON.parse(updates[2])
            update.file.should.equal(this.fileUpdate0.id)
            update.pathname.should.equal('/file-path0')
            update.new_pathname.should.equal('/new-file-path0')
            update.meta.user_id.should.equal(this.user_id)
            update.meta.ts.should.be.a('string')
            update.version.should.equal(`${this.version}.2`)

            update = JSON.parse(updates[3])
            update.file.should.equal(this.fileUpdate1.id)
            update.pathname.should.equal('/file-path1')
            update.new_pathname.should.equal('/new-file-path1')
            update.meta.user_id.should.equal(this.user_id)
            update.meta.ts.should.be.a('string')
            update.version.should.equal(`${this.version}.3`)

            done()
          }
        )
      })
    })
  })
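  // Hypothetical helper illustrating the sub-versioning the assertions above
  // expect: each update in a batch delivered at project version V is recorded
  // as V.<index within the batch>.
  function subVersions(version, updates) {
    return updates.map((update, index) => `${version}.${index}`)
  }
  // subVersions(1234, [docUpdate0, docUpdate1, fileUpdate0, fileUpdate1])
  // => ['1234.0', '1234.1', '1234.2', '1234.3']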
  describe('adding a file', function () {
    before(function (done) {
      this.project_id = DocUpdaterClient.randomId()
      this.fileUpdate = {
        type: 'add-file',
        id: DocUpdaterClient.randomId(),
        pathname: '/file-path',
        url: 'filestore.example.com',
      }
      this.updates = [this.fileUpdate]
      DocUpdaterClient.sendProjectUpdate(
        this.project_id,
        this.user_id,
        this.updates,
        this.version,
        error => {
          if (error) {
            return done(error)
          }
          setTimeout(done, 200)
        }
      )
    })

    it('should push the file addition to the project history api', function (done) {
      rclientProjectHistory.lrange(
        ProjectHistoryKeys.projectHistoryOps({ project_id: this.project_id }),
        0,
        -1,
        (error, updates) => {
          if (error) {
            return done(error)
          }

          const update = JSON.parse(updates[0])
          update.file.should.equal(this.fileUpdate.id)
          update.pathname.should.equal('/file-path')
          update.url.should.equal('filestore.example.com')
          update.meta.user_id.should.equal(this.user_id)
          update.meta.ts.should.be.a('string')
          update.version.should.equal(`${this.version}.0`)

          done()
        }
      )
    })
  })
  describe('adding a doc', function () {
    before(function (done) {
      this.project_id = DocUpdaterClient.randomId()
      this.docUpdate = {
        type: 'add-doc',
        id: DocUpdaterClient.randomId(),
        pathname: '/file-path',
        docLines: 'a\nb',
      }
      this.updates = [this.docUpdate]
      DocUpdaterClient.sendProjectUpdate(
        this.project_id,
        this.user_id,
        this.updates,
        this.version,
        error => {
          if (error) {
            return done(error)
          }
          setTimeout(done, 200)
        }
      )
    })

    it('should push the doc addition to the project history api', function (done) {
      rclientProjectHistory.lrange(
        ProjectHistoryKeys.projectHistoryOps({ project_id: this.project_id }),
        0,
        -1,
        (error, updates) => {
          if (error) {
            return done(error)
          }

          const update = JSON.parse(updates[0])
          update.doc.should.equal(this.docUpdate.id)
          update.pathname.should.equal('/file-path')
          update.docLines.should.equal('a\nb')
          update.meta.user_id.should.equal(this.user_id)
          update.meta.ts.should.be.a('string')
          update.version.should.equal(`${this.version}.0`)

          done()
        }
      )
    })
  })
  describe('with enough updates to flush to the history service', function () {
    before(function (done) {
      this.project_id = DocUpdaterClient.randomId()
      this.user_id = DocUpdaterClient.randomId()
      this.version0 = 12345
      this.version1 = this.version0 + 1
      const updates = []
      for (let v = 0; v <= 599; v++) {
        // Should flush after 500 ops
        updates.push({
          type: 'add-doc',
          id: DocUpdaterClient.randomId(),
          pathname: '/file-' + v,
          docLines: 'a\nb',
        })
      }

      sinon.spy(MockProjectHistoryApi, 'flushProject')

      // Send updates in chunks to cause multiple flushes
      const projectId = this.project_id
      const userId = this.user_id
      DocUpdaterClient.sendProjectUpdate(
        projectId,
        userId,
        updates.slice(0, 250),
        this.version0,
        error => {
          if (error) {
            return done(error)
          }
          DocUpdaterClient.sendProjectUpdate(
            projectId,
            userId,
            updates.slice(250),
            this.version1,
            error => {
              if (error) {
                return done(error)
              }
              setTimeout(done, 2000)
            }
          )
        }
      )
    })

    after(function () {
      MockProjectHistoryApi.flushProject.restore()
    })

    it('should flush project history', function () {
      MockProjectHistoryApi.flushProject
        .calledWith(this.project_id)
        .should.equal(true)
    })
  })
describe('with too few updates to flush to the history service', function () {
|
||||
before(function (done) {
|
||||
this.project_id = DocUpdaterClient.randomId()
|
||||
this.user_id = DocUpdaterClient.randomId()
|
||||
this.version0 = 12345
|
||||
this.version1 = this.version0 + 1
|
||||
|
||||
const updates = []
|
||||
for (let v = 0; v <= 42; v++) {
|
||||
// Should flush after 500 ops
|
||||
updates.push({
|
||||
type: 'add-doc',
|
||||
id: DocUpdaterClient.randomId(),
|
||||
pathname: '/file-' + v,
|
||||
docLines: 'a\nb',
|
||||
})
|
||||
}
|
||||
|
||||
sinon.spy(MockProjectHistoryApi, 'flushProject')
|
||||
|
||||
// Send updates in chunks
|
||||
const projectId = this.project_id
|
||||
const userId = this.project_id
|
||||
DocUpdaterClient.sendProjectUpdate(
|
||||
projectId,
|
||||
userId,
|
||||
updates.slice(0, 10),
|
||||
this.version0,
|
||||
function (error) {
|
||||
if (error) {
|
||||
return done(error)
|
||||
}
|
||||
DocUpdaterClient.sendProjectUpdate(
|
||||
projectId,
|
||||
userId,
|
||||
updates.slice(10),
|
||||
this.version1,
|
||||
error => {
|
||||
if (error) {
|
||||
return done(error)
|
||||
}
|
||||
setTimeout(done, 2000)
|
||||
}
|
||||
)
|
||||
}
|
||||
)
|
||||
})
|
||||
|
||||
after(function () {
|
||||
MockProjectHistoryApi.flushProject.restore()
|
||||
})
|
||||
|
||||
it('should not flush project history', function () {
|
||||
MockProjectHistoryApi.flushProject
|
||||
.calledWith(this.project_id)
|
||||
.should.equal(false)
|
||||
})
|
||||
})
|
||||
})
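The `${this.version}.0` and `${this.version}.3` assertions above encode the sub-version convention for batched project-structure updates: each op in a batch is stamped with the project version plus its index within the batch. A minimal sketch of that convention as the assertions imply it (the helper below is illustrative, not part of the service):

// Illustrative only: op N of a batch sent at project version V is
// recorded as "V.N", matching the version assertions above.
function stampBatchVersions(ops, projectVersion) {
  return ops.map((op, index) => ({
    ...op,
    version: `${projectVersion}.${index}`,
  }))
}

// stampBatchVersions([{ type: 'add-doc' }, { type: 'add-file' }], 42)
// => [{ type: 'add-doc', version: '42.0' }, { type: 'add-file', version: '42.1' }]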
@ -0,0 +1,193 @@
/* eslint-disable
    handle-callback-err,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
const sinon = require('sinon')
const MockTrackChangesApi = require('./helpers/MockTrackChangesApi')
const MockProjectHistoryApi = require('./helpers/MockProjectHistoryApi')
const MockWebApi = require('./helpers/MockWebApi')
const DocUpdaterClient = require('./helpers/DocUpdaterClient')
const DocUpdaterApp = require('./helpers/DocUpdaterApp')

describe('Deleting a document', function () {
  before(function (done) {
    this.lines = ['one', 'two', 'three']
    this.version = 42
    this.update = {
      doc: this.doc_id,
      op: [
        {
          i: 'one and a half\n',
          p: 4,
        },
      ],
      v: this.version,
    }
    this.result = ['one', 'one and a half', 'two', 'three']

    sinon.spy(MockTrackChangesApi, 'flushDoc')
    sinon.spy(MockProjectHistoryApi, 'flushProject')
    return DocUpdaterApp.ensureRunning(done)
  })

  after(function () {
    MockTrackChangesApi.flushDoc.restore()
    return MockProjectHistoryApi.flushProject.restore()
  })

  describe('when the updated doc exists in the doc updater', function () {
    before(function (done) {
      ;[this.project_id, this.doc_id] = Array.from([
        DocUpdaterClient.randomId(),
        DocUpdaterClient.randomId(),
      ])
      sinon.spy(MockWebApi, 'setDocument')
      sinon.spy(MockWebApi, 'getDocument')

      MockWebApi.insertDoc(this.project_id, this.doc_id, {
        lines: this.lines,
        version: this.version,
      })
      return DocUpdaterClient.preloadDoc(
        this.project_id,
        this.doc_id,
        error => {
          if (error != null) {
            throw error
          }
          return DocUpdaterClient.sendUpdate(
            this.project_id,
            this.doc_id,
            this.update,
            error => {
              if (error != null) {
                throw error
              }
              return setTimeout(() => {
                return DocUpdaterClient.deleteDoc(
                  this.project_id,
                  this.doc_id,
                  (error, res, body) => {
                    this.statusCode = res.statusCode
                    return setTimeout(done, 200)
                  }
                )
              }, 200)
            }
          )
        }
      )
    })

    after(function () {
      MockWebApi.setDocument.restore()
      return MockWebApi.getDocument.restore()
    })

    it('should return a 204 status code', function () {
      return this.statusCode.should.equal(204)
    })

    it('should send the updated document and version to the web api', function () {
      return MockWebApi.setDocument
        .calledWith(this.project_id, this.doc_id, this.result, this.version + 1)
        .should.equal(true)
    })

    it('should need to reload the doc if read again', function (done) {
      MockWebApi.getDocument.resetHistory()
      MockWebApi.getDocument.called.should.equal(false)
      return DocUpdaterClient.getDoc(
        this.project_id,
        this.doc_id,
        (error, res, doc) => {
          MockWebApi.getDocument
            .calledWith(this.project_id, this.doc_id)
            .should.equal(true)
          return done()
        }
      )
    })

    it('should flush track changes', function () {
      return MockTrackChangesApi.flushDoc
        .calledWith(this.doc_id)
        .should.equal(true)
    })

    return it('should flush project history', function () {
      return MockProjectHistoryApi.flushProject
        .calledWith(this.project_id)
        .should.equal(true)
    })
  })

  return describe('when the doc is not in the doc updater', function () {
    before(function (done) {
      ;[this.project_id, this.doc_id] = Array.from([
        DocUpdaterClient.randomId(),
        DocUpdaterClient.randomId(),
      ])
      MockWebApi.insertDoc(this.project_id, this.doc_id, {
        lines: this.lines,
      })
      sinon.spy(MockWebApi, 'setDocument')
      sinon.spy(MockWebApi, 'getDocument')
      return DocUpdaterClient.deleteDoc(
        this.project_id,
        this.doc_id,
        (error, res, body) => {
          this.statusCode = res.statusCode
          return setTimeout(done, 200)
        }
      )
    })

    after(function () {
      MockWebApi.setDocument.restore()
      return MockWebApi.getDocument.restore()
    })

    it('should return a 204 status code', function () {
      return this.statusCode.should.equal(204)
    })

    it('should not need to send the updated document to the web api', function () {
      return MockWebApi.setDocument.called.should.equal(false)
    })

    it('should need to reload the doc if read again', function (done) {
      MockWebApi.getDocument.called.should.equal(false)
      return DocUpdaterClient.getDoc(
        this.project_id,
        this.doc_id,
        (error, res, doc) => {
          MockWebApi.getDocument
            .calledWith(this.project_id, this.doc_id)
            .should.equal(true)
          return done()
        }
      )
    })

    it('should flush track changes', function () {
      return MockTrackChangesApi.flushDoc
        .calledWith(this.doc_id)
        .should.equal(true)
    })

    return it('should flush project history', function () {
      return MockProjectHistoryApi.flushProject
        .calledWith(this.project_id)
        .should.equal(true)
    })
  })
})
@ -0,0 +1,293 @@
/* eslint-disable
    camelcase,
    handle-callback-err,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
const sinon = require('sinon')
const async = require('async')

const MockTrackChangesApi = require('./helpers/MockTrackChangesApi')
const MockProjectHistoryApi = require('./helpers/MockProjectHistoryApi')
const MockWebApi = require('./helpers/MockWebApi')
const DocUpdaterClient = require('./helpers/DocUpdaterClient')
const DocUpdaterApp = require('./helpers/DocUpdaterApp')

describe('Deleting a project', function () {
  before(function (done) {
    let doc_id0, doc_id1
    this.project_id = DocUpdaterClient.randomId()
    this.docs = [
      {
        id: (doc_id0 = DocUpdaterClient.randomId()),
        lines: ['one', 'two', 'three'],
        update: {
          doc: doc_id0,
          op: [
            {
              i: 'one and a half\n',
              p: 4,
            },
          ],
          v: 0,
        },
        updatedLines: ['one', 'one and a half', 'two', 'three'],
      },
      {
        id: (doc_id1 = DocUpdaterClient.randomId()),
        lines: ['four', 'five', 'six'],
        update: {
          doc: doc_id1,
          op: [
            {
              i: 'four and a half\n',
              p: 5,
            },
          ],
          v: 0,
        },
        updatedLines: ['four', 'four and a half', 'five', 'six'],
      },
    ]
    for (const doc of Array.from(this.docs)) {
      MockWebApi.insertDoc(this.project_id, doc.id, {
        lines: doc.lines,
        version: doc.update.v,
      })
    }

    return DocUpdaterApp.ensureRunning(done)
  })

  describe('with documents which have been updated', function () {
    before(function (done) {
      sinon.spy(MockWebApi, 'setDocument')
      sinon.spy(MockTrackChangesApi, 'flushDoc')
      sinon.spy(MockProjectHistoryApi, 'flushProject')

      return async.series(
        this.docs.map(doc => {
          return callback => {
            return DocUpdaterClient.preloadDoc(
              this.project_id,
              doc.id,
              error => {
                if (error != null) {
                  return callback(error)
                }
                return DocUpdaterClient.sendUpdate(
                  this.project_id,
                  doc.id,
                  doc.update,
                  error => {
                    return callback(error)
                  }
                )
              }
            )
          }
        }),
        error => {
          if (error != null) {
            throw error
          }
          return setTimeout(() => {
            return DocUpdaterClient.deleteProject(
              this.project_id,
              (error, res, body) => {
                this.statusCode = res.statusCode
                return done()
              }
            )
          }, 200)
        }
      )
    })

    after(function () {
      MockWebApi.setDocument.restore()
      MockTrackChangesApi.flushDoc.restore()
      return MockProjectHistoryApi.flushProject.restore()
    })

    it('should return a 204 status code', function () {
      return this.statusCode.should.equal(204)
    })

    it('should send each document to the web api', function () {
      return Array.from(this.docs).map(doc =>
        MockWebApi.setDocument
          .calledWith(this.project_id, doc.id, doc.updatedLines)
          .should.equal(true)
      )
    })

    it('should need to reload the docs if read again', function (done) {
      sinon.spy(MockWebApi, 'getDocument')
      return async.series(
        this.docs.map(doc => {
          return callback => {
            MockWebApi.getDocument
              .calledWith(this.project_id, doc.id)
              .should.equal(false)
            return DocUpdaterClient.getDoc(
              this.project_id,
              doc.id,
              (error, res, returnedDoc) => {
                MockWebApi.getDocument
                  .calledWith(this.project_id, doc.id)
                  .should.equal(true)
                return callback()
              }
            )
          }
        }),
        () => {
          MockWebApi.getDocument.restore()
          return done()
        }
      )
    })

    it('should flush each doc in track changes', function () {
      return Array.from(this.docs).map(doc =>
        MockTrackChangesApi.flushDoc.calledWith(doc.id).should.equal(true)
      )
    })

    return it('should flush each doc in project history', function () {
      return MockProjectHistoryApi.flushProject
        .calledWith(this.project_id)
        .should.equal(true)
    })
  })

  describe('with the background=true parameter from realtime and no request to flush the queue', function () {
    before(function (done) {
      sinon.spy(MockWebApi, 'setDocument')
      sinon.spy(MockTrackChangesApi, 'flushDoc')
      sinon.spy(MockProjectHistoryApi, 'flushProject')

      return async.series(
        this.docs.map(doc => {
          return callback => {
            return DocUpdaterClient.preloadDoc(
              this.project_id,
              doc.id,
              callback
            )
          }
        }),
        error => {
          if (error != null) {
            throw error
          }
          return setTimeout(() => {
            return DocUpdaterClient.deleteProjectOnShutdown(
              this.project_id,
              (error, res, body) => {
                this.statusCode = res.statusCode
                return done()
              }
            )
          }, 200)
        }
      )
    })

    after(function () {
      MockWebApi.setDocument.restore()
      MockTrackChangesApi.flushDoc.restore()
      return MockProjectHistoryApi.flushProject.restore()
    })

    it('should return a 204 status code', function () {
      return this.statusCode.should.equal(204)
    })

    it('should not send any documents to the web api', function () {
      return MockWebApi.setDocument.called.should.equal(false)
    })

    it('should not flush any docs in track changes', function () {
      return MockTrackChangesApi.flushDoc.called.should.equal(false)
    })

    return it('should not flush to project history', function () {
      return MockProjectHistoryApi.flushProject.called.should.equal(false)
    })
  })

  return describe('with the background=true parameter from realtime and a request to flush the queue', function () {
    before(function (done) {
      sinon.spy(MockWebApi, 'setDocument')
      sinon.spy(MockTrackChangesApi, 'flushDoc')
      sinon.spy(MockProjectHistoryApi, 'flushProject')

      return async.series(
        this.docs.map(doc => {
          return callback => {
            return DocUpdaterClient.preloadDoc(
              this.project_id,
              doc.id,
              callback
            )
          }
        }),
        error => {
          if (error != null) {
            throw error
          }
          return setTimeout(() => {
            return DocUpdaterClient.deleteProjectOnShutdown(
              this.project_id,
              (error, res, body) => {
                this.statusCode = res.statusCode
                // after deleting the project and putting it in the queue, flush the queue
                return setTimeout(
                  () => DocUpdaterClient.flushOldProjects(done),
                  2000
                )
              }
            )
          }, 200)
        }
      )
    })

    after(function () {
      MockWebApi.setDocument.restore()
      MockTrackChangesApi.flushDoc.restore()
      return MockProjectHistoryApi.flushProject.restore()
    })

    it('should return a 204 status code', function () {
      return this.statusCode.should.equal(204)
    })

    it('should send each document to the web api', function () {
      return Array.from(this.docs).map(doc =>
        MockWebApi.setDocument
          .calledWith(this.project_id, doc.id, doc.updatedLines)
          .should.equal(true)
      )
    })

    it('should flush each doc in track changes', function () {
      return Array.from(this.docs).map(doc =>
        MockTrackChangesApi.flushDoc.calledWith(doc.id).should.equal(true)
      )
    })

    return it('should flush to project history', function () {
      return MockProjectHistoryApi.flushProject.called.should.equal(true)
    })
  })
})
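The two background=true suites above pin down the deferred-deletion contract: deleteProjectOnShutdown only queues the project, and nothing is flushed until flushOldProjects drains the queue. A conceptual sketch of that behaviour, assuming nothing about the service's internals:

// Conceptual sketch (not the service's code): background deletion queues
// the project; the flush work happens later, in bulk.
class ShutdownDeleteQueue {
  constructor() {
    this.queued = new Set()
  }

  deleteOnShutdown(projectId) {
    this.queued.add(projectId) // no flush yet: the mock APIs see no calls
  }

  flushOldProjects(flushFn) {
    for (const projectId of this.queued) {
      flushFn(projectId) // now the web API, track changes and history are hit
    }
    this.queued.clear()
  }
}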
@ -0,0 +1,143 @@
/* eslint-disable
    camelcase,
    handle-callback-err,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
const sinon = require('sinon')
const async = require('async')

const MockWebApi = require('./helpers/MockWebApi')
const DocUpdaterClient = require('./helpers/DocUpdaterClient')
const DocUpdaterApp = require('./helpers/DocUpdaterApp')

describe('Flushing a project', function () {
  before(function (done) {
    let doc_id0, doc_id1
    this.project_id = DocUpdaterClient.randomId()
    this.docs = [
      {
        id: (doc_id0 = DocUpdaterClient.randomId()),
        lines: ['one', 'two', 'three'],
        update: {
          doc: doc_id0,
          op: [
            {
              i: 'one and a half\n',
              p: 4,
            },
          ],
          v: 0,
        },
        updatedLines: ['one', 'one and a half', 'two', 'three'],
      },
      {
        id: (doc_id1 = DocUpdaterClient.randomId()),
        lines: ['four', 'five', 'six'],
        update: {
          doc: doc_id1,
          op: [
            {
              i: 'four and a half\n',
              p: 5,
            },
          ],
          v: 0,
        },
        updatedLines: ['four', 'four and a half', 'five', 'six'],
      },
    ]
    for (const doc of Array.from(this.docs)) {
      MockWebApi.insertDoc(this.project_id, doc.id, {
        lines: doc.lines,
        version: doc.update.v,
      })
    }
    return DocUpdaterApp.ensureRunning(done)
  })

  return describe('with documents which have been updated', function () {
    before(function (done) {
      sinon.spy(MockWebApi, 'setDocument')

      return async.series(
        this.docs.map(doc => {
          return callback => {
            return DocUpdaterClient.preloadDoc(
              this.project_id,
              doc.id,
              error => {
                if (error != null) {
                  return callback(error)
                }
                return DocUpdaterClient.sendUpdate(
                  this.project_id,
                  doc.id,
                  doc.update,
                  error => {
                    return callback(error)
                  }
                )
              }
            )
          }
        }),
        error => {
          if (error != null) {
            throw error
          }
          return setTimeout(() => {
            return DocUpdaterClient.flushProject(
              this.project_id,
              (error, res, body) => {
                this.statusCode = res.statusCode
                return done()
              }
            )
          }, 200)
        }
      )
    })

    after(function () {
      return MockWebApi.setDocument.restore()
    })

    it('should return a 204 status code', function () {
      return this.statusCode.should.equal(204)
    })

    it('should send each document to the web api', function () {
      return Array.from(this.docs).map(doc =>
        MockWebApi.setDocument
          .calledWith(this.project_id, doc.id, doc.updatedLines)
          .should.equal(true)
      )
    })

    return it('should update the lines in the doc updater', function (done) {
      return async.series(
        this.docs.map(doc => {
          return callback => {
            return DocUpdaterClient.getDoc(
              this.project_id,
              doc.id,
              (error, res, returnedDoc) => {
                returnedDoc.lines.should.deep.equal(doc.updatedLines)
                return callback()
              }
            )
          }
        }),
        done
      )
    })
  })
})
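Contrasted with the deletion tests in the previous file, flushing persists the updated lines while the docs remain readable from the doc updater afterwards. A conceptual sketch of the distinction, under the assumption that docs live in an in-memory cache keyed by id (this is not the service's implementation):

// Conceptual sketch: flush persists dirty docs; delete persists then evicts.
function flushProject(cache, persist) {
  for (const [docId, doc] of cache) {
    if (doc.dirty) {
      persist(docId, doc.lines, doc.version) // what MockWebApi.setDocument observes
      doc.dirty = false
    }
  }
}

function deleteProject(cache, persist) {
  flushProject(cache, persist)
  cache.clear() // a later read must reload from the web API
}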
@ -0,0 +1,163 @@
/* eslint-disable
    camelcase,
    handle-callback-err,
    no-return-assign,
    no-unused-vars,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
const sinon = require('sinon')
const { expect } = require('chai')
const async = require('async')

const MockWebApi = require('./helpers/MockWebApi')
const DocUpdaterClient = require('./helpers/DocUpdaterClient')
const DocUpdaterApp = require('./helpers/DocUpdaterApp')

describe('Flushing a doc to Mongo', function () {
  before(function (done) {
    this.lines = ['one', 'two', 'three']
    this.version = 42
    this.update = {
      doc: this.doc_id,
      meta: { user_id: 'last-author-fake-id' },
      op: [
        {
          i: 'one and a half\n',
          p: 4,
        },
      ],
      v: this.version,
    }
    this.result = ['one', 'one and a half', 'two', 'three']
    return DocUpdaterApp.ensureRunning(done)
  })

  describe('when the updated doc exists in the doc updater', function () {
    before(function (done) {
      ;[this.project_id, this.doc_id] = Array.from([
        DocUpdaterClient.randomId(),
        DocUpdaterClient.randomId(),
      ])
      sinon.spy(MockWebApi, 'setDocument')

      MockWebApi.insertDoc(this.project_id, this.doc_id, {
        lines: this.lines,
        version: this.version,
      })
      return DocUpdaterClient.sendUpdates(
        this.project_id,
        this.doc_id,
        [this.update],
        error => {
          if (error != null) {
            throw error
          }
          return setTimeout(() => {
            return DocUpdaterClient.flushDoc(this.project_id, this.doc_id, done)
          }, 200)
        }
      )
    })

    after(function () {
      return MockWebApi.setDocument.restore()
    })

    it('should flush the updated doc lines and version to the web api', function () {
      return MockWebApi.setDocument
        .calledWith(this.project_id, this.doc_id, this.result, this.version + 1)
        .should.equal(true)
    })

    return it('should flush the last update author and time to the web api', function () {
      const lastUpdatedAt = MockWebApi.setDocument.lastCall.args[5]
      parseInt(lastUpdatedAt).should.be.closeTo(new Date().getTime(), 30000)

      const lastUpdatedBy = MockWebApi.setDocument.lastCall.args[6]
      return lastUpdatedBy.should.equal('last-author-fake-id')
    })
  })

  describe('when the doc does not exist in the doc updater', function () {
    before(function (done) {
      ;[this.project_id, this.doc_id] = Array.from([
        DocUpdaterClient.randomId(),
        DocUpdaterClient.randomId(),
      ])
      MockWebApi.insertDoc(this.project_id, this.doc_id, {
        lines: this.lines,
      })
      sinon.spy(MockWebApi, 'setDocument')
      return DocUpdaterClient.flushDoc(this.project_id, this.doc_id, done)
    })

    after(function () {
      return MockWebApi.setDocument.restore()
    })

    return it('should not flush the doc to the web api', function () {
      return MockWebApi.setDocument.called.should.equal(false)
    })
  })

  return describe('when the web api http request takes a long time on first request', function () {
    before(function (done) {
      ;[this.project_id, this.doc_id] = Array.from([
        DocUpdaterClient.randomId(),
        DocUpdaterClient.randomId(),
      ])
      MockWebApi.insertDoc(this.project_id, this.doc_id, {
        lines: this.lines,
        version: this.version,
      })
      let t = 30000
      sinon
        .stub(MockWebApi, 'setDocument')
        .callsFake(
          (
            project_id,
            doc_id,
            lines,
            version,
            ranges,
            lastUpdatedAt,
            lastUpdatedBy,
            callback
          ) => {
            if (callback == null) {
              callback = function (error) {}
            }
            setTimeout(callback, t)
            return (t = 0) // only the first call is slow; later calls return immediately
          }
        )
      return DocUpdaterClient.preloadDoc(this.project_id, this.doc_id, done)
    })

    after(function () {
      return MockWebApi.setDocument.restore()
    })

    return it('should still work', function (done) {
      const start = Date.now()
      return DocUpdaterClient.flushDoc(
        this.project_id,
        this.doc_id,
        (error, res, doc) => {
          res.statusCode.should.equal(204)
          const delta = Date.now() - start
          expect(delta).to.be.below(20000)
          return done()
        }
      )
    })
  })
})
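For readers tracing the lastCall.args[5] and args[6] assertions earlier in this file, the index-to-parameter mapping follows directly from the stub signature above:

// Argument order of MockWebApi.setDocument as stubbed in this file:
// args[0] project_id, args[1] doc_id, args[2] lines, args[3] version,
// args[4] ranges, args[5] lastUpdatedAt, args[6] lastUpdatedBy, args[7] callback
const [, , , , , lastUpdatedAt, lastUpdatedBy] =
  MockWebApi.setDocument.lastCall.args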
@ -0,0 +1,290 @@
/* eslint-disable
    camelcase,
    handle-callback-err,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
const sinon = require('sinon')
const { expect } = require('chai')

const MockWebApi = require('./helpers/MockWebApi')
const DocUpdaterClient = require('./helpers/DocUpdaterClient')
const DocUpdaterApp = require('./helpers/DocUpdaterApp')

describe('Getting a document', function () {
  before(function (done) {
    this.lines = ['one', 'two', 'three']
    this.version = 42
    return DocUpdaterApp.ensureRunning(done)
  })

  describe('when the document is not loaded', function () {
    before(function (done) {
      ;[this.project_id, this.doc_id] = Array.from([
        DocUpdaterClient.randomId(),
        DocUpdaterClient.randomId(),
      ])
      sinon.spy(MockWebApi, 'getDocument')

      MockWebApi.insertDoc(this.project_id, this.doc_id, {
        lines: this.lines,
        version: this.version,
      })

      return DocUpdaterClient.getDoc(
        this.project_id,
        this.doc_id,
        (error, res, returnedDoc) => {
          this.returnedDoc = returnedDoc
          return done()
        }
      )
    })

    after(function () {
      return MockWebApi.getDocument.restore()
    })

    it('should load the document from the web API', function () {
      return MockWebApi.getDocument
        .calledWith(this.project_id, this.doc_id)
        .should.equal(true)
    })

    it('should return the document lines', function () {
      return this.returnedDoc.lines.should.deep.equal(this.lines)
    })

    return it('should return the document at its current version', function () {
      return this.returnedDoc.version.should.equal(this.version)
    })
  })

  describe('when the document is already loaded', function () {
    before(function (done) {
      ;[this.project_id, this.doc_id] = Array.from([
        DocUpdaterClient.randomId(),
        DocUpdaterClient.randomId(),
      ])

      MockWebApi.insertDoc(this.project_id, this.doc_id, {
        lines: this.lines,
        version: this.version,
      })
      return DocUpdaterClient.preloadDoc(
        this.project_id,
        this.doc_id,
        error => {
          if (error != null) {
            throw error
          }
          sinon.spy(MockWebApi, 'getDocument')
          return DocUpdaterClient.getDoc(
            this.project_id,
            this.doc_id,
            (error, res, returnedDoc) => {
              this.returnedDoc = returnedDoc
              return done()
            }
          )
        }
      )
    })

    after(function () {
      return MockWebApi.getDocument.restore()
    })

    it('should not load the document from the web API', function () {
      return MockWebApi.getDocument.called.should.equal(false)
    })

    return it('should return the document lines', function () {
      return this.returnedDoc.lines.should.deep.equal(this.lines)
    })
  })

  describe('when the request asks for some recent ops', function () {
    before(function (done) {
      ;[this.project_id, this.doc_id] = Array.from([
        DocUpdaterClient.randomId(),
        DocUpdaterClient.randomId(),
      ])
      MockWebApi.insertDoc(this.project_id, this.doc_id, {
        lines: (this.lines = ['one', 'two', 'three']),
      })

      this.updates = __range__(0, 199, true).map(v => ({
        doc_id: this.doc_id,
        op: [{ i: v.toString(), p: 0 }],
        v,
      }))

      return DocUpdaterClient.sendUpdates(
        this.project_id,
        this.doc_id,
        this.updates,
        error => {
          if (error != null) {
            throw error
          }
          sinon.spy(MockWebApi, 'getDocument')
          return done()
        }
      )
    })

    after(function () {
      return MockWebApi.getDocument.restore()
    })

    describe('when the ops are loaded', function () {
      before(function (done) {
        return DocUpdaterClient.getDocAndRecentOps(
          this.project_id,
          this.doc_id,
          190,
          (error, res, returnedDoc) => {
            this.returnedDoc = returnedDoc
            return done()
          }
        )
      })

      return it('should return the recent ops', function () {
        this.returnedDoc.ops.length.should.equal(10)
        return Array.from(this.updates.slice(190)).map((update, i) =>
          this.returnedDoc.ops[i].op.should.deep.equal(update.op)
        )
      })
    })

    return describe('when the ops are not all loaded', function () {
      before(function (done) {
        // We only track 100 ops
        return DocUpdaterClient.getDocAndRecentOps(
          this.project_id,
          this.doc_id,
          10,
          (error, res, returnedDoc) => {
            this.res = res
            this.returnedDoc = returnedDoc
            return done()
          }
        )
      })

      return it('should return UnprocessableEntity', function () {
        return this.res.statusCode.should.equal(422)
      })
    })
  })

  describe('when the document does not exist', function () {
    before(function (done) {
      ;[this.project_id, this.doc_id] = Array.from([
        DocUpdaterClient.randomId(),
        DocUpdaterClient.randomId(),
      ])
      return DocUpdaterClient.getDoc(
        this.project_id,
        this.doc_id,
        (error, res, doc) => {
          this.statusCode = res.statusCode
          return done()
        }
      )
    })

    return it('should return 404', function () {
      return this.statusCode.should.equal(404)
    })
  })

  describe('when the web api returns an error', function () {
    before(function (done) {
      ;[this.project_id, this.doc_id] = Array.from([
        DocUpdaterClient.randomId(),
        DocUpdaterClient.randomId(),
      ])
      sinon
        .stub(MockWebApi, 'getDocument')
        .callsFake((project_id, doc_id, callback) => {
          if (callback == null) {
            callback = function (error, doc) {}
          }
          return callback(new Error('oops'))
        })
      return DocUpdaterClient.getDoc(
        this.project_id,
        this.doc_id,
        (error, res, doc) => {
          this.statusCode = res.statusCode
          return done()
        }
      )
    })

    after(function () {
      return MockWebApi.getDocument.restore()
    })

    return it('should return 500', function () {
      return this.statusCode.should.equal(500)
    })
  })

  return describe('when the web api http request takes a long time', function () {
    before(function (done) {
      this.timeout(10000)
      ;[this.project_id, this.doc_id] = Array.from([
        DocUpdaterClient.randomId(),
        DocUpdaterClient.randomId(),
      ])
      sinon
        .stub(MockWebApi, 'getDocument')
        .callsFake((project_id, doc_id, callback) => {
          if (callback == null) {
            callback = function (error, doc) {}
          }
          return setTimeout(callback, 30000)
        })
      return done()
    })

    after(function () {
      return MockWebApi.getDocument.restore()
    })

    return it('should return quickly(ish)', function (done) {
      const start = Date.now()
      return DocUpdaterClient.getDoc(
        this.project_id,
        this.doc_id,
        (error, res, doc) => {
          res.statusCode.should.equal(500)
          const delta = Date.now() - start
          expect(delta).to.be.below(20000)
          return done()
        }
      )
    })
  })
})

function __range__(left, right, inclusive) {
  const range = []
  const ascending = left < right
  const end = !inclusive ? right : ascending ? right + 1 : right - 1
  for (let i = left; ascending ? i < end : i > end; ascending ? i++ : i--) {
    range.push(i)
  }
  return range
}
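__range__ is the stand-in that decaffeinate generates for CoffeeScript's range literal; the only call in this file, __range__(0, 199, true), builds the inclusive list [0, 1, ..., 199]. An equivalent modern form:

// Equivalent to __range__(0, 199, true): 200 sequential version numbers.
const versions = Array.from({ length: 200 }, (_, i) => i)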
@ -0,0 +1,171 @@
/* eslint-disable
    handle-callback-err,
    no-unused-vars,
*/
// TODO: This file was created by bulk-decaffeinate.
// Fix any style issues and re-enable lint.
/*
 * decaffeinate suggestions:
 * DS101: Remove unnecessary use of Array.from
 * DS102: Remove unnecessary code created because of implicit returns
 * DS207: Consider shorter variations of null checks
 * Full docs: https://github.com/decaffeinate/decaffeinate/blob/master/docs/suggestions.md
 */
const sinon = require('sinon')
const { expect } = require('chai')

const MockWebApi = require('./helpers/MockWebApi')
const DocUpdaterClient = require('./helpers/DocUpdaterClient')
const DocUpdaterApp = require('./helpers/DocUpdaterApp')

describe('Getting documents for project', function () {
  before(function (done) {
    this.lines = ['one', 'two', 'three']
    this.version = 42
    return DocUpdaterApp.ensureRunning(done)
  })

  describe('when project state hash does not match', function () {
    before(function (done) {
      this.projectStateHash = DocUpdaterClient.randomId()
      ;[this.project_id, this.doc_id] = Array.from([
        DocUpdaterClient.randomId(),
        DocUpdaterClient.randomId(),
      ])

      MockWebApi.insertDoc(this.project_id, this.doc_id, {
        lines: this.lines,
        version: this.version,
      })
      return DocUpdaterClient.preloadDoc(
        this.project_id,
        this.doc_id,
        error => {
          if (error != null) {
            throw error
          }
          return DocUpdaterClient.getProjectDocs(
            this.project_id,
            this.projectStateHash,
            (error, res, returnedDocs) => {
              this.res = res
              this.returnedDocs = returnedDocs
              return done()
            }
          )
        }
      )
    })

    return it('should return a 409 Conflict response', function () {
      return this.res.statusCode.should.equal(409)
    })
  })

  describe('when project state hash matches', function () {
    before(function (done) {
      this.projectStateHash = DocUpdaterClient.randomId()
      ;[this.project_id, this.doc_id] = Array.from([
        DocUpdaterClient.randomId(),
        DocUpdaterClient.randomId(),
      ])

      MockWebApi.insertDoc(this.project_id, this.doc_id, {
        lines: this.lines,
        version: this.version,
      })
      return DocUpdaterClient.preloadDoc(
        this.project_id,
        this.doc_id,
        error => {
          if (error != null) {
            throw error
          }
          return DocUpdaterClient.getProjectDocs(
            this.project_id,
            this.projectStateHash,
            (error, res0, returnedDocs0) => {
              // set the hash
              this.res0 = res0
              this.returnedDocs0 = returnedDocs0
              return DocUpdaterClient.getProjectDocs(
                this.project_id,
                this.projectStateHash,
                (error, res, returnedDocs) => {
                  // the hash should now match
                  this.res = res
                  this.returnedDocs = returnedDocs
                  return done()
                }
              )
            }
          )
        }
      )
    })

    it('should return a 200 response', function () {
      return this.res.statusCode.should.equal(200)
    })

    return it('should return the documents', function () {
      return this.returnedDocs.should.deep.equal([
        { _id: this.doc_id, lines: this.lines, v: this.version },
      ])
    })
  })

  return describe('when the doc has been removed', function () {
    before(function (done) {
      this.projectStateHash = DocUpdaterClient.randomId()
      ;[this.project_id, this.doc_id] = Array.from([
        DocUpdaterClient.randomId(),
        DocUpdaterClient.randomId(),
      ])

      MockWebApi.insertDoc(this.project_id, this.doc_id, {
        lines: this.lines,
        version: this.version,
      })
      return DocUpdaterClient.preloadDoc(
        this.project_id,
        this.doc_id,
        error => {
          if (error != null) {
            throw error
          }
          return DocUpdaterClient.getProjectDocs(
            this.project_id,
            this.projectStateHash,
            (error, res0, returnedDocs0) => {
              // set the hash
              this.res0 = res0
              this.returnedDocs0 = returnedDocs0
              return DocUpdaterClient.deleteDoc(
                this.project_id,
                this.doc_id,
                (error, res, body) => {
                  // delete the doc
                  return DocUpdaterClient.getProjectDocs(
                    this.project_id,
                    this.projectStateHash,
                    (error, res1, returnedDocs) => {
                      // the hash would match, but the doc has been deleted
                      this.res = res1
                      this.returnedDocs = returnedDocs
                      return done()
                    }
                  )
                }
              )
            }
          )
        }
      )
    })

    return it('should return a 409 Conflict response', function () {
      return this.res.statusCode.should.equal(409)
    })
  })
})
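These tests pin down the project state-hash guard: a getProjectDocs call with an unseen hash returns 409 Conflict while recording the hash, and a repeat call with the same hash returns 200 until the project's state changes again. A minimal sketch of a client honouring that contract, reusing the test helper (the single-retry policy itself is an assumption, not something this diff specifies):

// Sketch: retry once after a 409, which the service uses to (re)record
// the caller's state hash before serving the docs.
function getProjectDocsWithRetry(projectId, stateHash, callback) {
  DocUpdaterClient.getProjectDocs(projectId, stateHash, (error, res, docs) => {
    if (error) {
      return callback(error)
    }
    if (res.statusCode === 409) {
      return DocUpdaterClient.getProjectDocs(projectId, stateHash, callback)
    }
    callback(null, res, docs)
  })
}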
99
services/document-updater/test/acceptance/js/PeekingADoc.js
Normal file

@ -0,0 +1,99 @@
const sinon = require('sinon')
const MockWebApi = require('./helpers/MockWebApi')
const DocUpdaterClient = require('./helpers/DocUpdaterClient')
const DocUpdaterApp = require('./helpers/DocUpdaterApp')

describe('Peeking a document', function () {
  before(function (done) {
    this.lines = ['one', 'two', 'three']
    this.version = 42
    return DocUpdaterApp.ensureRunning(done)
  })

  describe('when the document is not loaded', function () {
    before(function (done) {
      this.project_id = DocUpdaterClient.randomId()
      this.doc_id = DocUpdaterClient.randomId()
      sinon.spy(MockWebApi, 'getDocument')

      MockWebApi.insertDoc(this.project_id, this.doc_id, {
        lines: this.lines,
        version: this.version,
      })

      return DocUpdaterClient.peekDoc(
        this.project_id,
        this.doc_id,
        (error, res, returnedDoc) => {
          this.error = error
          this.res = res
          this.returnedDoc = returnedDoc
          return done()
        }
      )
    })

    after(function () {
      return MockWebApi.getDocument.restore()
    })

    it('should return a 404 response', function () {
      this.res.statusCode.should.equal(404)
    })

    it('should not load the document from the web API', function () {
      return MockWebApi.getDocument.called.should.equal(false)
    })
  })

  describe('when the document is already loaded', function () {
    before(function (done) {
      this.project_id = DocUpdaterClient.randomId()
      this.doc_id = DocUpdaterClient.randomId()

      MockWebApi.insertDoc(this.project_id, this.doc_id, {
        lines: this.lines,
        version: this.version,
      })
      return DocUpdaterClient.preloadDoc(
        this.project_id,
        this.doc_id,
        error => {
          if (error != null) {
            throw error
          }
          sinon.spy(MockWebApi, 'getDocument')
          return DocUpdaterClient.getDoc(
            this.project_id,
            this.doc_id,
            (error, res, returnedDoc) => {
              this.res = res
              this.returnedDoc = returnedDoc
              return done()
            }
          )
        }
      )
    })

    after(function () {
      return MockWebApi.getDocument.restore()
    })

    it('should return a 200 response', function () {
      this.res.statusCode.should.equal(200)
    })

    it('should return the document lines', function () {
      return this.returnedDoc.lines.should.deep.equal(this.lines)
    })

    it('should return the document version', function () {
      return this.returnedDoc.version.should.equal(this.version)
    })

    it('should not load the document from the web API', function () {
      return MockWebApi.getDocument.called.should.equal(false)
    })
  })
})
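The suite above isolates peek-versus-get semantics: getDoc loads a missing doc from the web API, while peekDoc only ever reads what is already in memory and returns 404 otherwise. A conceptual sketch of the distinction (not the service's code):

// Conceptual sketch: peek never triggers a load, get does.
function peekDoc(cache, docId) {
  return cache.has(docId) ? cache.get(docId) : null // caller maps null to 404
}

function getDoc(cache, docId, loadFromWebApi) {
  if (!cache.has(docId)) {
    cache.set(docId, loadFromWebApi(docId)) // load on miss
  }
  return cache.get(docId)
}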
Some files were not shown because too many files have changed in this diff.