deploy: Support configuration of upload order
parent f4956d9aae
commit 527cf1ab03

4 changed files with 182 additions and 32 deletions
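The new "order" key under [deployment] takes a list of regular expressions. Uploads are split into groups by the first pattern their local path matches; each group must finish uploading before the next group starts, and files that match no pattern are uploaded last. A minimal config sketch of the option (the target name and bucket URL below are illustrative placeholders, not part of this commit; only the "order" key and the [deployment] layout come from its tests):

[deployment]
# Illustrative ordering: upload JPEGs first, then GIFs, then everything else.
order = [".*\\.jpg$", ".*\\.gif$"]

[[deployment.targets]]
# Placeholder target; any bucket URL supported by Hugo deploy would do.
Name = "example"
URL = "gs://example-bucket"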
deploy/deploy.go (112 changed lines)
@@ -23,7 +23,9 @@ import (
 	"mime"
 	"os"
 	"path/filepath"
+	"regexp"
 	"runtime"
+	"sort"
 	"strings"
 	"sync"
 
@@ -45,14 +47,15 @@ import (
 type Deployer struct {
 	localFs afero.Fs
 
 	target        *target    // the target to deploy to
 	matchers      []*matcher // matchers to apply to uploaded files
+	ordering      []*regexp.Regexp // orders uploads
 	quiet         bool // true reduces STDOUT
 	confirm       bool // true enables confirmation before making changes
 	dryRun        bool // true skips conformations and prints changes instead of applying them
 	force         bool // true forces upload of all files
 	invalidateCDN bool // true enables invalidate CDN cache (if possible)
 	maxDeletes    int  // caps the # of files to delete; -1 to disable
 }
 
 // New constructs a new *Deployer.
@@ -79,6 +82,7 @@ func New(cfg config.Provider, localFs afero.Fs) (*Deployer, error) {
 		localFs:  localFs,
 		target:   tgt,
 		matchers: dcfg.Matchers,
+		ordering: dcfg.ordering,
 		quiet:    cfg.GetBool("quiet"),
 		confirm:  cfg.GetBool("confirm"),
 		dryRun:   cfg.GetBool("dryRun"),
@@ -138,40 +142,55 @@ func (d *Deployer) Deploy(ctx context.Context) error {
 		}
 	}
 
+	// Order the uploads. They are organized in groups; all uploads in a group
+	// must be complete before moving on to the next group.
+	uploadGroups := applyOrdering(d.ordering, uploads)
+
 	// Apply the changes in parallel, using an inverted worker
 	// pool (https://www.youtube.com/watch?v=5zXAHh5tJqQ&t=26m58s).
 	// sem prevents more than nParallel concurrent goroutines.
 	const nParallel = 10
-	sem := make(chan struct{}, nParallel)
 	var errs []error
 	var errMu sync.Mutex // protects errs
 
-	for _, upload := range uploads {
-		if d.dryRun {
-			if !d.quiet {
-				jww.FEEDBACK.Printf("[DRY RUN] Would upload: %v\n", upload)
-			}
-			continue
-		}
-
-		// TODO: Add a progress indicator, as this can take a while
-		// depending on the number of files, upload speed, and size of the
-		// site.
-		sem <- struct{}{}
-		go func(upload *fileToUpload) {
-			if err := doSingleUpload(ctx, bucket, upload); err != nil {
-				errMu.Lock()
-				defer errMu.Unlock()
-				errs = append(errs, err)
-			}
-			<-sem
-		}(upload)
+	for _, uploads := range uploadGroups {
+		// Short-circuit for an empty group.
+		if len(uploads) == 0 {
+			continue
+		}
+
+		// Within the group, apply uploads in parallel.
+		sem := make(chan struct{}, nParallel)
+		for _, upload := range uploads {
+			if d.dryRun {
+				if !d.quiet {
+					jww.FEEDBACK.Printf("[DRY RUN] Would upload: %v\n", upload)
+				}
+				continue
+			}
+			sem <- struct{}{}
+			go func(upload *fileToUpload) {
+				if err := doSingleUpload(ctx, bucket, upload); err != nil {
+					errMu.Lock()
+					defer errMu.Unlock()
+					errs = append(errs, err)
+				}
+				<-sem
+			}(upload)
+		}
+		// Wait for all uploads in the group to finish.
+		for n := nParallel; n > 0; n-- {
+			sem <- struct{}{}
+		}
 	}
 
 	if d.maxDeletes != -1 && len(deletes) > d.maxDeletes {
 		jww.WARN.Printf("Skipping %d deletes because it is more than --maxDeletes (%d). If this is expected, set --maxDeletes to a larger number, or -1 to disable this check.\n", len(deletes), d.maxDeletes)
 	} else {
+		// Apply deletes in parallel.
+		sort.Slice(deletes, func(i, j int) bool { return deletes[i] < deletes[j] })
+		sem := make(chan struct{}, nParallel)
 		for _, del := range deletes {
 			if d.dryRun {
 				if !d.quiet {
@@ -190,10 +209,10 @@ func (d *Deployer) Deploy(ctx context.Context) error {
 				<-sem
 			}(del)
 		}
-	}
-	// Wait for all uploads/deletes to finish.
-	for n := nParallel; n > 0; n-- {
-		sem <- struct{}{}
+		// Wait for all deletes to finish.
+		for n := nParallel; n > 0; n-- {
+			sem <- struct{}{}
+		}
 	}
 	if len(errs) > 0 {
 		if !d.quiet {
@@ -551,3 +570,36 @@ func findDiffs(localFiles map[string]*localFile, remoteFiles map[string]*blob.Li
 	}
 	return uploads, deletes
 }
+
+// applyOrdering returns an ordered slice of slices of uploads.
+//
+// The returned slice will have length len(ordering)+1.
+//
+// The subslice at index i, for i = 0 ... len(ordering)-1, will have all of the
+// uploads whose Local.Path matched the regex at ordering[i] (but not any
+// previous ordering regex).
+// The subslice at index len(ordering) will have the remaining uploads that
+// didn't match any ordering regex.
+//
+// The subslices are sorted by Local.Path.
+func applyOrdering(ordering []*regexp.Regexp, uploads []*fileToUpload) [][]*fileToUpload {
+
+	// Sort the whole slice by Local.Path first.
+	sort.Slice(uploads, func(i, j int) bool { return uploads[i].Local.Path < uploads[j].Local.Path })
+
+	retval := make([][]*fileToUpload, len(ordering)+1)
+	for _, u := range uploads {
+		matched := false
+		for i, re := range ordering {
+			if re.MatchString(u.Local.Path) {
+				retval[i] = append(retval[i], u)
+				matched = true
+				break
+			}
+		}
+		if !matched {
+			retval[len(ordering)] = append(retval[len(ordering)], u)
+		}
+	}
+	return retval
+}

deploy/deployConfig.go
@@ -27,6 +27,9 @@ const deploymentConfigKey = "deployment"
 type deployConfig struct {
 	Targets  []*target
 	Matchers []*matcher
+	Order    []string
+
+	ordering []*regexp.Regexp // compiled Order
 }
 
 type target struct {
@@ -86,5 +89,12 @@ func decodeConfig(cfg config.Provider) (deployConfig, error) {
 			return dcfg, fmt.Errorf("invalid deployment.matchers.pattern: %v", err)
 		}
 	}
+	for _, o := range dcfg.Order {
+		re, err := regexp.Compile(o)
+		if err != nil {
+			return dcfg, fmt.Errorf("invalid deployment.orderings.pattern: %v", err)
+		}
+		dcfg.ordering = append(dcfg.ordering, re)
+	}
 	return dcfg, nil
 }

deploy/deployConfig_test.go
@@ -29,6 +29,9 @@ func TestDecodeConfigFromTOML(t *testing.T) {
 someOtherValue = "foo"
 
 [deployment]
+
+order = ["o1", "o2"]
+
 [[deployment.targets]]
 Name = "name1"
 URL = "url1"
@@ -59,6 +62,11 @@ content-type = "contenttype2"
 	dcfg, err := decodeConfig(cfg)
 	assert.NoError(err)
 
+	assert.Equal(2, len(dcfg.Order))
+	assert.Equal("o1", dcfg.Order[0])
+	assert.Equal("o2", dcfg.Order[1])
+	assert.Equal(2, len(dcfg.ordering))
+
 	assert.Equal(2, len(dcfg.Targets))
 	assert.Equal("name1", dcfg.Targets[0].Name)
 	assert.Equal("url1", dcfg.Targets[0].URL)
@@ -69,11 +77,36 @@ content-type = "contenttype2"
 
 	assert.Equal(2, len(dcfg.Matchers))
 	assert.Equal("^pattern1$", dcfg.Matchers[0].Pattern)
+	assert.NotNil(dcfg.Matchers[0].re)
 	assert.Equal("cachecontrol1", dcfg.Matchers[0].CacheControl)
 	assert.Equal("contentencoding1", dcfg.Matchers[0].ContentEncoding)
 	assert.Equal("contenttype1", dcfg.Matchers[0].ContentType)
 	assert.True(dcfg.Matchers[0].Gzip)
 	assert.True(dcfg.Matchers[0].Force)
+	assert.Equal("^pattern2$", dcfg.Matchers[1].Pattern)
+	assert.NotNil(dcfg.Matchers[1].re)
+	assert.Equal("cachecontrol2", dcfg.Matchers[1].CacheControl)
+	assert.Equal("contentencoding2", dcfg.Matchers[1].ContentEncoding)
+	assert.Equal("contenttype2", dcfg.Matchers[1].ContentType)
+	assert.False(dcfg.Matchers[1].Gzip)
+	assert.False(dcfg.Matchers[1].Force)
+}
+
+func TestInvalidOrderingPattern(t *testing.T) {
+	assert := require.New(t)
+
+	tomlConfig := `
+
+someOtherValue = "foo"
+
+[deployment]
+order = ["["] # invalid regular expression
+`
+	cfg, err := config.FromConfigString(tomlConfig, "toml")
+	assert.NoError(err)
+
+	_, err = decodeConfig(cfg)
+	assert.Error(err)
 }
 
 func TestInvalidMatcherPattern(t *testing.T) {

deploy/deploy_test.go
@@ -19,6 +19,7 @@ import (
 	"crypto/md5"
 	"io/ioutil"
 	"os"
+	"regexp"
 	"sort"
 	"testing"
 
@@ -174,11 +175,10 @@ func TestDeploy_FindDiffs(t *testing.T) {
 			remote[r.Key] = r
 		}
 		gotUpdates, gotDeletes := findDiffs(local, remote, tc.Force)
-		sort.Slice(gotUpdates, func(i, j int) bool { return gotUpdates[i].Local.Path < gotUpdates[j].Local.Path })
+		gotUpdates = applyOrdering(nil, gotUpdates)[0]
 		sort.Slice(gotDeletes, func(i, j int) bool { return gotDeletes[i] < gotDeletes[j] })
 		if diff := cmp.Diff(gotUpdates, tc.WantUpdates, cmpopts.IgnoreUnexported(localFile{})); diff != "" {
 			t.Errorf("updates differ:\n%s", diff)
-
 		}
 		if diff := cmp.Diff(gotDeletes, tc.WantDeletes); diff != "" {
 			t.Errorf("deletes differ:\n%s", diff)
@@ -306,3 +306,58 @@ func TestDeploy_LocalFile(t *testing.T) {
 		})
 	}
 }
+
+func TestOrdering(t *testing.T) {
+	tests := []struct {
+		Description string
+		Uploads     []string
+		Ordering    []*regexp.Regexp
+		Want        [][]string
+	}{
+		{
+			Description: "empty",
+			Want:        [][]string{nil},
+		},
+		{
+			Description: "no ordering",
+			Uploads:     []string{"c", "b", "a", "d"},
+			Want:        [][]string{{"a", "b", "c", "d"}},
+		},
+		{
+			Description: "one ordering",
+			Uploads:     []string{"db", "c", "b", "a", "da"},
+			Ordering:    []*regexp.Regexp{regexp.MustCompile("^d")},
+			Want:        [][]string{{"da", "db"}, {"a", "b", "c"}},
+		},
+		{
+			Description: "two orderings",
+			Uploads:     []string{"db", "c", "b", "a", "da"},
+			Ordering: []*regexp.Regexp{
+				regexp.MustCompile("^d"),
+				regexp.MustCompile("^b"),
+			},
+			Want: [][]string{{"da", "db"}, {"b"}, {"a", "c"}},
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.Description, func(t *testing.T) {
+			uploads := make([]*fileToUpload, len(tc.Uploads))
+			for i, u := range tc.Uploads {
+				uploads[i] = &fileToUpload{Local: &localFile{Path: u}}
+			}
+			gotUploads := applyOrdering(tc.Ordering, uploads)
+			var got [][]string
+			for _, subslice := range gotUploads {
+				var gotsubslice []string
+				for _, u := range subslice {
+					gotsubslice = append(gotsubslice, u.Local.Path)
+				}
+				got = append(got, gotsubslice)
+			}
+			if diff := cmp.Diff(got, tc.Want); diff != "" {
+				t.Error(diff)
+			}
+		})
+	}
+}