mirror of https://github.com/gohugoio/hugo.git (synced 2025-02-11 22:11:54 +00:00)
Add Hugo Modules
This commit implements Hugo Modules. This is a broad subject, but some keywords include:

* A new `module` configuration section where you can import almost anything. You can configure both your own file mounts and the file mounts of the modules you import. This is the new recommended way of configuring what you earlier put in `configDir`, `staticDir` etc. It also allows you to mount folders in non-Hugo projects, e.g. the `SCSS` folder in the Bootstrap GitHub project.
* A module consists of a set of mounts to the standard 7 component types in Hugo: `static`, `content`, `layouts`, `data`, `assets`, `i18n`, and `archetypes`. Yes, Theme Components can now include content, which should be very useful, especially in bigger multilingual projects.
* Modules not in your local file cache will be downloaded automatically and even "hot replaced" while the server is running.
* Hugo Modules supports and encourages semver-versioned modules, and uses the minimal version selection algorithm to resolve versions.
* A new set of CLI commands is provided to manage all of this: `hugo mod init`, `hugo mod get`, `hugo mod graph`, `hugo mod tidy`, and `hugo mod vendor`.

All of the above is backed by Go Modules.

Fixes #5973
Fixes #5996
Fixes #6010
Fixes #5911
Fixes #5940
Fixes #6074
Fixes #6082
Fixes #6092
parent 47953148b6
commit 9f5a92078a
158 changed files with 9895 additions and 5433 deletions
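For illustration only, here is a minimal sketch of how the new `module` configuration section described in the commit message could be decoded, using Hugo's own `config.FromConfigString` helper (the same helper the tests in this diff use). The TOML keys shown (`path`, `mounts`, `source`, `target`) and the Bootstrap import are assumptions based on the commit message, not code taken from this commit.

package main

import (
	"fmt"

	"github.com/gohugoio/hugo/config"
)

// moduleConfig is a hypothetical project configuration: it imports the
// Bootstrap repository as a module and mounts its scss folder into the
// assets component. The key names are illustrative assumptions.
const moduleConfig = `
[module]
[[module.imports]]
path = "github.com/twbs/bootstrap"
[[module.imports.mounts]]
source = "scss"
target = "assets/scss"
`

func main() {
	cfg, err := config.FromConfigString(moduleConfig, "toml")
	if err != nil {
		panic(err)
	}
	// The decoded section is available like any other configuration value.
	fmt.Println(cfg.Get("module"))
}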
@@ -1,2 +1 @@
gobench -package=./hugolib -bench="BenchmarkSiteBuilding/YAML,num_langs=3,num_pages=5000,tags_per_page=5,shortcodes,render" -count=3 > 1.bench
benchcmp -best 0.bench 1.bench
gobench -package=./hugolib -bench="BenchmarkSiteNew/Deep_content_tree"
49 cache/filecache/filecache.go vendored

@@ -44,6 +44,9 @@ type Cache struct {
	// 0 is effectively turning this cache off.
	maxAge time.Duration

	// When set, we just remove this entire root directory on expiration.
	pruneAllRootDir string

	nlocker *lockTracker
}

@@ -77,11 +80,12 @@ type ItemInfo struct {
}

// NewCache creates a new file cache with the given filesystem and max age.
func NewCache(fs afero.Fs, maxAge time.Duration) *Cache {
func NewCache(fs afero.Fs, maxAge time.Duration, pruneAllRootDir string) *Cache {
	return &Cache{
		Fs:      fs,
		nlocker: &lockTracker{Locker: locker.NewLocker(), seen: make(map[string]struct{})},
		maxAge:  maxAge,
		Fs:              fs,
		nlocker:         &lockTracker{Locker: locker.NewLocker(), seen: make(map[string]struct{})},
		maxAge:          maxAge,
		pruneAllRootDir: pruneAllRootDir,
	}
}

@@ -307,9 +311,15 @@ func (f Caches) Get(name string) *Cache {
// NewCaches creates a new set of file caches from the given
// configuration.
func NewCaches(p *helpers.PathSpec) (Caches, error) {
	dcfg, err := decodeConfig(p)
	if err != nil {
		return nil, err
	var dcfg Configs
	if c, ok := p.Cfg.Get("filecacheConfigs").(Configs); ok {
		dcfg = c
	} else {
		var err error
		dcfg, err = DecodeConfig(p.Fs.Source, p.Cfg)
		if err != nil {
			return nil, err
		}
	}

	fs := p.Fs.Source

@@ -319,30 +329,25 @@ func NewCaches(p *helpers.PathSpec) (Caches, error) {
		var cfs afero.Fs

		if v.isResourceDir {
			cfs = p.BaseFs.Resources.Fs
			cfs = p.BaseFs.ResourcesCache
		} else {
			cfs = fs
		}

		var baseDir string
		if !strings.HasPrefix(v.Dir, "_gen") {
			// We do cache eviction (file removes) and since the user can set
			// his/hers own cache directory, we really want to make sure
			// we do not delete any files that do not belong to this cache.
			// We do add the cache name as the root, but this is an extra safe
			// guard. We skip the files inside /resources/_gen/ because
			// that would be breaking.
			baseDir = filepath.Join(v.Dir, filecacheRootDirname, k)
		} else {
			baseDir = filepath.Join(v.Dir, k)
		}
		if err = cfs.MkdirAll(baseDir, 0777); err != nil && !os.IsExist(err) {
		baseDir := v.Dir

		if err := cfs.MkdirAll(baseDir, 0777); err != nil && !os.IsExist(err) {
			return nil, err
		}

		bfs := afero.NewBasePathFs(cfs, baseDir)

		m[k] = NewCache(bfs, v.MaxAge)
		var pruneAllRootDir string
		if k == cacheKeyModules {
			pruneAllRootDir = "pkg"
		}

		m[k] = NewCache(bfs, v.MaxAge, pruneAllRootDir)
	}

	return m, nil
56 cache/filecache/filecache_config.go vendored

@@ -19,6 +19,8 @@ import (
	"strings"
	"time"

	"github.com/gohugoio/hugo/config"

	"github.com/gohugoio/hugo/helpers"

	"github.com/mitchellh/mapstructure"

@@ -32,7 +34,7 @@ const (
	resourcesGenDir = ":resourceDir/_gen"
)

var defaultCacheConfig = cacheConfig{
var defaultCacheConfig = Config{
	MaxAge: -1, // Never expire
	Dir:    ":cacheDir/:project",
}

@@ -42,9 +44,20 @@ const (
	cacheKeyGetCSV  = "getcsv"
	cacheKeyImages  = "images"
	cacheKeyAssets  = "assets"
	cacheKeyModules = "modules"
)

var defaultCacheConfigs = map[string]cacheConfig{
type Configs map[string]Config

func (c Configs) CacheDirModules() string {
	return c[cacheKeyModules].Dir
}

var defaultCacheConfigs = Configs{
	cacheKeyModules: {
		MaxAge: -1,
		Dir:    ":cacheDir/modules",
	},
	cacheKeyGetJSON: defaultCacheConfig,
	cacheKeyGetCSV:  defaultCacheConfig,
	cacheKeyImages: {

@@ -57,9 +70,7 @@ var defaultCacheConfigs = map[string]cacheConfig{
	},
}

type cachesConfig map[string]cacheConfig

type cacheConfig struct {
type Config struct {
	// Max age of cache entries in this cache. Any items older than this will
	// be removed and not returned from the cache.
	// a negative value means forever, 0 means cache is disabled.

@@ -88,13 +99,18 @@ func (f Caches) ImageCache() *Cache {
	return f[cacheKeyImages]
}

// ModulesCache gets the file cache for Hugo Modules.
func (f Caches) ModulesCache() *Cache {
	return f[cacheKeyModules]
}

// AssetsCache gets the file cache for assets (processed resources, SCSS etc.).
func (f Caches) AssetsCache() *Cache {
	return f[cacheKeyAssets]
}

func decodeConfig(p *helpers.PathSpec) (cachesConfig, error) {
	c := make(cachesConfig)
func DecodeConfig(fs afero.Fs, cfg config.Provider) (Configs, error) {
	c := make(Configs)
	valid := make(map[string]bool)
	// Add defaults
	for k, v := range defaultCacheConfigs {

@@ -102,11 +118,9 @@ func decodeConfig(p *helpers.PathSpec) (cachesConfig, error) {
		valid[k] = true
	}

	cfg := p.Cfg

	m := cfg.GetStringMap(cachesConfigKey)

	_, isOsFs := p.Fs.Source.(*afero.OsFs)
	_, isOsFs := fs.(*afero.OsFs)

	for k, v := range m {
		cc := defaultCacheConfig

@@ -148,7 +162,7 @@ func decodeConfig(p *helpers.PathSpec) (cachesConfig, error) {

		for i, part := range parts {
			if strings.HasPrefix(part, ":") {
				resolved, isResource, err := resolveDirPlaceholder(p, part)
				resolved, isResource, err := resolveDirPlaceholder(fs, cfg, part)
				if err != nil {
					return c, err
				}

@@ -176,6 +190,18 @@ func decodeConfig(p *helpers.PathSpec) (cachesConfig, error) {
			}
		}

		if !strings.HasPrefix(v.Dir, "_gen") {
			// We do cache eviction (file removes) and since the user can set
			// his/hers own cache directory, we really want to make sure
			// we do not delete any files that do not belong to this cache.
			// We do add the cache name as the root, but this is an extra safe
			// guard. We skip the files inside /resources/_gen/ because
			// that would be breaking.
			v.Dir = filepath.Join(v.Dir, filecacheRootDirname, k)
		} else {
			v.Dir = filepath.Join(v.Dir, k)
		}

		if disabled {
			v.MaxAge = 0
		}

@@ -187,15 +213,17 @@ func decodeConfig(p *helpers.PathSpec) (cachesConfig, error) {
}

// Resolves :resourceDir => /myproject/resources etc., :cacheDir => ...
func resolveDirPlaceholder(p *helpers.PathSpec, placeholder string) (cacheDir string, isResource bool, err error) {
func resolveDirPlaceholder(fs afero.Fs, cfg config.Provider, placeholder string) (cacheDir string, isResource bool, err error) {
	workingDir := cfg.GetString("workingDir")

	switch strings.ToLower(placeholder) {
	case ":resourcedir":
		return "", true, nil
	case ":cachedir":
		d, err := helpers.GetCacheDir(p.Fs.Source, p.Cfg)
		d, err := helpers.GetCacheDir(fs, cfg)
		return d, false, err
	case ":project":
		return filepath.Base(p.WorkingDir), false, nil
		return filepath.Base(workingDir), false, nil
	}

	return "", false, errors.Errorf("%q is not a valid placeholder (valid values are :cacheDir or :resourceDir)", placeholder)
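A sketch of how the now-exported API above might be used from outside the package, assuming only what this diff shows: `DecodeConfig` takes an `afero.Fs` and a `config.Provider`, and the new `Configs.CacheDirModules` accessor returns the directory of the "modules" cache. The paths and the `maxAge` value are made up for illustration.

package main

import (
	"fmt"

	"github.com/gohugoio/hugo/cache/filecache"
	"github.com/gohugoio/hugo/config"
	"github.com/spf13/afero"
)

func main() {
	// A configuration similar to the ones used in the tests below;
	// the directories are illustrative.
	configStr := `
workingDir = "/my/project"
cacheDir = "/var/hugo-cache"

[caches]
[caches.modules]
maxAge = "720h"
dir = ":cacheDir/modules"
`
	cfg, err := config.FromConfigString(configStr, "toml")
	if err != nil {
		panic(err)
	}

	// DecodeConfig no longer needs a *helpers.PathSpec, only a filesystem
	// and a config.Provider, so it can run against an in-memory fs here.
	configs, err := filecache.DecodeConfig(afero.NewMemMapFs(), cfg)
	if err != nil {
		panic(err)
	}

	// Prints the resolved directory of the modules file cache; note that the
	// decoder may append the cache name to the configured dir (see above).
	fmt.Println(configs.CacheDirModules())
}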
45 cache/filecache/filecache_config_test.go vendored

@@ -20,10 +20,9 @@ import (
	"testing"
	"time"

	"github.com/gohugoio/hugo/helpers"
	"github.com/spf13/afero"

	"github.com/gohugoio/hugo/config"
	"github.com/gohugoio/hugo/hugofs"

	"github.com/spf13/viper"
	"github.com/stretchr/testify/require"

@@ -57,22 +56,19 @@ dir = "/path/to/c3"

	cfg, err := config.FromConfigString(configStr, "toml")
	assert.NoError(err)
	fs := hugofs.NewMem(cfg)
	p, err := helpers.NewPathSpec(fs, cfg)
	fs := afero.NewMemMapFs()
	decoded, err := DecodeConfig(fs, cfg)
	assert.NoError(err)

	decoded, err := decodeConfig(p)
	assert.NoError(err)

	assert.Equal(4, len(decoded))
	assert.Equal(5, len(decoded))

	c2 := decoded["getcsv"]
	assert.Equal("11h0m0s", c2.MaxAge.String())
	assert.Equal(filepath.FromSlash("/path/to/c2"), c2.Dir)
	assert.Equal(filepath.FromSlash("/path/to/c2/filecache/getcsv"), c2.Dir)

	c3 := decoded["images"]
	assert.Equal(time.Duration(-1), c3.MaxAge)
	assert.Equal(filepath.FromSlash("/path/to/c3"), c3.Dir)
	assert.Equal(filepath.FromSlash("/path/to/c3/filecache/images"), c3.Dir)

}

@@ -105,14 +101,11 @@ dir = "/path/to/c3"

	cfg, err := config.FromConfigString(configStr, "toml")
	assert.NoError(err)
	fs := hugofs.NewMem(cfg)
	p, err := helpers.NewPathSpec(fs, cfg)
	fs := afero.NewMemMapFs()
	decoded, err := DecodeConfig(fs, cfg)
	assert.NoError(err)

	decoded, err := decodeConfig(p)
	assert.NoError(err)

	assert.Equal(4, len(decoded))
	assert.Equal(5, len(decoded))

	for _, v := range decoded {
		assert.Equal(time.Duration(0), v.MaxAge)

@@ -133,24 +126,22 @@ func TestDecodeConfigDefault(t *testing.T) {
		cfg.Set("cacheDir", "/cache/thecache")
	}

	fs := hugofs.NewMem(cfg)
	p, err := helpers.NewPathSpec(fs, cfg)
	assert.NoError(err)
	fs := afero.NewMemMapFs()

	decoded, err := decodeConfig(p)
	decoded, err := DecodeConfig(fs, cfg)

	assert.NoError(err)

	assert.Equal(4, len(decoded))
	assert.Equal(5, len(decoded))

	imgConfig := decoded[cacheKeyImages]
	jsonConfig := decoded[cacheKeyGetJSON]

	if runtime.GOOS == "windows" {
		assert.Equal("_gen", imgConfig.Dir)
		assert.Equal(filepath.FromSlash("_gen/images"), imgConfig.Dir)
	} else {
		assert.Equal("_gen", imgConfig.Dir)
		assert.Equal("/cache/thecache/hugoproject", jsonConfig.Dir)
		assert.Equal("_gen/images", imgConfig.Dir)
		assert.Equal("/cache/thecache/hugoproject/filecache/getjson", jsonConfig.Dir)
	}

	assert.True(imgConfig.isResourceDir)

@@ -183,11 +174,9 @@ dir = "/"

	cfg, err := config.FromConfigString(configStr, "toml")
	assert.NoError(err)
	fs := hugofs.NewMem(cfg)
	p, err := helpers.NewPathSpec(fs, cfg)
	assert.NoError(err)
	fs := afero.NewMemMapFs()

	_, err = decodeConfig(p)
	_, err = DecodeConfig(fs, cfg)
	assert.Error(err)

}
127 cache/filecache/filecache_pruner.go vendored

@@ -28,53 +28,100 @@ import (
func (c Caches) Prune() (int, error) {
	counter := 0
	for k, cache := range c {
		err := afero.Walk(cache.Fs, "", func(name string, info os.FileInfo, err error) error {
			if info == nil {
				return nil
			}

			name = cleanID(name)

			if info.IsDir() {
				f, err := cache.Fs.Open(name)
				if err != nil {
					// This cache dir may not exist.
					return nil
				}
				defer f.Close()
				_, err = f.Readdirnames(1)
				if err == io.EOF {
					// Empty dir.
					return cache.Fs.Remove(name)
				}

				return nil
			}

			shouldRemove := cache.isExpired(info.ModTime())

			if !shouldRemove && len(cache.nlocker.seen) > 0 {
				// Remove it if it's not been touched/used in the last build.
				_, seen := cache.nlocker.seen[name]
				shouldRemove = !seen
			}

			if shouldRemove {
				err := cache.Fs.Remove(name)
				if err == nil {
					counter++
				}
				return err
			}

			return nil
		})
		count, err := cache.Prune(false)

		if err != nil {
			return counter, errors.Wrapf(err, "failed to prune cache %q", k)
		}

		counter += count

	}

	return counter, nil
}

// Prune removes expired and unused items from this cache.
// If force is set, everything will be removed not considering expiry time.
func (c *Cache) Prune(force bool) (int, error) {
	if c.pruneAllRootDir != "" {
		return c.pruneRootDir(force)
	}

	counter := 0

	err := afero.Walk(c.Fs, "", func(name string, info os.FileInfo, err error) error {
		if info == nil {
			return nil
		}

		name = cleanID(name)

		if info.IsDir() {
			f, err := c.Fs.Open(name)
			if err != nil {
				// This cache dir may not exist.
				return nil
			}
			defer f.Close()
			_, err = f.Readdirnames(1)
			if err == io.EOF {
				// Empty dir.
				return c.Fs.Remove(name)
			}

			return nil
		}

		shouldRemove := force || c.isExpired(info.ModTime())

		if !shouldRemove && len(c.nlocker.seen) > 0 {
			// Remove it if it's not been touched/used in the last build.
			_, seen := c.nlocker.seen[name]
			shouldRemove = !seen
		}

		if shouldRemove {
			err := c.Fs.Remove(name)
			if err == nil {
				counter++
			}
			return err
		}

		return nil
	})

	return counter, err
}

func (c *Cache) pruneRootDir(force bool) (int, error) {

	info, err := c.Fs.Stat(c.pruneAllRootDir)
	if err != nil {
		if os.IsNotExist(err) {
			return 0, nil
		}
		return 0, err
	}

	if !force && !c.isExpired(info.ModTime()) {
		return 0, nil
	}

	counter := 0
	// Module cache has 0555 directories; make them writable in order to remove content.
	afero.Walk(c.Fs, c.pruneAllRootDir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return nil
		}
		if info.IsDir() {
			counter++
			c.Fs.Chmod(path, 0777)
		}
		return nil
	})
	return 1, c.Fs.RemoveAll(c.pruneAllRootDir)

}
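The pruner above special-cases the modules cache because the Go module cache is laid out with read-only (0555) directories that cannot be removed as-is. As a standalone illustration of that trick (not Hugo's code), here is a sketch using only the standard library that makes directories writable before removing the tree; the path used in main is made up.

package main

import (
	"os"
	"path/filepath"
)

// removeReadOnlyTree removes dir even when it contains 0555 directories,
// as the Go module cache does: every directory is chmodded writable first,
// then the whole tree is removed in one call.
func removeReadOnlyTree(dir string) error {
	filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return nil
		}
		if info.IsDir() {
			// Ignore chmod errors; RemoveAll below will surface real failures.
			os.Chmod(path, 0777)
		}
		return nil
	})
	return os.RemoveAll(dir)
}

func main() {
	// Hypothetical location of a downloaded module tree.
	_ = removeReadOnlyTree(filepath.Join(os.TempDir(), "hugo_cache", "modules", "pkg"))
}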
11 cache/filecache/filecache_pruner_test.go vendored

@@ -18,9 +18,7 @@ import (
	"testing"
	"time"

	"github.com/gohugoio/hugo/config"
	"github.com/gohugoio/hugo/helpers"
	"github.com/gohugoio/hugo/hugofs"
	"github.com/spf13/afero"

	"github.com/stretchr/testify/require"
)

@@ -54,14 +52,9 @@ maxAge = "200ms"
dir = ":resourceDir/_gen"
`

	cfg, err := config.FromConfigString(configStr, "toml")
	assert.NoError(err)

	for _, name := range []string{cacheKeyGetCSV, cacheKeyGetJSON, cacheKeyAssets, cacheKeyImages} {
		msg := fmt.Sprintf("cache: %s", name)
		fs := hugofs.NewMem(cfg)
		p, err := helpers.NewPathSpec(fs, cfg)
		assert.NoError(err)
		p := newPathsSpec(t, afero.NewMemMapFs(), configStr)
		caches, err := NewCaches(p)
		assert.NoError(err)
		cache := caches[name]
64 cache/filecache/filecache_test.go vendored

@@ -25,6 +25,9 @@ import (
	"testing"
	"time"

	"github.com/gohugoio/hugo/langs"
	"github.com/gohugoio/hugo/modules"

	"github.com/gohugoio/hugo/common/hugio"
	"github.com/gohugoio/hugo/config"
	"github.com/gohugoio/hugo/helpers"

@@ -83,12 +86,7 @@ dir = ":cacheDir/c"
	configStr = replacer.Replace(configStr)
	configStr = strings.Replace(configStr, "\\", winPathSep, -1)

	cfg, err := config.FromConfigString(configStr, "toml")
	assert.NoError(err)

	fs := hugofs.NewFrom(osfs, cfg)
	p, err := helpers.NewPathSpec(fs, cfg)
	assert.NoError(err)
	p := newPathsSpec(t, osfs, configStr)

	caches, err := NewCaches(p)
	assert.NoError(err)

@@ -207,11 +205,7 @@ dir = "/cache/c"

`

	cfg, err := config.FromConfigString(configStr, "toml")
	assert.NoError(err)
	fs := hugofs.NewMem(cfg)
	p, err := helpers.NewPathSpec(fs, cfg)
	assert.NoError(err)
	p := newPathsSpec(t, afero.NewMemMapFs(), configStr)

	caches, err := NewCaches(p)
	assert.NoError(err)

@@ -255,3 +249,51 @@ func TestCleanID(t *testing.T) {
	assert.Equal(filepath.FromSlash("a/b/c.txt"), cleanID(filepath.FromSlash("/a/b//c.txt")))
	assert.Equal(filepath.FromSlash("a/b/c.txt"), cleanID(filepath.FromSlash("a/b//c.txt")))
}

func initConfig(fs afero.Fs, cfg config.Provider) error {
	if _, err := langs.LoadLanguageSettings(cfg, nil); err != nil {
		return err
	}

	modConfig, err := modules.DecodeConfig(cfg)
	if err != nil {
		return err
	}

	workingDir := cfg.GetString("workingDir")
	themesDir := cfg.GetString("themesDir")
	if !filepath.IsAbs(themesDir) {
		themesDir = filepath.Join(workingDir, themesDir)
	}
	modulesClient := modules.NewClient(modules.ClientConfig{
		Fs:           fs,
		WorkingDir:   workingDir,
		ThemesDir:    themesDir,
		ModuleConfig: modConfig,
		IgnoreVendor: true,
	})

	moduleConfig, err := modulesClient.Collect()
	if err != nil {
		return err
	}

	if err := modules.ApplyProjectConfigDefaults(cfg, moduleConfig.ActiveModules[len(moduleConfig.ActiveModules)-1]); err != nil {
		return err
	}

	cfg.Set("allModules", moduleConfig.ActiveModules)

	return nil
}

func newPathsSpec(t *testing.T, fs afero.Fs, configStr string) *helpers.PathSpec {
	assert := require.New(t)
	cfg, err := config.FromConfigString(configStr, "toml")
	assert.NoError(err)
	initConfig(fs, cfg)
	p, err := helpers.NewPathSpec(hugofs.NewFrom(fs, cfg), cfg)
	assert.NoError(err)
	return p

}
@@ -16,6 +16,11 @@ package commands
import (
	"bytes"
	"errors"
	"sync"

	"golang.org/x/sync/semaphore"

	"github.com/gohugoio/hugo/modules"

	"io/ioutil"

@@ -27,8 +32,6 @@ import (
	"os"
	"path/filepath"
	"regexp"
	"strings"
	"sync"
	"time"

	"github.com/gohugoio/hugo/common/loggers"

@@ -88,6 +91,8 @@ type commandeer struct {
	configured bool
	paused     bool

	fullRebuildSem *semaphore.Weighted

	// Any error from the last build.
	buildErr error
}

@@ -153,6 +158,7 @@ func newCommandeer(mustHaveConfigFile, running bool, h *hugoBuilderCommon, f fla
		doWithCommandeer: doWithCommandeer,
		visitedURLs:      types.NewEvictingStringQueue(10),
		debounce:         rebuildDebouncer,
		fullRebuildSem:   semaphore.NewWeighted(1),
		// This will be replaced later, but we need something to log to before the configuration is read.
		logger: loggers.NewLogger(jww.LevelError, jww.LevelError, os.Stdout, ioutil.Discard, running),
	}

@@ -282,6 +288,7 @@ func (c *commandeer) loadConfig(mustHaveConfigFile, running bool) error {
			WorkingDir:   dir,
			Filename:     c.h.cfgFile,
			AbsConfigDir: c.h.getConfigDir(dir),
			Environ:      os.Environ(),
			Environment:  environment},
		doWithCommandeer,
		doWithConfig)

@@ -290,7 +297,7 @@ func (c *commandeer) loadConfig(mustHaveConfigFile, running bool) error {
		if mustHaveConfigFile {
			return err
		}
		if err != hugolib.ErrNoConfigFile {
		if err != hugolib.ErrNoConfigFile && !modules.IsNotExist(err) {
			return err
		}

@@ -388,21 +395,6 @@ func (c *commandeer) loadConfig(mustHaveConfigFile, running bool) error {

	cfg.Logger.INFO.Println("Using config file:", config.ConfigFileUsed())

	themeDir := c.hugo.PathSpec.GetFirstThemeDir()
	if themeDir != "" {
		if _, err := sourceFs.Stat(themeDir); os.IsNotExist(err) {
			return newSystemError("Unable to find theme Directory:", themeDir)
		}
	}

	dir, themeVersionMismatch, minVersion := c.isThemeVsHugoVersionMismatch(sourceFs)

	if themeVersionMismatch {
		name := filepath.Base(dir)
		cfg.Logger.ERROR.Printf("%s theme does not support Hugo version %s. Minimum version required is %s\n",
			strings.ToUpper(name), hugo.CurrentVersion.ReleaseVersion(), minVersion)
	}

	return nil

}
@@ -54,6 +54,7 @@ func (b *commandsBuilder) addAll() *commandsBuilder {
		newImportCmd(),
		newGenCmd(),
		createReleaser(),
		b.newModCmd(),
	)

	return b

@@ -243,20 +244,26 @@ func (cc *hugoBuilderCommon) getEnvironment(isServer bool) string {
	return hugo.EnvironmentProduction
}

func (cc *hugoBuilderCommon) handleCommonBuilderFlags(cmd *cobra.Command) {
	cmd.PersistentFlags().StringVarP(&cc.source, "source", "s", "", "filesystem path to read files relative from")
	cmd.PersistentFlags().SetAnnotation("source", cobra.BashCompSubdirsInDir, []string{})
	cmd.PersistentFlags().StringVarP(&cc.environment, "environment", "e", "", "build environment")
	cmd.PersistentFlags().StringP("themesDir", "", "", "filesystem path to themes directory")
	cmd.PersistentFlags().BoolP("ignoreVendor", "", false, "ignores any _vendor directory")
}

func (cc *hugoBuilderCommon) handleFlags(cmd *cobra.Command) {
	cc.handleCommonBuilderFlags(cmd)
	cmd.Flags().Bool("cleanDestinationDir", false, "remove files from destination not found in static directories")
	cmd.Flags().BoolP("buildDrafts", "D", false, "include content marked as draft")
	cmd.Flags().BoolP("buildFuture", "F", false, "include content with publishdate in the future")
	cmd.Flags().BoolP("buildExpired", "E", false, "include expired content")
	cmd.Flags().StringVarP(&cc.source, "source", "s", "", "filesystem path to read files relative from")
	cmd.Flags().StringVarP(&cc.environment, "environment", "e", "", "build environment")
	cmd.Flags().StringP("contentDir", "c", "", "filesystem path to content directory")
	cmd.Flags().StringP("layoutDir", "l", "", "filesystem path to layout directory")
	cmd.Flags().StringP("cacheDir", "", "", "filesystem path to cache directory. Defaults: $TMPDIR/hugo_cache/")
	cmd.Flags().BoolP("ignoreCache", "", false, "ignores the cache directory")
	cmd.Flags().StringP("destination", "d", "", "filesystem path to write files to")
	cmd.Flags().StringSliceP("theme", "t", []string{}, "themes to use (located in /themes/THEMENAME/)")
	cmd.Flags().StringP("themesDir", "", "", "filesystem path to themes directory")
	cmd.Flags().StringVarP(&cc.baseURL, "baseURL", "b", "", "hostname (and path) to the root, e.g. http://spf13.com/")
	cmd.Flags().Bool("enableGitInfo", false, "add Git revision, date and author info to the pages")
	cmd.Flags().BoolVar(&cc.gc, "gc", false, "enable to run some cleanup tasks (remove unused cache files) after the build")
@@ -30,7 +30,6 @@ import (
	"github.com/gohugoio/hugo/parser/metadecoders"
	"github.com/gohugoio/hugo/parser/pageparser"

	src "github.com/gohugoio/hugo/source"
	"github.com/pkg/errors"

	"github.com/gohugoio/hugo/hugolib"

@@ -152,8 +151,8 @@ func (cc *convertCmd) convertAndSavePage(p page.Page, site *hugolib.Site, target

	site.Log.INFO.Println("Attempting to convert", p.File().Filename())

	f, _ := p.File().(src.ReadableFile)
	file, err := f.Open()
	f := p.File()
	file, err := f.FileInfo().Meta().Open()
	if err != nil {
		site.Log.ERROR.Println(errMsg)
		file.Close()
326 commands/hugo.go

@@ -16,19 +16,18 @@
package commands

import (
	"context"
	"fmt"
	"io/ioutil"
	"os/signal"
	"runtime/pprof"
	"runtime/trace"
	"sort"
	"sync/atomic"

	"github.com/gohugoio/hugo/hugofs"

	"github.com/gohugoio/hugo/resources/page"

	"github.com/gohugoio/hugo/common/hugo"
	"github.com/pkg/errors"

	"github.com/gohugoio/hugo/common/herrors"

@@ -49,7 +48,6 @@ import (

	"github.com/gohugoio/hugo/config"

	"github.com/gohugoio/hugo/parser/metadecoders"
	flag "github.com/spf13/pflag"

	"github.com/fsnotify/fsnotify"

@@ -196,6 +194,7 @@ func initializeFlags(cmd *cobra.Command, cfg config.Provider) {
		"forceSyncStatic",
		"noTimes",
		"noChmod",
		"ignoreVendor",
		"templateMetrics",
		"templateMetricsHints",

@@ -291,6 +290,7 @@ func ifTerminal(s string) string {
}

func (c *commandeer) fullBuild() error {

	var (
		g         errgroup.Group
		langCount map[string]uint64

@@ -309,13 +309,9 @@ func (c *commandeer) fullBuild() error {
		cnt, err := c.copyStatic()
		if err != nil {
			if !os.IsNotExist(err) {
				return errors.Wrap(err, "Error copying static files")
			}
			c.logger.INFO.Println("No Static directory found")
			return errors.Wrap(err, "Error copying static files")
		}
		langCount = cnt
		langCount = cnt
		return nil
	}
	buildSitesFunc := func() error {

@@ -503,7 +499,11 @@ func (c *commandeer) build() error {
		if err != nil {
			return err
		}
		c.logger.FEEDBACK.Println("Watching for changes in", c.hugo.PathSpec.AbsPathify(c.Cfg.GetString("contentDir")))

		baseWatchDir := c.Cfg.GetString("workingDir")
		rootWatchDirs := getRootWatchDirsStr(baseWatchDir, watchDirs)

		c.logger.FEEDBACK.Printf("Watching for changes in %s%s{%s}\n", baseWatchDir, helpers.FilePathSeparator, rootWatchDirs)
		c.logger.FEEDBACK.Println("Press Ctrl+C to stop")
		watcher, err := c.newWatcher(watchDirs...)
		checkErr(c.Logger, err)

@@ -547,7 +547,11 @@ func (c *commandeer) serverBuild() error {
}

func (c *commandeer) copyStatic() (map[string]uint64, error) {
	return c.doWithPublishDirs(c.copyStaticTo)
	m, err := c.doWithPublishDirs(c.copyStaticTo)
	if err == nil || os.IsNotExist(err) {
		return m, nil
	}
	return m, err
}

func (c *commandeer) doWithPublishDirs(f func(sourceFs *filesystems.SourceFilesystem) (uint64, error)) (map[string]uint64, error) {

@@ -566,6 +570,7 @@ func (c *commandeer) doWithPublishDirs(f func(sourceFs *filesystems.SourceFilesy
		if err != nil {
			return langCount, err
		}

		if lang == "" {
			// Not multihost
			for _, l := range c.languages {

@@ -594,6 +599,16 @@ func (fs *countingStatFs) Stat(name string) (os.FileInfo, error) {
	return f, err
}

func chmodFilter(dst, src os.FileInfo) bool {
	// Hugo publishes data from multiple sources, potentially
	// with overlapping directory structures. We cannot sync permissions
	// for directories as that would mean that we might end up with write-protected
	// directories inside /public.
	// One example of this would be syncing from the Go Module cache,
	// which have 0555 directories.
	return src.IsDir()
}

func (c *commandeer) copyStaticTo(sourceFs *filesystems.SourceFilesystem) (uint64, error) {
	publishDir := c.hugo.PathSpec.PublishDir
	// If root, remove the second '/'

@@ -610,6 +625,7 @@ func (c *commandeer) copyStaticTo(sourceFs *filesystems.SourceFilesystem) (uint6
	syncer := fsync.NewSyncer()
	syncer.NoTimes = c.Cfg.GetBool("noTimes")
	syncer.NoChmod = c.Cfg.GetBool("noChmod")
	syncer.ChmodFilter = chmodFilter
	syncer.SrcFs = fs
	syncer.DestFs = c.Fs.Destination
	// Now that we are using a unionFs for the static directories

@@ -652,120 +668,39 @@ func (c *commandeer) timeTrack(start time.Time, name string) {

// getDirList provides NewWatcher() with a list of directories to watch for changes.
func (c *commandeer) getDirList() ([]string, error) {
	var a []string
	var dirnames []string

	// To handle nested symlinked content dirs
	var seen = make(map[string]bool)
	var nested []string

	newWalker := func(allowSymbolicDirs bool) func(path string, fi os.FileInfo, err error) error {
		return func(path string, fi os.FileInfo, err error) error {
			if err != nil {
				if os.IsNotExist(err) {
					return nil
				}

				c.logger.ERROR.Println("Walker: ", err)
				return nil
			}

			// Skip .git directories.
			// Related to https://github.com/gohugoio/hugo/issues/3468.
			if fi.Name() == ".git" {
				return nil
			}

			if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
				link, err := filepath.EvalSymlinks(path)
				if err != nil {
					c.logger.ERROR.Printf("Cannot read symbolic link '%s', error was: %s", path, err)
					return nil
				}
				linkfi, err := helpers.LstatIfPossible(c.Fs.Source, link)
				if err != nil {
					c.logger.ERROR.Printf("Cannot stat %q: %s", link, err)
					return nil
				}
				if !allowSymbolicDirs && !linkfi.Mode().IsRegular() {
					c.logger.ERROR.Printf("Symbolic links for directories not supported, skipping %q", path)
					return nil
				}

				if allowSymbolicDirs && linkfi.IsDir() {
					// afero.Walk will not walk symbolic links, so wee need to do it.
					if !seen[path] {
						seen[path] = true
						nested = append(nested, path)
					}
					return nil
				}

				fi = linkfi
			}

			if fi.IsDir() {
				if fi.Name() == ".git" ||
					fi.Name() == "node_modules" || fi.Name() == "bower_components" {
					return filepath.SkipDir
				}
				a = append(a, path)
			}
	walkFn := func(path string, fi hugofs.FileMetaInfo, err error) error {
		if err != nil {
			c.logger.ERROR.Println("walker: ", err)
			return nil
		}

		if fi.IsDir() {
			if fi.Name() == ".git" ||
				fi.Name() == "node_modules" || fi.Name() == "bower_components" {
				return filepath.SkipDir
			}

			dirnames = append(dirnames, fi.Meta().Filename())
		}

		return nil

	}

	symLinkWalker := newWalker(true)
	regularWalker := newWalker(false)
	watchDirs := c.hugo.PathSpec.BaseFs.WatchDirs()
	for _, watchDir := range watchDirs {

	// SymbolicWalk will log anny ERRORs
	// Also note that the Dirnames fetched below will contain any relevant theme
	// directories.
	for _, contentDir := range c.hugo.PathSpec.BaseFs.Content.Dirnames {
		_ = helpers.SymbolicWalk(c.Fs.Source, contentDir, symLinkWalker)
	}

	for _, staticDir := range c.hugo.PathSpec.BaseFs.Data.Dirnames {
		_ = helpers.SymbolicWalk(c.Fs.Source, staticDir, regularWalker)
	}

	for _, staticDir := range c.hugo.PathSpec.BaseFs.I18n.Dirnames {
		_ = helpers.SymbolicWalk(c.Fs.Source, staticDir, regularWalker)
	}

	for _, staticDir := range c.hugo.PathSpec.BaseFs.Layouts.Dirnames {
		_ = helpers.SymbolicWalk(c.Fs.Source, staticDir, regularWalker)
	}

	for _, staticFilesystem := range c.hugo.PathSpec.BaseFs.Static {
		for _, staticDir := range staticFilesystem.Dirnames {
			_ = helpers.SymbolicWalk(c.Fs.Source, staticDir, regularWalker)
		w := hugofs.NewWalkway(hugofs.WalkwayConfig{Logger: c.logger, Info: watchDir, WalkFn: walkFn})
		if err := w.Walk(); err != nil {
			c.logger.ERROR.Println("walker: ", err)
		}
	}

	for _, assetDir := range c.hugo.PathSpec.BaseFs.Assets.Dirnames {
		_ = helpers.SymbolicWalk(c.Fs.Source, assetDir, regularWalker)
	}
	dirnames = helpers.UniqueStringsSorted(dirnames)

	if len(nested) > 0 {
		for {

			toWalk := nested
			nested = nested[:0]

			for _, d := range toWalk {
				_ = helpers.SymbolicWalk(c.Fs.Source, d, symLinkWalker)
			}

			if len(nested) == 0 {
				break
			}
		}
	}

	a = helpers.UniqueStrings(a)
	sort.Strings(a)

	return a, nil
	return dirnames, nil
}

func (c *commandeer) buildSites() (err error) {

@@ -812,26 +747,60 @@ func (c *commandeer) partialReRender(urls ...string) error {
	return c.hugo.Build(hugolib.BuildCfg{RecentlyVisited: visited, PartialReRender: true})
}

func (c *commandeer) fullRebuild() {
	c.commandeerHugoState = &commandeerHugoState{}
	err := c.loadConfig(true, true)
	if err != nil {
		// Set the processing on pause until the state is recovered.
		c.paused = true
		c.handleBuildErr(err, "Failed to reload config")

	} else {
		c.paused = false
	}

	if !c.paused {
		err := c.buildSites()
		if err != nil {
			c.logger.ERROR.Println(err)
		} else if !c.h.buildWatch && !c.Cfg.GetBool("disableLiveReload") {
			livereload.ForceRefresh()
func (c *commandeer) fullRebuild(changeType string) {
	if changeType == configChangeGoMod {
		// go.mod may be changed during the build itself, and
		// we really want to prevent superfluous builds.
		if !c.fullRebuildSem.TryAcquire(1) {
			return
		}
		c.fullRebuildSem.Release(1)
	}

	c.fullRebuildSem.Acquire(context.Background(), 1)

	go func() {

		defer c.fullRebuildSem.Release(1)

		c.printChangeDetected(changeType)

		defer func() {

			// Allow any file system events to arrive back.
			// This will block any rebuild on config changes for the
			// duration of the sleep.
			time.Sleep(2 * time.Second)
		}()

		defer c.timeTrack(time.Now(), "Total")

		c.commandeerHugoState = &commandeerHugoState{}
		err := c.loadConfig(true, true)
		if err != nil {
			// Set the processing on pause until the state is recovered.
			c.paused = true
			c.handleBuildErr(err, "Failed to reload config")

		} else {
			c.paused = false
		}

		if !c.paused {
			_, err := c.copyStatic()
			if err != nil {
				c.logger.ERROR.Println(err)
				return
			}

			err = c.buildSites()
			if err != nil {
				c.logger.ERROR.Println(err)
			} else if !c.h.buildWatch && !c.Cfg.GetBool("disableLiveReload") {
				livereload.ForceRefresh()
			}
		}
	}()
}

// newWatcher creates a new watcher to watch filesystem events.

@@ -886,26 +855,53 @@ func (c *commandeer) newWatcher(dirList ...string) (*watcher.Batcher, error) {
	return watcher, nil
}

func (c *commandeer) printChangeDetected(typ string) {
	msg := "\nChange"
	if typ != "" {
		msg += " of " + typ
	}
	msg += " detected, rebuilding site."

	c.logger.FEEDBACK.Println(msg)
	const layout = "2006-01-02 15:04:05.000 -0700"
	c.logger.FEEDBACK.Println(time.Now().Format(layout))
}

const (
	configChangeConfig = "config file"
	configChangeGoMod  = "go.mod file"
)

func (c *commandeer) handleEvents(watcher *watcher.Batcher,
	staticSyncer *staticSyncer,
	evs []fsnotify.Event,
	configSet map[string]bool) {

	var isHandled bool

	for _, ev := range evs {
		isConfig := configSet[ev.Name]
		configChangeType := configChangeConfig
		if isConfig {
			if strings.Contains(ev.Name, "go.mod") {
				configChangeType = configChangeGoMod
			}
		}
		if !isConfig {
			// It may be one of the /config folders
			dirname := filepath.Dir(ev.Name)
			if dirname != "." && configSet[dirname] {
				isConfig = true
			}

		}

		if isConfig {
			isHandled = true

			if ev.Op&fsnotify.Chmod == fsnotify.Chmod {
				continue
			}

			if ev.Op&fsnotify.Remove == fsnotify.Remove || ev.Op&fsnotify.Rename == fsnotify.Rename {
				for _, configFile := range c.configFiles {
					counter := 0

@@ -917,13 +913,20 @@ func (c *commandeer) handleEvents(watcher *watcher.Batcher,
					time.Sleep(100 * time.Millisecond)
				}
			}

				// A write event will follow.
				continue
			}

			// Config file(s) changed. Need full rebuild.
			c.fullRebuild()
			break
			c.fullRebuild(configChangeType)
		}
	}

	if isHandled {
		return
	}

	if c.paused {
		// Wait for the server to get into a consistent state before
		// we continue with processing.

@@ -933,7 +936,9 @@ func (c *commandeer) handleEvents(watcher *watcher.Batcher,
	if len(evs) > 50 {
		// This is probably a mass edit of the content dir.
		// Schedule a full rebuild for when it slows down.
		c.debounce(c.fullRebuild)
		c.debounce(func() {
			c.fullRebuild("")
		})
		return
	}

@@ -1015,7 +1020,7 @@ func (c *commandeer) handleEvents(watcher *watcher.Batcher,
			continue
		}

		walkAdder := func(path string, f os.FileInfo, err error) error {
		walkAdder := func(path string, f hugofs.FileMetaInfo, err error) error {
			if f.IsDir() {
				c.logger.FEEDBACK.Println("adding created directory to watchlist", path)
				if err := watcher.Add(path); err != nil {

@@ -1046,9 +1051,7 @@ func (c *commandeer) handleEvents(watcher *watcher.Batcher,
	}

	if len(staticEvents) > 0 {
		c.logger.FEEDBACK.Println("\nStatic file changes detected")
		const layout = "2006-01-02 15:04:05.000 -0700"
		c.logger.FEEDBACK.Println(time.Now().Format(layout))
		c.printChangeDetected("Static files")

		if c.Cfg.GetBool("forceSyncStatic") {
			c.logger.FEEDBACK.Printf("Syncing all static files\n")

@@ -1087,10 +1090,7 @@ func (c *commandeer) handleEvents(watcher *watcher.Batcher,
		doLiveReload := !c.h.buildWatch && !c.Cfg.GetBool("disableLiveReload")
		onePageName := pickOneWriteOrCreatePath(partitionedEvents.ContentEvents)

		c.logger.FEEDBACK.Println("\nChange detected, rebuilding site")
		const layout = "2006-01-02 15:04:05.000 -0700"
		c.logger.FEEDBACK.Println(time.Now().Format(layout))

		c.printChangeDetected("")
		c.changeDetector.PrepareNew()
		if err := c.rebuildSites(dynamicEvents); err != nil {
			c.handleBuildErr(err, "Rebuild failed")

@@ -1167,41 +1167,3 @@ func pickOneWriteOrCreatePath(events []fsnotify.Event) string {

	return name
}

// isThemeVsHugoVersionMismatch returns whether the current Hugo version is
// less than any of the themes' min_version.
func (c *commandeer) isThemeVsHugoVersionMismatch(fs afero.Fs) (dir string, mismatch bool, requiredMinVersion string) {
	if !c.hugo.PathSpec.ThemeSet() {
		return
	}

	for _, absThemeDir := range c.hugo.BaseFs.AbsThemeDirs {

		path := filepath.Join(absThemeDir, "theme.toml")

		exists, err := helpers.Exists(path, fs)

		if err != nil || !exists {
			continue
		}

		b, err := afero.ReadFile(fs, path)
		if err != nil {
			continue
		}

		tomlMeta, err := metadecoders.Default.UnmarshalToMap(b, metadecoders.TOML)
		if err != nil {
			continue
		}

		if minVersion, ok := tomlMeta["min_version"]; ok {
			if hugo.CompareVersion(minVersion) > 0 {
				return absThemeDir, true, fmt.Sprint(minVersion)
			}
		}

	}

	return
}
@@ -17,7 +17,6 @@ import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"

@@ -27,6 +26,8 @@ import (
	"time"
	"unicode"

	"github.com/gohugoio/hugo/common/hugio"

	"github.com/gohugoio/hugo/parser/metadecoders"

	"github.com/gohugoio/hugo/helpers"

@@ -113,7 +114,7 @@ func (i *importCmd) importFromJekyll(cmd *cobra.Command, args []string) error {
	jww.FEEDBACK.Println("Importing...")

	fileCount := 0
	callback := func(path string, fi os.FileInfo, err error) error {
	callback := func(path string, fi hugofs.FileMetaInfo, err error) error {
		if err != nil {
			return err
		}

@@ -302,66 +303,10 @@ func (i *importCmd) createConfigFromJekyll(fs afero.Fs, inpath string, kind meta
	return helpers.WriteToDisk(filepath.Join(inpath, "config."+string(kind)), &buf, fs)
}

func copyFile(source string, dest string) error {
	sf, err := os.Open(source)
	if err != nil {
		return err
	}
	defer sf.Close()
	df, err := os.Create(dest)
	if err != nil {
		return err
	}
	defer df.Close()
	_, err = io.Copy(df, sf)
	if err == nil {
		si, err := os.Stat(source)
		if err != nil {
			err = os.Chmod(dest, si.Mode())

			if err != nil {
				return err
			}
		}

	}
	return nil
}

func copyDir(source string, dest string) error {
	fi, err := os.Stat(source)
	if err != nil {
		return err
	}
	if !fi.IsDir() {
		return errors.New(source + " is not a directory")
	}
	err = os.MkdirAll(dest, fi.Mode())
	if err != nil {
		return err
	}