mirror of
https://github.com/gohugoio/hugo.git
synced 2025-02-11 21:32:30 +00:00
all: Rework page store, add a dynacache, improve partial rebuilds, and some general spring cleaning
There are some breaking changes in this commit, see #11455. Closes #11455 Closes #11549 This fixes a set of bugs (see issue list) and it is also paying some technical debt accumulated over the years. We now build with Staticcheck enabled in the CI build. The performance should be about the same as before for regular sized Hugo sites, but it should perform and scale much better to larger data sets, as objects that uses lots of memory (e.g. rendered Markdown, big JSON files read into maps with transform.Unmarshal etc.) will now get automatically garbage collected if needed. Performance on partial rebuilds when running the server in fast render mode should be the same, but the change detection should be much more accurate. A list of the notable new features: * A new dependency tracker that covers (almost) all of Hugo's API and is used to do fine grained partial rebuilds when running the server. * A new and simpler tree document store which allows fast lookups and prefix-walking in all dimensions (e.g. language) concurrently. * You can now configure an upper memory limit allowing for much larger data sets and/or running on lower specced PCs. We have lifted the "no resources in sub folders" restriction for branch bundles (e.g. sections). Memory Limit * Hugos will, by default, set aside a quarter of the total system memory, but you can set this via the OS environment variable HUGO_MEMORYLIMIT (in gigabytes). This is backed by a partitioned LRU cache used throughout Hugo. A cache that gets dynamically resized in low memory situations, allowing Go's Garbage Collector to free the memory. New Dependency Tracker: Hugo has had a rule based coarse grained approach to server rebuilds that has worked mostly pretty well, but there have been some surprises (e.g. stale content). This is now revamped with a new dependency tracker that can quickly calculate the delta given a changed resource (e.g. a content file, template, JS file etc.). This handles transitive relations, e.g. 
$page -> js.Build -> JS import, or $page1.Content -> render hook -> site.GetPage -> $page2.Title, or $page1.Content -> shortcode -> partial -> site.RegularPages -> $page2.Content -> shortcode ..., and should also handle changes to aggregated values (e.g. site.Lastmod) effectively. This covers all of Hugo's API with 2 known exceptions (a list that may not be fully exhaustive): Changes to files loaded with template func os.ReadFile may not be handled correctly. We recommend loading resources with resources.Get Changes to Hugo objects (e.g. Page) passed in the template context to lang.Translate may not be detected correctly. We recommend having simple i18n templates without too much data context passed in other than simple types such as strings and numbers. Note that the cachebuster configuration (when A changes then rebuild B) works well with the above, but we recommend that you revise that configuration, as it in most situations should not be needed. One example where it is still needed is with TailwindCSS and using changes to hugo_stats.json to trigger new CSS rebuilds. Document Store: Previously, a little simplified, we split the document store (where we store pages and resources) in a tree per language. This worked pretty well, but the structure made some operations harder than they needed to be. We have now restructured it into one Radix tree for all languages. Internally the language is considered to be a dimension of that tree, and the tree can be viewed in all dimensions concurrently. This makes some operations re. language simpler (e.g. finding translations is just a slice range), but the idea is that it should also be relatively inexpensive to add more dimensions if needed (e.g. role). 
Fixes #10169 Fixes #10364 Fixes #10482 Fixes #10630 Fixes #10656 Fixes #10694 Fixes #10918 Fixes #11262 Fixes #11439 Fixes #11453 Fixes #11457 Fixes #11466 Fixes #11540 Fixes #11551 Fixes #11556 Fixes #11654 Fixes #11661 Fixes #11663 Fixes #11664 Fixes #11669 Fixes #11671 Fixes #11807 Fixes #11808 Fixes #11809 Fixes #11815 Fixes #11840 Fixes #11853 Fixes #11860 Fixes #11883 Fixes #11904 Fixes #7388 Fixes #7425 Fixes #7436 Fixes #7544 Fixes #7882 Fixes #7960 Fixes #8255 Fixes #8307 Fixes #8863 Fixes #8927 Fixes #9192 Fixes #9324
This commit is contained in:
parent
5fd1e74903
commit
7285e74090
437 changed files with 19304 additions and 18384 deletions
6
.github/workflows/test.yml
vendored
6
.github/workflows/test.yml
vendored
|
@ -87,6 +87,12 @@ jobs:
|
|||
curl -LJO "https://github.com/sass/dart-sass/releases/download/${env:SASS_VERSION}/dart-sass-${env:SASS_VERSION}-windows-x64.zip";
|
||||
Expand-Archive -Path "dart-sass-${env:SASS_VERSION}-windows-x64.zip" -DestinationPath .;
|
||||
echo "$env:GITHUB_WORKSPACE/dart-sass/" | Out-File -FilePath $Env:GITHUB_PATH -Encoding utf-8 -Append
|
||||
- if: matrix.os == 'ubuntu-latest'
|
||||
name: Install staticcheck
|
||||
run: go install honnef.co/go/tools/cmd/staticcheck@latest
|
||||
- if: matrix.os == 'ubuntu-latest'
|
||||
name: Run staticcheck
|
||||
run: staticcheck ./...
|
||||
- if: matrix.os != 'windows-latest'
|
||||
name: Check
|
||||
run: |
|
||||
|
|
550
cache/dynacache/dynacache.go
vendored
Normal file
550
cache/dynacache/dynacache.go
vendored
Normal file
|
@ -0,0 +1,550 @@
|
|||
// Copyright 2024 The Hugo Authors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package dynacache
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"path"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/bep/lazycache"
|
||||
"github.com/bep/logg"
|
||||
"github.com/gohugoio/hugo/common/herrors"
|
||||
"github.com/gohugoio/hugo/common/loggers"
|
||||
"github.com/gohugoio/hugo/common/paths"
|
||||
"github.com/gohugoio/hugo/common/rungroup"
|
||||
"github.com/gohugoio/hugo/config"
|
||||
"github.com/gohugoio/hugo/helpers"
|
||||
"github.com/gohugoio/hugo/identity"
|
||||
"github.com/gohugoio/hugo/resources/resource"
|
||||
)
|
||||
|
||||
// minMaxSize is the absolute lower bound for any partition's max size,
// applied in Partition.adjustMaxSize regardless of configuration.
const minMaxSize = 10

// New creates a new cache.
// It panics if opts.Log is nil. Other zero-valued options are replaced with
// defaults (CheckInterval: 2s, MaxSize: 100000, MinMaxSize: 30).
// The returned Cache has its background size-adjustment loop already
// running; call Stop to terminate it.
func New(opts Options) *Cache {
	if opts.CheckInterval == 0 {
		opts.CheckInterval = time.Second * 2
	}

	if opts.MaxSize == 0 {
		opts.MaxSize = 100000
	}
	if opts.Log == nil {
		panic("nil Log")
	}

	if opts.MinMaxSize == 0 {
		opts.MinMaxSize = 30
	}

	stats := &stats{
		opts:             opts,
		adjustmentFactor: 1.0,
		// Start at the configured ceiling; the background loop shrinks
		// this under memory pressure.
		currentMaxSize:  opts.MaxSize,
		availableMemory: config.GetMemoryLimit(),
	}

	infol := opts.Log.InfoCommand("dynacache")

	c := &Cache{
		partitions: make(map[string]PartitionManager),
		opts:       opts,
		stats:      stats,
		infol:      infol,
	}

	// start returns the function that terminates the background loop;
	// it is invoked (once) via Stop.
	c.stop = c.start()

	return c
}
|
||||
|
||||
// Options for the cache.
type Options struct {
	// Log is the logger to use. Must be set; New panics on nil.
	Log loggers.Logger

	// CheckInterval is how often the background loop re-evaluates memory
	// usage and adjusts partition sizes. Defaults to 2 seconds in New.
	CheckInterval time.Duration

	// MaxSize is the upper bound on the total number of cache entries
	// across all partitions. Defaults to 100000 in New.
	MaxSize int

	// MinMaxSize is the lower bound used when the cache is dynamically
	// shrunk in low-memory situations. Defaults to 30 in New.
	MinMaxSize int

	// Running reports whether we're in a long-running (server) process.
	// NOTE(review): not referenced within this file — presumably consumed
	// by callers; confirm before relying on its semantics.
	Running bool
}
|
||||
|
||||
// Options for a partition.
type OptionsPartition struct {
	// ClearWhen controls when to clear this partition on rebuilds.
	ClearWhen ClearWhen

	// Weight is a number between 1 and 100 that indicates, in general,
	// how big this partition may get relative to the others.
	Weight int
}

// WeightFraction returns Weight as a fraction of 100 (e.g. 30 -> 0.3).
func (o OptionsPartition) WeightFraction() float64 {
	return float64(o.Weight) / 100
}

// CalculateMaxSize returns this partition's weighted share of
// maxSizePerPartition, rounded down.
func (o OptionsPartition) CalculateMaxSize(maxSizePerPartition int) int {
	return int(math.Floor(float64(maxSizePerPartition) * o.WeightFraction()))
}
|
||||
|
||||
// A dynamic partitioned cache.
type Cache struct {
	// mu guards partitions (written only in GetOrCreatePartition).
	mu sync.RWMutex

	// partitions maps partition name (e.g. "/aaaa/bbbb") to its manager.
	partitions map[string]PartitionManager

	opts  Options
	infol logg.LevelLogger

	// stats tracks memory usage and the dynamically adjusted max size;
	// mutated by the background loop started in New.
	stats *stats

	// stopOnce/stop terminate the background adjustment loop exactly once.
	stopOnce sync.Once
	stop     func()
}
|
||||
|
||||
// ClearMatching clears all partition for which the predicate returns true.
|
||||
func (c *Cache) ClearMatching(predicate func(k, v any) bool) {
|
||||
g := rungroup.Run[PartitionManager](context.Background(), rungroup.Config[PartitionManager]{
|
||||
NumWorkers: len(c.partitions),
|
||||
Handle: func(ctx context.Context, partition PartitionManager) error {
|
||||
partition.clearMatching(predicate)
|
||||
return nil
|
||||
},
|
||||
})
|
||||
|
||||
for _, p := range c.partitions {
|
||||
g.Enqueue(p)
|
||||
}
|
||||
|
||||
g.Wait()
|
||||
}
|
||||
|
||||
// ClearOnRebuild prepares the cache for a new rebuild taking the given
// changeset into account. It runs two concurrent passes over all partitions:
//  1. clearOnRebuild evicts entries affected by the changeset (per each
//     partition's ClearWhen policy) and marks evicted values as stale;
//  2. clearStale then evicts anything marked stale, which must run as a
//     separate pass because pass 1 in one partition may mark values that
//     are also cached in another.
func (c *Cache) ClearOnRebuild(changeset ...identity.Identity) {
	g := rungroup.Run[PartitionManager](context.Background(), rungroup.Config[PartitionManager]{
		NumWorkers: len(c.partitions),
		Handle: func(ctx context.Context, partition PartitionManager) error {
			partition.clearOnRebuild(changeset...)
			return nil
		},
	})

	for _, p := range c.partitions {
		g.Enqueue(p)
	}

	g.Wait()

	// Clear any entries marked as stale above.
	g = rungroup.Run[PartitionManager](context.Background(), rungroup.Config[PartitionManager]{
		NumWorkers: len(c.partitions),
		Handle: func(ctx context.Context, partition PartitionManager) error {
			partition.clearStale()
			return nil
		},
	})

	for _, p := range c.partitions {
		g.Enqueue(p)
	}

	g.Wait()
}
|
||||
|
||||
type keysProvider interface {
|
||||
Keys() []string
|
||||
}
|
||||
|
||||
// Keys returns a list of keys in all partitions.
|
||||
func (c *Cache) Keys(predicate func(s string) bool) []string {
|
||||
if predicate == nil {
|
||||
predicate = func(s string) bool { return true }
|
||||
}
|
||||
var keys []string
|
||||
for pn, g := range c.partitions {
|
||||
pkeys := g.(keysProvider).Keys()
|
||||
for _, k := range pkeys {
|
||||
p := path.Join(pn, k)
|
||||
if predicate(p) {
|
||||
keys = append(keys, p)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
return keys
|
||||
}
|
||||
|
||||
// calculateMaxSizePerPartition returns the unweighted per-partition max
// size, normalized so that a partition of average weight receives an equal
// share of maxItemsTotal once CalculateMaxSize applies its weight fraction.
// It panics if numPartitions or totalWeightQuantity is zero.
func calculateMaxSizePerPartition(maxItemsTotal, totalWeightQuantity, numPartitions int) int {
	switch {
	case numPartitions == 0:
		panic("numPartitions must be > 0")
	case totalWeightQuantity == 0:
		panic("totalWeightQuantity must be > 0")
	}

	// Same arithmetic as the weighted-average formulation, expressed via
	// named intermediates.
	averageWeight := float64(totalWeightQuantity) / float64(numPartitions)
	perPartition := float64(maxItemsTotal) / float64(numPartitions)
	return int(math.Floor(perPartition * (100.0 / averageWeight)))
}
|
||||
|
||||
// Stop stops the cache's background size-adjustment loop.
// It is safe to call multiple times; only the first call has any effect.
func (c *Cache) Stop() {
	c.stopOnce.Do(func() {
		c.stop()
	})
}
|
||||
|
||||
// adjustCurrentMaxSize re-reads the process memory stats, nudges the
// adjustment factor up or down accordingly, and, if the effective max size
// changed, resizes every partition (evicting entries as needed).
// It is invoked periodically by the goroutine started in start().
func (c *Cache) adjustCurrentMaxSize() {
	// NOTE(review): only the read lock is held, yet c.stats is mutated
	// below. This looks safe only because this method runs from a single
	// goroutine (the ticker loop); the lock protects the partitions map
	// against concurrent GetOrCreatePartition. Confirm no other callers race.
	c.mu.RLock()
	defer c.mu.RUnlock()

	if len(c.partitions) == 0 {
		return
	}
	var m runtime.MemStats
	runtime.ReadMemStats(&m)
	s := c.stats
	s.memstatsCurrent = m
	// fmt.Printf("\n\nAvailable = %v\nAlloc = %v\nTotalAlloc = %v\nSys = %v\nNumGC = %v\nMaxSize = %d\nAdjustmentFactor=%f\n\n", helpers.FormatByteCount(s.availableMemory), helpers.FormatByteCount(m.Alloc), helpers.FormatByteCount(m.TotalAlloc), helpers.FormatByteCount(m.Sys), m.NumGC, c.stats.currentMaxSize, s.adjustmentFactor)

	// Grow the factor gently (capped around 1.2) while within the memory
	// budget; shrink it more aggressively when over budget.
	if s.availableMemory >= s.memstatsCurrent.Alloc {
		if s.adjustmentFactor <= 1.0 {
			s.adjustmentFactor += 0.2
		}
	} else {
		// We're low on memory.
		s.adjustmentFactor -= 0.4
	}

	// Keep the factor strictly positive.
	if s.adjustmentFactor <= 0 {
		s.adjustmentFactor = 0.05
	}

	// Nothing more to do if the effective max size did not change.
	if !s.adjustCurrentMaxSize() {
		return
	}

	totalWeight := 0
	for _, pm := range c.partitions {
		totalWeight += pm.getOptions().Weight
	}

	maxSizePerPartition := calculateMaxSizePerPartition(c.stats.currentMaxSize, totalWeight, len(c.partitions))

	// Resize each partition to its weighted share, counting evictions.
	evicted := 0
	for _, p := range c.partitions {
		evicted += p.adjustMaxSize(p.getOptions().CalculateMaxSize(maxSizePerPartition))
	}

	if evicted > 0 {
		c.infol.
			WithFields(
				logg.Fields{
					{Name: "evicted", Value: evicted},
					{Name: "numGC", Value: m.NumGC},
					{Name: "limit", Value: helpers.FormatByteCount(c.stats.availableMemory)},
					{Name: "alloc", Value: helpers.FormatByteCount(m.Alloc)},
					{Name: "totalAlloc", Value: helpers.FormatByteCount(m.TotalAlloc)},
				},
			).Logf("adjusted partitions' max size")
	}
}
|
||||
|
||||
// start launches the background goroutine that calls adjustCurrentMaxSize
// every opts.CheckInterval. It returns a function that stops the goroutine
// and its ticker; the returned func must be called exactly once (guarded by
// stopOnce in Stop), as it closes the quit channel.
func (c *Cache) start() func() {
	ticker := time.NewTicker(c.opts.CheckInterval)
	quit := make(chan struct{})

	go func() {
		for {
			select {
			case <-ticker.C:
				c.adjustCurrentMaxSize()
			case <-quit:
				ticker.Stop()
				return
			}
		}
	}()

	return func() {
		close(quit)
	}
}
|
||||
|
||||
// partitionNameRe constrains partition names to 1–3 slash-separated
// alphanumeric segments, the first exactly 4 characters (e.g. "/page/pages").
var partitionNameRe = regexp.MustCompile(`^\/[a-zA-Z0-9]{4}(\/[a-zA-Z0-9]+)?(\/[a-zA-Z0-9]+)?`)

// GetOrCreatePartition gets or creates a partition with the given name.
// It panics on a nil Cache, a Weight outside [1, 100], or a name that does
// not match partitionNameRe.
func GetOrCreatePartition[K comparable, V any](c *Cache, name string, opts OptionsPartition) *Partition[K, V] {
	if c == nil {
		panic("nil Cache")
	}
	if opts.Weight < 1 || opts.Weight > 100 {
		panic("invalid Weight, must be between 1 and 100")
	}

	// The regexp is unanchored at the end, so require that the match
	// consumes the entire name, not just a prefix.
	if partitionNameRe.FindString(name) != name {
		panic(fmt.Sprintf("invalid partition name %q", name))
	}

	// Fast path: existing partition under the read lock.
	c.mu.RLock()
	p, found := c.partitions[name]
	c.mu.RUnlock()
	if found {
		return p.(*Partition[K, V])
	}

	c.mu.Lock()
	defer c.mu.Unlock()

	// Double check: another goroutine may have created the partition
	// between the RUnlock and the Lock above.
	p, found = c.partitions[name]
	if found {
		return p.(*Partition[K, V])
	}

	// At this point, we don't know the number of partitions or their
	// configuration, but this will be re-adjusted later.
	const numberOfPartitionsEstimate = 10
	maxSize := opts.CalculateMaxSize(c.opts.MaxSize / numberOfPartitionsEstimate)

	// Create a new partition and cache it.
	partition := &Partition[K, V]{
		c:       lazycache.New(lazycache.Options[K, V]{MaxEntries: maxSize}),
		maxSize: maxSize,
		trace:   c.opts.Log.Logger().WithLevel(logg.LevelTrace).WithField("partition", name),
		opts:    opts,
	}
	c.partitions[name] = partition

	return partition
}
|
||||
|
||||
// Partition is a partition in the cache, backed by a lazycache instance.
type Partition[K comparable, V any] struct {
	c *lazycache.Cache[K, V]

	// zero is the zero value of V, returned on error or timeout.
	zero V

	trace logg.LevelLogger
	opts  OptionsPartition

	// maxSize is the current entry limit; adjusted by adjustMaxSize.
	maxSize int
}

// GetOrCreate gets or creates a value for the given key, delegating to the
// underlying lazycache (the discarded middle return is its "found" flag).
func (p *Partition[K, V]) GetOrCreate(key K, create func(key K) (V, error)) (V, error) {
	v, _, err := p.c.GetOrCreate(key, create)
	return v, err
}
|
||||
|
||||
// GetOrCreateWitTimeout gets or creates a value for the given key and times out if the create function
|
||||
// takes too long.
|
||||
func (p *Partition[K, V]) GetOrCreateWitTimeout(key K, duration time.Duration, create func(key K) (V, error)) (V, error) {
|
||||
resultch := make(chan V, 1)
|
||||
errch := make(chan error, 1)
|
||||
|
||||
go func() {
|
||||
v, _, err := p.c.GetOrCreate(key, create)
|
||||
if err != nil {
|
||||
errch <- err
|
||||
return
|
||||
}
|
||||
resultch <- v
|
||||
}()
|
||||
|
||||
select {
|
||||
case v := <-resultch:
|
||||
return v, nil
|
||||
case err := <-errch:
|
||||
return p.zero, err
|
||||
case <-time.After(duration):
|
||||
return p.zero, &herrors.TimeoutError{
|
||||
Duration: duration,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// clearMatching deletes every entry for which the predicate returns true,
// logging each eviction at trace level (lazily, via logg.StringFunc).
func (p *Partition[K, V]) clearMatching(predicate func(k, v any) bool) {
	p.c.DeleteFunc(func(key K, v V) bool {
		if predicate(key, v) {
			p.trace.Log(
				logg.StringFunc(
					func() string {
						return fmt.Sprintf("clearing cache key %v", key)
					},
				),
			)
			return true
		}
		return false
	})
}
|
||||
|
||||
// clearOnRebuild evicts entries from this partition in response to a
// rebuild, honoring the partition's ClearWhen policy:
//   - ClearNever: do nothing;
//   - ClearOnRebuild: drop everything;
//   - otherwise (ClearOnChange): drop entries that are stale or whose
//     identities appear to depend on something in the changeset.
func (p *Partition[K, V]) clearOnRebuild(changeset ...identity.Identity) {
	opts := p.getOptions()
	if opts.ClearWhen == ClearNever {
		return
	}

	if opts.ClearWhen == ClearOnRebuild {
		// Clear all.
		p.Clear()
		return
	}

	depsFinder := identity.NewFinder(identity.FinderConfig{})

	shouldDelete := func(key K, v V) bool {
		// We always clear elements marked as stale.
		if resource.IsStaleAny(v) {
			return true
		}

		// Now check if this entry has changed based on the changeset
		// based on filesystem events.
		if len(changeset) == 0 {
			// Nothing changed.
			return false
		}

		var probablyDependent bool
		identity.WalkIdentitiesShallow(v, func(level int, id2 identity.Identity) bool {
			for _, id := range changeset {
				if r := depsFinder.Contains(id, id2, -1); r > 0 {
					// It's probably dependent, evict from cache.
					probablyDependent = true
					return true
				}
			}
			return false
		})

		return probablyDependent
	}

	// First pass: evict matches and mark them stale so that the second
	// pass (clearStale, run across all partitions) can catch values that
	// are also cached elsewhere.
	p.c.DeleteFunc(func(key K, v V) bool {
		if shouldDelete(key, v) {
			p.trace.Log(
				logg.StringFunc(
					func() string {
						return fmt.Sprintf("first pass: clearing cache key %v", key)
					},
				),
			)
			resource.MarkStale(v)
			return true
		}
		return false
	})
}
|
||||
|
||||
// Keys returns all keys currently in this partition.
// Note: DeleteFunc is used purely as an iterator here — the callback
// always returns false, so nothing is deleted.
func (p *Partition[K, V]) Keys() []K {
	var keys []K
	p.c.DeleteFunc(func(key K, v V) bool {
		keys = append(keys, key)
		return false
	})
	return keys
}
|
||||
|
||||
// clearStale deletes all entries currently marked as stale. This is the
// second pass of Cache.ClearOnRebuild: entries may have been marked stale
// by the first pass running over any partition.
func (p *Partition[K, V]) clearStale() {
	p.c.DeleteFunc(func(key K, v V) bool {
		isStale := resource.IsStaleAny(v)
		if isStale {
			p.trace.Log(
				logg.StringFunc(
					func() string {
						return fmt.Sprintf("second pass: clearing cache key %v", key)
					},
				),
			)
		}

		return isStale
	})
}
|
||||
|
||||
// adjustMaxSize adjusts the max size of this partition and returns the
// number of items evicted as a result. The new size is floored at the
// package-wide minMaxSize.
func (p *Partition[K, V]) adjustMaxSize(newMaxSize int) int {
	if newMaxSize < minMaxSize {
		newMaxSize = minMaxSize
	}
	p.maxSize = newMaxSize
	// fmt.Println("Adjusting max size of partition from", oldMaxSize, "to", newMaxSize)
	return p.c.Resize(newMaxSize)
}

// getMaxSize returns the partition's current entry limit.
func (p *Partition[K, V]) getMaxSize() int {
	return p.maxSize
}

// getOptions returns the options this partition was created with.
func (p *Partition[K, V]) getOptions() OptionsPartition {
	return p.opts
}

// Clear deletes every entry in this partition.
func (p *Partition[K, V]) Clear() {
	p.c.DeleteFunc(func(key K, v V) bool {
		return true
	})
}

// Get returns the cached value for key, if present.
// NOTE(review): ctx is currently unused — presumably kept for interface
// compatibility or future use; confirm with callers.
func (p *Partition[K, V]) Get(ctx context.Context, key K) (V, bool) {
	return p.c.Get(key)
}
|
||||
|
||||
type PartitionManager interface {
|
||||
adjustMaxSize(addend int) int
|
||||
getMaxSize() int
|
||||
getOptions() OptionsPartition
|
||||
clearOnRebuild(changeset ...identity.Identity)
|
||||
clearMatching(predicate func(k, v any) bool)
|
||||
clearStale()
|
||||
}
|
||||
|
||||
const (
|
||||
ClearOnRebuild ClearWhen = iota + 1
|
||||
ClearOnChange
|
||||
ClearNever
|
||||
)
|
||||
|
||||
type ClearWhen int
|
||||
|
||||
type stats struct {
|
||||
opts Options
|
||||
memstatsCurrent runtime.MemStats
|
||||
currentMaxSize int
|
||||
availableMemory uint64
|
||||
|
||||
adjustmentFactor float64
|
||||
}
|
||||
|
||||
func (s *stats) adjustCurrentMaxSize() bool {
|
||||
newCurrentMaxSize := int(math.Floor(float64(s.opts.MaxSize) * s.adjustmentFactor))
|
||||
|
||||
if newCurrentMaxSize < s.opts.MaxSize {
|
||||
newCurrentMaxSize = int(s.opts.MinMaxSize)
|
||||
}
|
||||
changed := newCurrentMaxSize != s.currentMaxSize
|
||||
s.currentMaxSize = newCurrentMaxSize
|
||||
return changed
|
||||
}
|
||||
|
||||
// CleanKey turns s into a format suitable for a cache key for this package.
// The key will be a Unix-styled path with a leading slash but no trailing
// slash (path.Clean also collapses duplicate slashes and resolves "." / "..").
func CleanKey(s string) string {
	return path.Clean(paths.ToSlashPreserveLeading(s))
}
|
175
cache/dynacache/dynacache_test.go
vendored
Normal file
175
cache/dynacache/dynacache_test.go
vendored
Normal file
|
@ -0,0 +1,175 @@
|
|||
// Copyright 2024 The Hugo Authors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package dynacache
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
qt "github.com/frankban/quicktest"
|
||||
"github.com/gohugoio/hugo/common/loggers"
|
||||
"github.com/gohugoio/hugo/identity"
|
||||
"github.com/gohugoio/hugo/resources/resource"
|
||||
)
|
||||
|
||||
var (
	// Compile-time checks that testItem implements the interfaces the
	// cache inspects when clearing entries.
	_ resource.StaleInfo = (*testItem)(nil)
	_ identity.Identity  = (*testItem)(nil)
)

// testItem is a minimal cache value for tests: it can report itself stale
// and carries an identity name used for change detection.
type testItem struct {
	name    string
	isStale bool
}

// IsStale implements resource.StaleInfo.
func (t testItem) IsStale() bool {
	return t.isStale
}

// IdentifierBase implements identity.Identity.
func (t testItem) IdentifierBase() string {
	return t.name
}
|
||||
|
||||
// TestCache exercises partition creation: GetOrCreatePartition is
// idempotent for the same name, panics on invalid names/weights, and New
// panics without a logger.
func TestCache(t *testing.T) {
	t.Parallel()
	c := qt.New(t)

	cache := New(Options{
		Log: loggers.NewDefault(),
	})

	c.Cleanup(func() {
		cache.Stop()
	})

	opts := OptionsPartition{Weight: 30}

	c.Assert(cache, qt.Not(qt.IsNil))

	p1 := GetOrCreatePartition[string, testItem](cache, "/aaaa/bbbb", opts)
	c.Assert(p1, qt.Not(qt.IsNil))

	// Same name must return the identical partition instance.
	p2 := GetOrCreatePartition[string, testItem](cache, "/aaaa/bbbb", opts)

	c.Assert(func() { GetOrCreatePartition[string, testItem](cache, "foo bar", opts) }, qt.PanicMatches, ".*invalid partition name.*")
	c.Assert(func() { GetOrCreatePartition[string, testItem](cache, "/aaaa/cccc", OptionsPartition{Weight: 1234}) }, qt.PanicMatches, ".*invalid Weight.*")

	c.Assert(p2, qt.Equals, p1)

	// A different name yields a distinct partition.
	p3 := GetOrCreatePartition[string, testItem](cache, "/aaaa/cccc", opts)
	c.Assert(p3, qt.Not(qt.IsNil))
	c.Assert(p3, qt.Not(qt.Equals), p1)

	c.Assert(func() { New(Options{}) }, qt.PanicMatches, ".*nil Log.*")
}
|
||||
|
||||
// TestCalculateMaxSizePerPartition pins the weighted-average formula and
// its zero-argument panics.
func TestCalculateMaxSizePerPartition(t *testing.T) {
	t.Parallel()
	c := qt.New(t)

	c.Assert(calculateMaxSizePerPartition(1000, 500, 5), qt.Equals, 200)
	c.Assert(calculateMaxSizePerPartition(1000, 250, 5), qt.Equals, 400)
	c.Assert(func() { calculateMaxSizePerPartition(1000, 250, 0) }, qt.PanicMatches, ".*must be > 0.*")
	c.Assert(func() { calculateMaxSizePerPartition(1000, 0, 1) }, qt.PanicMatches, ".*must be > 0.*")
}
|
||||
|
||||
// TestCleanKey verifies leading-slash normalization, trailing-slash
// removal, and OS-path-separator conversion.
func TestCleanKey(t *testing.T) {
	c := qt.New(t)

	c.Assert(CleanKey("a/b/c"), qt.Equals, "/a/b/c")
	c.Assert(CleanKey("/a/b/c"), qt.Equals, "/a/b/c")
	c.Assert(CleanKey("a/b/c/"), qt.Equals, "/a/b/c")
	c.Assert(CleanKey(filepath.FromSlash("/a/b/c/")), qt.Equals, "/a/b/c")
}
|
||||
|
||||
// newTestCache builds a cache with two partitions — one ClearOnRebuild,
// one ClearOnChange — pre-populated with four entries covering the clear
// scenarios exercised in TestClear. It is stopped via t.Cleanup.
func newTestCache(t *testing.T) *Cache {
	cache := New(
		Options{
			Log: loggers.NewDefault(),
		},
	)

	p1 := GetOrCreatePartition[string, testItem](cache, "/aaaa/bbbb", OptionsPartition{Weight: 30, ClearWhen: ClearOnRebuild})
	p2 := GetOrCreatePartition[string, testItem](cache, "/aaaa/cccc", OptionsPartition{Weight: 30, ClearWhen: ClearOnChange})

	// Dropped wholesale on any rebuild (partition policy).
	p1.GetOrCreate("clearOnRebuild", func(string) (testItem, error) {
		return testItem{}, nil
	})

	// Dropped because the value reports itself stale.
	p2.GetOrCreate("clearBecauseStale", func(string) (testItem, error) {
		return testItem{
			isStale: true,
		}, nil
	})

	// Dropped only when the changeset contains the "changed" identity.
	p2.GetOrCreate("clearBecauseIdentityChanged", func(string) (testItem, error) {
		return testItem{
			name: "changed",
		}, nil
	})

	// Survives every rebuild scenario below.
	p2.GetOrCreate("clearNever", func(string) (testItem, error) {
		return testItem{
			isStale: false,
		}, nil
	})

	t.Cleanup(func() {
		cache.Stop()
	})

	return cache
}
|
||||
|
||||
// TestClear walks the three clearing paths: a plain rebuild, a rebuild
// with a changeset, and predicate-based ClearMatching.
func TestClear(t *testing.T) {
	t.Parallel()
	c := qt.New(t)

	predicateAll := func(string) bool {
		return true
	}

	cache := newTestCache(t)

	c.Assert(cache.Keys(predicateAll), qt.HasLen, 4)

	cache.ClearOnRebuild()

	// Stale items are always cleared, and the ClearOnRebuild partition is
	// dropped wholesale; the remaining two entries survive.
	c.Assert(cache.Keys(predicateAll), qt.HasLen, 2)

	cache = newTestCache(t)
	// With a matching changeset, the identity-dependent entry goes too.
	cache.ClearOnRebuild(identity.StringIdentity("changed"))

	c.Assert(cache.Keys(nil), qt.HasLen, 1)

	cache = newTestCache(t)

	cache.ClearMatching(func(k, v any) bool {
		return k.(string) == "clearOnRebuild"
	})

	c.Assert(cache.Keys(predicateAll), qt.HasLen, 3)

	// Smoke-test the resize path; no assertion, just must not panic.
	cache.adjustCurrentMaxSize()
}
|
||||
|
||||
// TestAdjustCurrentMaxSize checks that adjustCurrentMaxSize refreshes the
// cached memory stats: Alloc starts at its zero value and must change
// after runtime.ReadMemStats is invoked.
func TestAdjustCurrentMaxSize(t *testing.T) {
	t.Parallel()
	c := qt.New(t)
	cache := newTestCache(t)
	alloc := cache.stats.memstatsCurrent.Alloc
	cache.adjustCurrentMaxSize()
	c.Assert(cache.stats.memstatsCurrent.Alloc, qt.Not(qt.Equals), alloc)
}
|
8
cache/filecache/filecache.go
vendored
8
cache/filecache/filecache.go
vendored
|
@ -24,6 +24,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/gohugoio/hugo/common/hugio"
|
||||
"github.com/gohugoio/hugo/hugofs"
|
||||
|
||||
"github.com/gohugoio/hugo/helpers"
|
||||
|
||||
|
@ -109,7 +110,7 @@ func (l *lockedFile) Close() error {
|
|||
func (c *Cache) init() error {
|
||||
c.initOnce.Do(func() {
|
||||
// Create the base dir if it does not exist.
|
||||
if err := c.Fs.MkdirAll("", 0777); err != nil && !os.IsExist(err) {
|
||||
if err := c.Fs.MkdirAll("", 0o777); err != nil && !os.IsExist(err) {
|
||||
c.initErr = err
|
||||
}
|
||||
})
|
||||
|
@ -146,7 +147,8 @@ func (c *Cache) WriteCloser(id string) (ItemInfo, io.WriteCloser, error) {
|
|||
// it when done.
|
||||
func (c *Cache) ReadOrCreate(id string,
|
||||
read func(info ItemInfo, r io.ReadSeeker) error,
|
||||
create func(info ItemInfo, w io.WriteCloser) error) (info ItemInfo, err error) {
|
||||
create func(info ItemInfo, w io.WriteCloser) error,
|
||||
) (info ItemInfo, err error) {
|
||||
if err := c.init(); err != nil {
|
||||
return ItemInfo{}, err
|
||||
}
|
||||
|
@ -380,7 +382,7 @@ func NewCaches(p *helpers.PathSpec) (Caches, error) {
|
|||
|
||||
baseDir := v.DirCompiled
|
||||
|
||||
bfs := afero.NewBasePathFs(cfs, baseDir)
|
||||
bfs := hugofs.NewBasePathFs(cfs, baseDir)
|
||||
|
||||
var pruneAllRootDir string
|
||||
if k == CacheKeyModules {
|
||||
|
|
12
cache/filecache/filecache_test.go
vendored
12
cache/filecache/filecache_test.go
vendored
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2018 The Hugo Authors. All rights reserved.
|
||||
// Copyright 2024 The Hugo Authors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
@ -17,7 +17,6 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
|
@ -86,17 +85,8 @@ dir = ":cacheDir/c"
|
|||
cache := caches.Get("GetJSON")
|
||||
c.Assert(cache, qt.Not(qt.IsNil))
|
||||
|
||||
bfs, ok := cache.Fs.(*afero.BasePathFs)
|
||||
c.Assert(ok, qt.Equals, true)
|
||||
filename, err := bfs.RealPath("key")
|
||||
c.Assert(err, qt.IsNil)
|
||||
|
||||
cache = caches.Get("Images")
|
||||
c.Assert(cache, qt.Not(qt.IsNil))
|
||||
bfs, ok = cache.Fs.(*afero.BasePathFs)
|
||||
c.Assert(ok, qt.Equals, true)
|
||||
filename, _ = bfs.RealPath("key")
|
||||
c.Assert(filename, qt.Equals, filepath.FromSlash("_gen/images/key"))
|
||||
|
||||
rf := func(s string) func() (io.ReadCloser, error) {
|
||||
return func() (io.ReadCloser, error) {
|
||||
|
|
8
cache/filecache/integration_test.go
vendored
8
cache/filecache/integration_test.go
vendored
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2023 The Hugo Authors. All rights reserved.
|
||||
// Copyright 2024 The Hugo Authors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
@ -15,7 +15,6 @@ package filecache_test
|
|||
|
||||
import (
|
||||
"path/filepath"
|
||||
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
|
@ -47,7 +46,6 @@ title: "Home"
|
|||
_, err := b.H.BaseFs.ResourcesCache.Stat(filepath.Join("_gen", "images"))
|
||||
|
||||
b.Assert(err, qt.IsNil)
|
||||
|
||||
}
|
||||
|
||||
func TestPruneImages(t *testing.T) {
|
||||
|
@ -55,6 +53,7 @@ func TestPruneImages(t *testing.T) {
|
|||
// TODO(bep)
|
||||
t.Skip("skip flaky test on CI server")
|
||||
}
|
||||
t.Skip("skip flaky test")
|
||||
files := `
|
||||
-- hugo.toml --
|
||||
baseURL = "https://example.com"
|
||||
|
@ -92,7 +91,7 @@ iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAA
|
|||
|
||||
// TODO(bep) we need a way to test full rebuilds.
|
||||
// For now, just sleep a little so the cache elements expires.
|
||||
time.Sleep(300 * time.Millisecond)
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
|
||||
b.RenameFile("assets/a/pixel.png", "assets/b/pixel2.png").Build()
|
||||
|
||||
|
@ -104,5 +103,4 @@ iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAA
|
|||
b.Assert(err, qt.Not(qt.IsNil))
|
||||
_, err = b.H.BaseFs.ResourcesCache.Stat(imagesCacheDir)
|
||||
b.Assert(err, qt.IsNil)
|
||||
|
||||
}
|
||||
|
|
78
cache/namedmemcache/named_cache.go
vendored
78
cache/namedmemcache/named_cache.go
vendored
|
@ -1,78 +0,0 @@
|
|||
// Copyright 2018 The Hugo Authors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package namedmemcache provides a memory cache with a named lock. This is suitable
|
||||
// for situations where creating the cached resource can be time consuming or otherwise
|
||||
// resource hungry, or in situations where a "once only per key" is a requirement.
|
||||
package namedmemcache
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/BurntSushi/locker"
|
||||
)
|
||||
|
||||
// Cache holds the cached values.
|
||||
type Cache struct {
|
||||
nlocker *locker.Locker
|
||||
cache map[string]cacheEntry
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
type cacheEntry struct {
|
||||
value any
|
||||
err error
|
||||
}
|
||||
|
||||
// New creates a new cache.
|
||||
func New() *Cache {
|
||||
return &Cache{
|
||||
nlocker: locker.NewLocker(),
|
||||
cache: make(map[string]cacheEntry),
|
||||
}
|
||||
}
|
||||
|
||||
// Clear clears the cache state.
|
||||
func (c *Cache) Clear() {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
c.cache = make(map[string]cacheEntry)
|
||||
c.nlocker = locker.NewLocker()
|
||||
}
|
||||
|
||||
// GetOrCreate tries to get the value with the given cache key, if not found
|
||||
// create will be called and cached.
|
||||
// This method is thread safe. It also guarantees that the create func for a given
|
||||
// key is invoked only once for this cache.
|
||||
func (c *Cache) GetOrCreate(key string, create func() (any, error)) (any, error) {
|
||||
c.mu.RLock()
|
||||
entry, found := c.cache[key]
|
||||
c.mu.RUnlock()
|
||||
|
||||
if found {
|
||||
return entry.value, entry.err
|
||||
}
|
||||
|
||||
c.nlocker.Lock(key)
|
||||
defer c.nlocker.Unlock(key)
|
||||
|
||||
// Create it.
|
||||
value, err := create()
|
||||
|
||||
c.mu.Lock()
|
||||
c.cache[key] = cacheEntry{value: value, err: err}
|
||||
c.mu.Unlock()
|
||||
|
||||
return value, err
|
||||
}
|
80
cache/namedmemcache/named_cache_test.go
vendored
80
cache/namedmemcache/named_cache_test.go
vendored
|
@ -1,80 +0,0 @@
|
|||
// Copyright 2018 The Hugo Authors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package namedmemcache
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
qt "github.com/frankban/quicktest"
|
||||
)
|
||||
|
||||
func TestNamedCache(t *testing.T) {
|
||||
t.Parallel()
|
||||
c := qt.New(t)
|
||||
|
||||
cache := New()
|
||||
|
||||
counter := 0
|
||||
create := func() (any, error) {
|
||||
counter++
|
||||
return counter, nil
|
||||
}
|
||||
|
||||
for i := 0; i < 5; i++ {
|
||||
v1, err := cache.GetOrCreate("a1", create)
|
||||
c.Assert(err, qt.IsNil)
|
||||
c.Assert(v1, qt.Equals, 1)
|
||||
v2, err := cache.GetOrCreate("a2", create)
|
||||
c.Assert(err, qt.IsNil)
|
||||
c.Assert(v2, qt.Equals, 2)
|
||||
}
|
||||
|
||||
cache.Clear()
|
||||
|
||||
v3, err := cache.GetOrCreate("a2", create)
|
||||
c.Assert(err, qt.IsNil)
|
||||
c.Assert(v3, qt.Equals, 3)
|
||||
}
|
||||
|
||||
func TestNamedCacheConcurrent(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
c := qt.New(t)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
|
||||
cache := New()
|
||||
|
||||
create := func(i int) func() (any, error) {
|
||||
return func() (any, error) {
|
||||
return i, nil
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for j := 0; j < 100; j++ {
|
||||
id := fmt.Sprintf("id%d", j)
|
||||
v, err := cache.GetOrCreate(id, create(j))
|
||||
c.Assert(err, qt.IsNil)
|
||||
c.Assert(v, qt.Equals, j)
|
||||
}
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2023 The Hugo Authors. All rights reserved.
|
||||
// Copyright 2024 The Hugo Authors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
@ -259,7 +259,7 @@ func (r *rootCommand) ConfigFromProvider(key int32, cfg config.Provider) (*commo
|
|||
publishDirStatic := cfg.GetString("publishDirStatic")
|
||||
workingDir := cfg.GetString("workingDir")
|
||||
absPublishDirStatic := paths.AbsPathify(workingDir, publishDirStatic)
|
||||
staticFs := afero.NewBasePathFs(afero.NewOsFs(), absPublishDirStatic)
|
||||
staticFs := hugofs.NewBasePathFs(afero.NewOsFs(), absPublishDirStatic)
|
||||
|
||||
// Serve from both the static and dynamic fs,
|
||||
// the first will take priority.
|
||||
|
@ -405,8 +405,14 @@ func (r *rootCommand) PreRun(cd, runner *simplecobra.Commandeer) error {
|
|||
return err
|
||||
}
|
||||
|
||||
r.commonConfigs = lazycache.New[int32, *commonConfig](lazycache.Options{MaxEntries: 5})
|
||||
r.hugoSites = lazycache.New[int32, *hugolib.HugoSites](lazycache.Options{MaxEntries: 5})
|
||||
r.commonConfigs = lazycache.New(lazycache.Options[int32, *commonConfig]{MaxEntries: 5})
|
||||
// We don't want to keep stale HugoSites in memory longer than needed.
|
||||
r.hugoSites = lazycache.New(lazycache.Options[int32, *hugolib.HugoSites]{
|
||||
MaxEntries: 1,
|
||||
OnEvict: func(key int32, value *hugolib.HugoSites) {
|
||||
value.Close()
|
||||
},
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2023 The Hugo Authors. All rights reserved.
|
||||
// Copyright 2024 The Hugo Authors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
@ -37,5 +37,4 @@ func newExec() (*simplecobra.Exec, error) {
|
|||
}
|
||||
|
||||
return simplecobra.New(rootCmd)
|
||||
|
||||
}
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2023 The Hugo Authors. All rights reserved.
|
||||
// Copyright 2024 The Hugo Authors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
@ -37,7 +37,6 @@ func newConfigCommand() *configCommand {
|
|||
&configMountsCommand{},
|
||||
},
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
type configCommand struct {
|
||||
|
@ -190,7 +189,6 @@ func (m *configModMounts) MarshalJSON() ([]byte, error) {
|
|||
Dir: m.m.Dir(),
|
||||
Mounts: mounts,
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
type configMountsCommand struct {
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2023 The Hugo Authors. All rights reserved.
|
||||
// Copyright 2024 The Hugo Authors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
@ -134,7 +134,7 @@ func (c *convertCommand) convertAndSavePage(p page.Page, site *hugolib.Site, tar
|
|||
}
|
||||
}
|
||||
|
||||
if p.File().IsZero() {
|
||||
if p.File() == nil {
|
||||
// No content file.
|
||||
return nil
|
||||
}
|
||||
|
@ -209,7 +209,7 @@ func (c *convertCommand) convertContents(format metadecoders.Format) error {
|
|||
|
||||
var pagesBackedByFile page.Pages
|
||||
for _, p := range site.AllPages() {
|
||||
if p.File().IsZero() {
|
||||
if p.File() == nil {
|
||||
continue
|
||||
}
|
||||
pagesBackedByFile = append(pagesBackedByFile, p)
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2023 The Hugo Authors. All rights reserved.
|
||||
// Copyright 2024 The Hugo Authors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
@ -14,7 +14,7 @@
|
|||
//go:build !nodeploy
|
||||
// +build !nodeploy
|
||||
|
||||
// Copyright 2023 The Hugo Authors. All rights reserved.
|
||||
// Copyright 2024 The Hugo Authors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
@ -38,7 +38,6 @@ import (
|
|||
)
|
||||
|
||||
func newDeployCommand() simplecobra.Commander {
|
||||
|
||||
return &simpleCommand{
|
||||
name: "deploy",
|
||||
short: "Deploy your site to a Cloud provider.",
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2023 The Hugo Authors. All rights reserved.
|
||||
// Copyright 2024 The Hugo Authors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
@ -14,7 +14,7 @@
|
|||
//go:build nodeploy
|
||||
// +build nodeploy
|
||||
|
||||
// Copyright 2023 The Hugo Authors. All rights reserved.
|
||||
// Copyright 2024 The Hugo Authors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2023 The Hugo Authors. All rights reserved.
|
||||
// Copyright 2024 The Hugo Authors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2023 The Hugo Authors. All rights reserved.
|
||||
// Copyright 2024 The Hugo Authors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
@ -101,7 +101,7 @@ See https://xyproto.github.io/splash/docs/all.html for a preview of the availabl
|
|||
}
|
||||
if found, _ := helpers.Exists(genmandir, hugofs.Os); !found {
|
||||
r.Println("Directory", genmandir, "does not exist, creating...")
|
||||
if err := hugofs.Os.MkdirAll(genmandir, 0777); err != nil {
|
||||
if err := hugofs.Os.MkdirAll(genmandir, 0o777); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -150,7 +150,7 @@ url: %s
|
|||
}
|
||||
if found, _ := helpers.Exists(gendocdir, hugofs.Os); !found {
|
||||
r.Println("Directory", gendocdir, "does not exist, creating...")
|
||||
if err := hugofs.Os.MkdirAll(gendocdir, 0777); err != nil {
|
||||
if err := hugofs.Os.MkdirAll(gendocdir, 0o777); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -177,7 +177,6 @@ url: %s
|
|||
cmd.PersistentFlags().SetAnnotation("dir", cobra.BashCompSubdirsInDir, []string{})
|
||||
},
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
var docsHelperTarget string
|
||||
|
@ -241,7 +240,6 @@ url: %s
|
|||
newDocsHelper(),
|
||||
},
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
type genCommand struct {
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2023 The Hugo Authors. All rights reserved.
|
||||
// Copyright 2024 The Hugo Authors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
@ -110,12 +110,11 @@ func flagsToCfgWithAdditionalConfigBase(cd *simplecobra.Commandeer, cfg config.P
|
|||
})
|
||||
|
||||
return cfg
|
||||
|
||||
}
|
||||
|
||||
func mkdir(x ...string) {
|
||||
p := filepath.Join(x...)
|
||||
err := os.MkdirAll(p, 0777) // before umask
|
||||
err := os.MkdirAll(p, 0o777) // before umask
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2023 The Hugo Authors. All rights reserved.
|
||||
// Copyright 2024 The Hugo Authors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2023 The Hugo Authors. All rights reserved.
|
||||
// Copyright 2024 The Hugo Authors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
@ -24,6 +24,7 @@ import (
|
|||
"runtime/trace"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/bep/logg"
|
||||
|
@ -34,6 +35,7 @@ import (
|
|||
"github.com/gohugoio/hugo/common/hugo"
|
||||
"github.com/gohugoio/hugo/common/loggers"
|
||||
"github.com/gohugoio/hugo/common/maps"
|
||||
"github.com/gohugoio/hugo/common/paths"
|
||||
"github.com/gohugoio/hugo/common/terminal"
|
||||
"github.com/gohugoio/hugo/common/types"
|
||||
"github.com/gohugoio/hugo/config"
|
||||
|
@ -83,7 +85,6 @@ func (c *hugoBuilder) withConf(fn func(conf *commonConfig)) {
|
|||
c.confmu.Lock()
|
||||
defer c.confmu.Unlock()
|
||||
fn(c.conf)
|
||||
|
||||
}
|
||||
|
||||
type hugoBuilderErrState struct {
|
||||
|
@ -135,46 +136,12 @@ func (c *hugoBuilder) errCount() int {
|
|||
|
||||
// getDirList provides NewWatcher() with a list of directories to watch for changes.
|
||||
func (c *hugoBuilder) getDirList() ([]string, error) {
|
||||
var filenames []string
|
||||
|
||||
walkFn := func(path string, fi hugofs.FileMetaInfo, err error) error {
|
||||
if err != nil {
|
||||
c.r.logger.Errorln("walker: ", err)
|
||||
return nil
|
||||
}
|
||||
|
||||
if fi.IsDir() {
|
||||
if fi.Name() == ".git" ||
|
||||
fi.Name() == "node_modules" || fi.Name() == "bower_components" {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
|
||||
filenames = append(filenames, fi.Meta().Filename)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
h, err := c.hugo()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
watchFiles := h.PathSpec.BaseFs.WatchDirs()
|
||||
for _, fi := range watchFiles {
|
||||
if !fi.IsDir() {
|
||||
filenames = append(filenames, fi.Meta().Filename)
|
||||
continue
|
||||
}
|
||||
|
||||
w := hugofs.NewWalkway(hugofs.WalkwayConfig{Logger: c.r.logger, Info: fi, WalkFn: walkFn})
|
||||
if err := w.Walk(); err != nil {
|
||||
c.r.logger.Errorln("walker: ", err)
|
||||
}
|
||||
}
|
||||
|
||||
filenames = helpers.UniqueStringsSorted(filenames)
|
||||
|
||||
return filenames, nil
|
||||
return helpers.UniqueStringsSorted(h.PathSpec.BaseFs.WatchFilenames()), nil
|
||||
}
|
||||
|
||||
func (c *hugoBuilder) initCPUProfile() (func(), error) {
|
||||
|
@ -441,7 +408,7 @@ func (c *hugoBuilder) copyStatic() (map[string]uint64, error) {
|
|||
}
|
||||
|
||||
func (c *hugoBuilder) copyStaticTo(sourceFs *filesystems.SourceFilesystem) (uint64, error) {
|
||||
infol := c.r.logger.InfoCommand("copy static")
|
||||
infol := c.r.logger.InfoCommand("static")
|
||||
publishDir := helpers.FilePathSeparator
|
||||
|
||||
if sourceFs.PublishFolder != "" {
|
||||
|
@ -467,11 +434,11 @@ func (c *hugoBuilder) copyStaticTo(sourceFs *filesystems.SourceFilesystem) (uint
|
|||
if syncer.Delete {
|
||||
infol.Logf("removing all files from destination that don't exist in static dirs")
|
||||
|
||||
syncer.DeleteFilter = func(f os.FileInfo) bool {
|
||||
syncer.DeleteFilter = func(f fsync.FileInfo) bool {
|
||||
return f.IsDir() && strings.HasPrefix(f.Name(), ".")
|
||||
}
|
||||
}
|
||||
infol.Logf("syncing static files to %s", publishDir)
|
||||
start := time.Now()
|
||||
|
||||
// because we are using a baseFs (to get the union right).
|
||||
// set sync src to root
|
||||
|
@ -479,9 +446,10 @@ func (c *hugoBuilder) copyStaticTo(sourceFs *filesystems.SourceFilesystem) (uint
|
|||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
loggers.TimeTrackf(infol, start, nil, "syncing static files to %s", publishDir)
|
||||
|
||||
// Sync runs Stat 3 times for every source file (which sounds much)
|
||||
numFiles := fs.statCounter / 3
|
||||
// Sync runs Stat 2 times for every source file.
|
||||
numFiles := fs.statCounter / 2
|
||||
|
||||
return numFiles, err
|
||||
}
|
||||
|
@ -652,13 +620,31 @@ func (c *hugoBuilder) handleBuildErr(err error, msg string) {
|
|||
func (c *hugoBuilder) handleEvents(watcher *watcher.Batcher,
|
||||
staticSyncer *staticSyncer,
|
||||
evs []fsnotify |