Mirror of https://github.com/gohugoio/hugo.git (synced 2024-11-07 20:30:36 -05:00)

Commit 7285e74090
There are some breaking changes in this commit, see #11455.

Closes #11455
Closes #11549

This fixes a set of bugs (see the issue list below) and also pays down some technical debt accumulated over the years. We now build with Staticcheck enabled in the CI build.

Performance should be about the same as before for regular sized Hugo sites, but Hugo should perform and scale much better on larger data sets, as objects that use lots of memory (e.g. rendered Markdown, big JSON files read into maps with transform.Unmarshal etc.) will now get automatically garbage collected if needed. Performance on partial rebuilds when running the server in fast render mode should be the same, but the change detection should be much more accurate.

A list of the notable new features:

* A new dependency tracker that covers (almost) all of Hugo's API and is used to do fine grained partial rebuilds when running the server.
* A new and simpler tree document store which allows fast lookups and prefix-walking in all dimensions (e.g. language) concurrently.
* You can now configure an upper memory limit, allowing for much larger data sets and/or running on lower specced PCs.
* We have lifted the "no resources in sub folders" restriction for branch bundles (e.g. sections).

Memory Limit

Hugo will, by default, set aside a quarter of the total system memory, but you can set this via the OS environment variable HUGO_MEMORYLIMIT (in gigabytes); an illustrative sketch of this lookup follows after this commit message. The limit is backed by a partitioned LRU cache used throughout Hugo, a cache that gets dynamically resized in low memory situations, allowing Go's garbage collector to free the memory.

New Dependency Tracker

Hugo has had a rule based, coarse grained approach to server rebuilds that has worked mostly pretty well, but there have been some surprises (e.g. stale content). This is now revamped with a new dependency tracker that can quickly calculate the delta given a changed resource (e.g. a content file, template, JS file etc.). It handles transitive relations, e.g. $page -> js.Build -> JS import, or $page1.Content -> render hook -> site.GetPage -> $page2.Title, or $page1.Content -> shortcode -> partial -> site.RegularPages -> $page2.Content -> shortcode ..., and should also handle changes to aggregated values (e.g. site.Lastmod) effectively.

This covers all of Hugo's API with two known exceptions (a list that may not be fully exhaustive):

* Changes to files loaded with the template func os.ReadFile may not be handled correctly. We recommend loading resources with resources.Get.
* Changes to Hugo objects (e.g. Page) passed in the template context to lang.Translate may not be detected correctly. We recommend keeping i18n templates simple and not passing in much data context beyond simple types such as strings and numbers.

Note that the cachebuster configuration (when A changes, then rebuild B) works well with the above, but we recommend that you revise that configuration, as in most situations it should no longer be needed. One example where it is still needed is TailwindCSS, which uses changes to hugo_stats.json to trigger new CSS rebuilds.

Document Store

Previously, slightly simplified, we split the document store (where we store pages and resources) into one tree per language. This worked pretty well, but the structure made some operations harder than they needed to be. We have now restructured it into one Radix tree for all languages. Internally, the language is considered a dimension of that tree, and the tree can be viewed in all dimensions concurrently. This makes some language-related operations simpler (e.g. finding translations is just a slice range), but the idea is that it should also be relatively inexpensive to add more dimensions if needed (e.g. role).

Fixes #10169 Fixes #10364 Fixes #10482 Fixes #10630 Fixes #10656 Fixes #10694 Fixes #10918 Fixes #11262 Fixes #11439 Fixes #11453 Fixes #11457 Fixes #11466 Fixes #11540 Fixes #11551 Fixes #11556 Fixes #11654 Fixes #11661 Fixes #11663 Fixes #11664 Fixes #11669 Fixes #11671 Fixes #11807 Fixes #11808 Fixes #11809 Fixes #11815 Fixes #11840 Fixes #11853 Fixes #11860 Fixes #11883 Fixes #11904 Fixes #7388 Fixes #7425 Fixes #7436 Fixes #7544 Fixes #7882 Fixes #7960 Fixes #8255 Fixes #8307 Fixes #8863 Fixes #8927 Fixes #9192 Fixes #9324
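The memory limit lookup can be illustrated with a small sketch. This is not Hugo's internal implementation; memoryLimitBytes and the totalSystemMemory parameter are hypothetical stand-ins used only to show the HUGO_MEMORYLIMIT / quarter-of-system-memory behavior described above.

package main

import (
	"fmt"
	"os"
	"strconv"
)

// memoryLimitBytes returns the memory limit in bytes: the value of
// HUGO_MEMORYLIMIT (in gigabytes) if set and valid, otherwise a quarter of
// the total system memory. totalSystemMemory is a stand-in for however the
// host memory is detected; it is a hypothetical parameter, not Hugo's API.
func memoryLimitBytes(totalSystemMemory uint64) uint64 {
	if s := os.Getenv("HUGO_MEMORYLIMIT"); s != "" {
		if gb, err := strconv.ParseFloat(s, 64); err == nil && gb > 0 {
			return uint64(gb * 1024 * 1024 * 1024)
		}
	}
	// Default: a quarter of the total system memory.
	return totalSystemMemory / 4
}

func main() {
	// On a host with 16 GiB of memory and HUGO_MEMORYLIMIT unset,
	// this prints 4294967296 (4 GiB).
	fmt.Println(memoryLimitBytes(16 << 30))
}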
452 lines
9.8 KiB
Go
// Copyright 2019 The Hugo Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package page

import (
	"path"
	"path/filepath"
	"strings"
	"sync"

	"github.com/gohugoio/hugo/common/paths"
	"github.com/gohugoio/hugo/common/urls"
	"github.com/gohugoio/hugo/helpers"
	"github.com/gohugoio/hugo/output"
	"github.com/gohugoio/hugo/resources/kinds"
)

const slash = "/"
// TargetPathDescriptor describes what the file path for a given resource
// should look like on the file system. The same descriptor is then later used to
// create both the permalinks and the relative links, paginator URLs etc.
//
// The big motivation behind this is to have only one source of truth for URLs,
// and by that also get rid of most of the fragile string parsing/encoding etc.
type TargetPathDescriptor struct {
	PathSpec *helpers.PathSpec

	Type output.Format
	Kind string

	Path    *paths.Path
	Section *paths.Path

	// For regular content pages this is either
	// 1) the Slug, if set,
	// 2) the file base name (TranslationBaseName).
	BaseName string

	// Typically a language prefix added to file paths.
	PrefixFilePath string

	// Typically a language prefix added to links.
	PrefixLink string

	// If in multihost mode etc., every link/path needs to be prefixed, even
	// if set in URL.
	ForcePrefix bool

	// URL from front matter if set. Will override any Slug etc.
	URL string

	// Used to create paginator links.
	Addends string

	// The expanded permalink if defined for the section, ready to use.
	ExpandedPermalink string

	// Some types cannot have uglyURLs, even if globally enabled, RSS being one example.
	UglyURLs bool
}
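
// Illustrative example (not part of the original source): for a regular page
// backed by content/posts/my-post.md, with no slug or url set, HTML output and
// pretty URLs, CreateTargetPaths below would produce roughly:
//
//	TargetFilename:        /posts/my-post/index.html (OS slashes)
//	SubResourceBaseTarget: /posts/my-post
//	SubResourceBaseLink:   /posts/my-post
//	Link:                  /posts/my-post/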

// TODO(bep) move this type.
type TargetPaths struct {
	// Where to store the file on disk relative to the publish dir. OS slashes.
	TargetFilename string

	// The directory to write sub-resources of the above.
	SubResourceBaseTarget string

	// The base for creating links to sub-resources of the above.
	SubResourceBaseLink string

	// The relative permalink to this resource. Unix slashes.
	Link string
}

// RelPermalink returns the relative permalink, prepending the base path from
// the configured baseURL if there is one.
func (p TargetPaths) RelPermalink(s *helpers.PathSpec) string {
	return s.PrependBasePath(p.Link, false)
}

// PermalinkForOutputFormat returns the absolute permalink for the given output
// format, using the format's protocol, if set, with the configured base URL.
func (p TargetPaths) PermalinkForOutputFormat(s *helpers.PathSpec, f output.Format) string {
	var baseURL urls.BaseURL
	var err error
	if f.Protocol != "" {
		baseURL, err = s.Cfg.BaseURL().WithProtocol(f.Protocol)
		if err != nil {
			return ""
		}
	} else {
		baseURL = s.Cfg.BaseURL()
	}
	baseURLstr := baseURL.String()
	return s.PermalinkForBaseURL(p.Link, baseURLstr)
}

// CreateTargetPaths creates the target file paths and the relative link for
// the resource described by d.
func CreateTargetPaths(d TargetPathDescriptor) (tp TargetPaths) {
	// Normalize all Windows file paths to simplify what's next.
	if helpers.FilePathSeparator != "/" {
		d.PrefixFilePath = filepath.ToSlash(d.PrefixFilePath)
	}

	if !d.Type.Root && d.URL != "" && !strings.HasPrefix(d.URL, "/") {
		// Treat this as a context relative URL.
		d.ForcePrefix = true
	}

	if d.URL != "" {
		d.URL = filepath.ToSlash(d.URL)
		if strings.Contains(d.URL, "..") {
			d.URL = path.Join("/", d.URL)
		}
	}

	if d.Type.Root && !d.ForcePrefix {
		d.PrefixFilePath = ""
		d.PrefixLink = ""
	}

	pb := getPagePathBuilder(d)
	defer putPagePathBuilder(pb)

	pb.fullSuffix = d.Type.MediaType.FirstSuffix.FullSuffix

	// The top level index files, i.e. the home page etc., need
	// the index base even when uglyURLs is enabled.
	needsBase := true

	pb.isUgly = (d.UglyURLs || d.Type.Ugly) && !d.Type.NoUgly
	pb.baseNameSameAsType = !d.Path.IsBundle() && d.BaseName != "" && d.BaseName == d.Type.BaseName

	if d.ExpandedPermalink == "" && pb.baseNameSameAsType {
		pb.isUgly = true
	}

	if d.Type == output.HTTPStatusHTMLFormat || d.Type == output.SitemapFormat || d.Type == output.RobotsTxtFormat {
		pb.noSubResources = true
	} else if d.Kind != kinds.KindPage && d.URL == "" && d.Section.Base() != "/" {
		if d.ExpandedPermalink != "" {
			pb.Add(d.ExpandedPermalink)
		} else {
			pb.Add(d.Section.Base())
		}
		needsBase = false
	}

	if d.Type.Path != "" {
		pb.Add(d.Type.Path)
	}

	if d.Kind != kinds.KindHome && d.URL != "" {
		pb.Add(paths.FieldsSlash(d.URL)...)

		if d.Addends != "" {
			pb.Add(d.Addends)
		}

		hasDot := strings.Contains(d.URL, ".")
		hasSlash := strings.HasSuffix(d.URL, "/")

		if hasSlash || !hasDot {
			pb.Add(d.Type.BaseName + pb.fullSuffix)
		} else if hasDot {
			pb.fullSuffix = paths.Ext(d.URL)
		}

		if pb.IsHtmlIndex() {
			pb.linkUpperOffset = 1
		}

		if d.ForcePrefix {

			// Prepend language prefix if not already set in URL.
			if d.PrefixFilePath != "" && !strings.HasPrefix(d.URL, "/"+d.PrefixFilePath) {
				pb.prefixPath = d.PrefixFilePath
			}

			if d.PrefixLink != "" && !strings.HasPrefix(d.URL, "/"+d.PrefixLink) {
				pb.prefixLink = d.PrefixLink
			}
		}
	} else if !kinds.IsBranch(d.Kind) {
		if d.ExpandedPermalink != "" {
			pb.Add(d.ExpandedPermalink)
		} else {
			if dir := d.Path.ContainerDir(); dir != "" {
				pb.Add(dir)
			}
			if d.BaseName != "" {
				pb.Add(d.BaseName)
			} else {
				pb.Add(d.Path.BaseNameNoIdentifier())
			}
		}

		if d.Addends != "" {
			pb.Add(d.Addends)
		}

		if pb.isUgly {
			pb.ConcatLast(pb.fullSuffix)
		} else {
			pb.Add(d.Type.BaseName + pb.fullSuffix)
		}

		if pb.IsHtmlIndex() {
			pb.linkUpperOffset = 1
		}

		if d.PrefixFilePath != "" {
			pb.prefixPath = d.PrefixFilePath
		}

		if d.PrefixLink != "" {
			pb.prefixLink = d.PrefixLink
		}
	} else {
		if d.Addends != "" {
			pb.Add(d.Addends)
		}

		needsBase = needsBase && d.Addends == ""

		if needsBase || !pb.isUgly {
			pb.Add(d.Type.BaseName + pb.fullSuffix)
		} else {
			pb.ConcatLast(pb.fullSuffix)
		}

		if pb.IsHtmlIndex() {
			pb.linkUpperOffset = 1
		}

		if d.PrefixFilePath != "" {
			pb.prefixPath = d.PrefixFilePath
		}

		if d.PrefixLink != "" {
			pb.prefixLink = d.PrefixLink
		}
	}

	// If the page URL is explicitly set in front matter,
	// preserve its value without sanitization.
	if d.Kind != kinds.KindPage || d.URL == "" {
		// Note: MakePathSanitized will lower case the path if
		// disablePathToLower isn't set.
		pb.Sanitize()
	}

	link := pb.Link()
	pagePath := pb.PathFile()

	tp.TargetFilename = filepath.FromSlash(pagePath)
	if !pb.noSubResources {
		tp.SubResourceBaseTarget = pb.PathDir()
		tp.SubResourceBaseLink = pb.LinkDir()
	}
	if d.URL != "" {
		tp.Link = paths.URLEscape(link)
	} else {
		// This is slightly faster when we know we don't have any
		// query or scheme etc.
		tp.Link = paths.PathEscape(link)
	}
	if tp.Link == "" {
		tp.Link = "/"
	}

	return
}
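
// More illustrative examples (not part of the original source):
//
//   - With uglyURLs enabled, a regular page at content/posts/my-post.md maps to
//     TargetFilename /posts/my-post.html and Link /posts/my-post.html.
//   - For paginator pages, Addends carries the extra path segments (e.g. "page/2"),
//     giving TargetFilename /posts/page/2/index.html and Link /posts/page/2/.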

// pagePathBuilder builds the link and file path for a page from a list of
// path elements.
//
// When adding state here, remember to update putPagePathBuilder.
type pagePathBuilder struct {
	els []string

	d TargetPathDescriptor

	// Builder state.
	isUgly             bool
	baseNameSameAsType bool
	noSubResources     bool
	fullSuffix         string // File suffix including any ".".
	prefixLink         string
	prefixPath         string
	linkUpperOffset    int
}

// Add appends the given path elements, skipping empty strings and bare slashes.
func (p *pagePathBuilder) Add(el ...string) {
	// Filter out empty elements and slashes.
	n := 0
	for _, e := range el {
		if e != "" && e != slash {
			el[n] = e
			n++
		}
	}
	el = el[:n]

	p.els = append(p.els, el...)
}

// ConcatLast concatenates s onto the last element, first trimming any trailing
// slash. If there are no elements yet, s becomes the first.
func (p *pagePathBuilder) ConcatLast(s string) {
	if len(p.els) == 0 {
		p.Add(s)
		return
	}
	old := p.els[len(p.els)-1]
	if old == "" {
		p.els[len(p.els)-1] = s
		return
	}
	if old[len(old)-1] == '/' {
		old = old[:len(old)-1]
	}
	p.els[len(p.els)-1] = old + s
}

// IsHtmlIndex reports whether the last path element is "index.html".
func (p *pagePathBuilder) IsHtmlIndex() bool {
	return p.Last() == "index.html"
}

// Last returns the last added path element, or the empty string if none have
// been added.
func (p *pagePathBuilder) Last() string {
	if len(p.els) == 0 {
		// Also guards against a non-nil, empty slice after pool reuse.
		return ""
	}
	return p.els[len(p.els)-1]
}

// Link returns the relative link for the built path: any trailing index.html
// is dropped, the base name is trimmed when it equals the output format's, and
// the link prefix and a trailing slash are applied where needed.
func (p *pagePathBuilder) Link() string {
	link := p.Path(p.linkUpperOffset)

	if p.baseNameSameAsType {
		link = strings.TrimSuffix(link, p.d.BaseName)
	}

	if p.prefixLink != "" {
		link = "/" + p.prefixLink + link
	}

	if p.linkUpperOffset > 0 && !strings.HasSuffix(link, "/") {
		link += "/"
	}

	return link
}

// LinkDir returns the base for creating links to this page's sub-resources,
// or the empty string if the page has none.
func (p *pagePathBuilder) LinkDir() string {
	if p.noSubResources {
		return ""
	}

	pathDir := p.PathDirBase()

	if p.prefixLink != "" {
		pathDir = "/" + p.prefixLink + pathDir
	}

	return pathDir
}

// Path joins the path elements, excluding the last upperOffset elements, and
// returns the result with a leading slash.
func (p *pagePathBuilder) Path(upperOffset int) string {
	upper := len(p.els)
	if upperOffset > 0 {
		upper -= upperOffset
	}
	pth := path.Join(p.els[:upper]...)
	return paths.AddLeadingSlash(pth)
}

// PathDir returns the directory to write sub-resources to, with any file path
// prefix applied.
func (p *pagePathBuilder) PathDir() string {
	dir := p.PathDirBase()
	if p.prefixPath != "" {
		dir = "/" + p.prefixPath + dir
	}
	return dir
}

// PathDirBase returns the sub-resource directory without any prefix applied:
// the containing directory for index-style files, otherwise the path with the
// file suffix trimmed.
func (p *pagePathBuilder) PathDirBase() string {
	if p.noSubResources {
		return ""
	}

	dir := p.Path(0)
	isIndex := strings.HasPrefix(p.Last(), p.d.Type.BaseName+".")

	if isIndex {
		dir = paths.Dir(dir)
	} else {
		dir = strings.TrimSuffix(dir, p.fullSuffix)
	}

	if dir == "/" {
		dir = ""
	}

	return dir
}

// PathFile returns the target file path, with any file path prefix applied.
func (p *pagePathBuilder) PathFile() string {
	dir := p.Path(0)
	if p.prefixPath != "" {
		dir = "/" + p.prefixPath + dir
	}
	return dir
}

// Prepend inserts the given elements at the front of the path.
func (p *pagePathBuilder) Prepend(el ...string) {
	p.els = append(p.els[:0], append(el, p.els[0:]...)...)
}

// Sanitize sanitizes every path element using the configured PathSpec.
func (p *pagePathBuilder) Sanitize() {
	for i, el := range p.els {
		p.els[i] = p.d.PathSpec.MakePathSanitized(el)
	}
}

var pagePathBuilderPool = &sync.Pool{
	New: func() any {
		return &pagePathBuilder{}
	},
}

// getPagePathBuilder returns a builder from the pool, initialized with the
// given descriptor.
func getPagePathBuilder(d TargetPathDescriptor) *pagePathBuilder {
	b := pagePathBuilderPool.Get().(*pagePathBuilder)
	b.d = d
	return b
}

// putPagePathBuilder resets the builder's state and returns it to the pool.
func putPagePathBuilder(b *pagePathBuilder) {
	b.els = b.els[:0]
	b.fullSuffix = ""
	b.baseNameSameAsType = false
	b.isUgly = false
	b.noSubResources = false
	b.prefixLink = ""
	b.prefixPath = ""
	b.linkUpperOffset = 0
	pagePathBuilderPool.Put(b)
}