// Copyright 2016 The Hugo Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package hugolib

import (
	"path"
	"path/filepath"

	"github.com/gohugoio/hugo/cache"
)

// PageCollections contains the page collections for a site.
type PageCollections struct {
	// Includes pages of all types, but only pages in the current language.
	Pages Pages

	// Includes all pages in all languages, including the current one.
	// Includes pages of all types.
	AllPages Pages

	// A convenience cache for the traditional index types, taxonomies, home page etc.
	// This is for the current language only.
	indexPages Pages

	// A convenience cache for the regular pages.
	// This is for the current language only.
	RegularPages Pages

	// A convenience cache for all the regular pages.
	AllRegularPages Pages

	// Includes absolutely all pages (of all types), including drafts etc.
	rawAllPages Pages

	pageCache *cache.PartitionedLazyCache
}

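// refreshPageCaches rebuilds the convenience page collections (indexPages,
// RegularPages, AllRegularPages) and the partitioned lazy cache used by getPage.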
func (c *PageCollections) refreshPageCaches() {
	c.indexPages = c.findPagesByKindNotIn(KindPage, c.Pages)
	c.RegularPages = c.findPagesByKindIn(KindPage, c.Pages)
	c.AllRegularPages = c.findPagesByKindIn(KindPage, c.AllPages)

	cacheLoader := func(kind string) func() (map[string]interface{}, error) {
		return func() (map[string]interface{}, error) {
			cache := make(map[string]interface{})
			switch kind {
			case KindPage:
				// Note that we deliberately use the pages from all sites
				// in this cache, as we intend to use this in the ref and relref
				// shortcodes. If the user says "sect/doc1.en.md", he/she knows
				// what he/she is looking for.
				for _, p := range c.AllRegularPages {
					cache[filepath.ToSlash(p.Source.Path())] = p
					// Ref/Relref supports this potentially ambiguous lookup.
					cache[p.Source.LogicalName()] = p
				}
			default:
				for _, p := range c.indexPages {
					key := path.Join(p.sections...)
					cache[key] = p
				}
			}

			return cache, nil
		}
	}

	partitions := make([]cache.Partition, len(allKindsInPages))

	for i, kind := range allKindsInPages {
		partitions[i] = cache.Partition{Key: kind, Load: cacheLoader(kind)}
	}

	c.pageCache = cache.NewPartitionedLazyCache(partitions...)
}

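// newPageCollections creates a new, empty set of page collections.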
func newPageCollections() *PageCollections {
	return &PageCollections{}
}

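// newPageCollectionsFromPages creates a new set of page collections seeded
// with the given raw pages.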
func newPageCollectionsFromPages(pages Pages) *PageCollections {
	return &PageCollections{rawAllPages: pages}
}

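// getPage returns the page of the given kind identified by the section path
// (for index pages) or by the source path/logical file name (for regular
// pages). It returns nil if no page is found. Illustrative calls (the exact
// keys depend on the cacheLoader above):
//
//	c.getPage(KindPage, "sect/doc1.md")
//	c.getPage(KindSection, "sect")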
func (c *PageCollections) getPage(typ string, sections ...string) *Page {
	var key string
	if len(sections) == 1 {
		key = filepath.ToSlash(sections[0])
	} else {
		key = path.Join(sections...)
	}

	p, _ := c.pageCache.Get(typ, key)
	if p == nil {
		return nil
	}
	return p.(*Page)
}

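// findPagesByKindIn returns the pages in inPages that are of the given kind.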
func (*PageCollections) findPagesByKindIn(kind string, inPages Pages) Pages {
	var pages Pages
	for _, p := range inPages {
		if p.Kind == kind {
			pages = append(pages, p)
		}
	}
	return pages
}

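// findPagesByKindNotIn returns the pages in inPages that are not of the given kind.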
func (*PageCollections) findPagesByKindNotIn(kind string, inPages Pages) Pages {
	var pages Pages
	for _, p := range inPages {
		if p.Kind != kind {
			pages = append(pages, p)
		}
	}
	return pages
}

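// findPagesByKind returns the pages of the given kind in the current language.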
func (c *PageCollections) findPagesByKind(kind string) Pages {
	return c.findPagesByKindIn(kind, c.Pages)
}

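// addPage appends the page to the raw page collection.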
func (c *PageCollections) addPage(page *Page) {
	c.rawAllPages = append(c.rawAllPages, page)
}

// When we get a REMOVE event we're not always given all the individual
// files, so we need to remove all pages below the given path.
func (c *PageCollections) removePageByPathPrefix(path string) {
	for {
		i := c.rawAllPages.findFirstPagePosByFilePathPrefix(path)
		if i == -1 {
			break
		}
		c.rawAllPages = append(c.rawAllPages[:i], c.rawAllPages[i+1:]...)
	}
}

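// removePageByPath removes the page with the given source file path and
// clears its cached resources.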
func (c *PageCollections) removePageByPath(path string) {
	if i := c.rawAllPages.findPagePosByFilePath(path); i >= 0 {
		c.clearResourceCacheForPage(c.rawAllPages[i])
		c.rawAllPages = append(c.rawAllPages[:i], c.rawAllPages[i+1:]...)
	}
}

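// removePage removes the given page from the raw page collection and clears
// its cached resources.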
func (c *PageCollections) removePage(page *Page) {
	if i := c.rawAllPages.findPagePos(page); i >= 0 {
		c.clearResourceCacheForPage(c.rawAllPages[i])
		c.rawAllPages = append(c.rawAllPages[:i], c.rawAllPages[i+1:]...)
	}
}

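// findPagesByShortcode returns all raw pages that use the given shortcode.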
func (c *PageCollections) findPagesByShortcode(shortcode string) Pages {
	var pages Pages

	for _, p := range c.rawAllPages {
		if p.shortcodeState != nil {
			if _, ok := p.shortcodeState.nameSet[shortcode]; ok {
				pages = append(pages, p)
			}
		}
	}
	return pages
}

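// replacePage replaces any existing page with the same file path with the
// given page.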
func (c *PageCollections) replacePage(page *Page) {
	// will find existing page that matches filepath and remove it
	c.removePage(page)
	c.addPage(page)
}

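// clearResourceCacheForPage removes the cached resources (e.g. processed
// images) belonging to the given page's bundle.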
func (c *PageCollections) clearResourceCacheForPage(page *Page) {
	if len(page.Resources) > 0 {
		first := page.Resources[0]
		dir := path.Dir(first.RelPermalink())
		// This is done to keep the memory usage in check when doing live reloads.
		page.s.resourceSpec.DeleteCacheByPrefix(dir)
	}
}