// Copyright 2019 The Hugo Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package helpers implements general utility functions that work with
// and on content. The helper functions defined here lay down the
// foundation of how Hugo works with files and filepaths, and perform
// string operations on content.
package helpers

import (
	"bytes"
	"html/template"
	"strings"
	"unicode"
	"unicode/utf8"

	"github.com/gohugoio/hugo/common/hexec"
	"github.com/gohugoio/hugo/common/loggers"

	"github.com/spf13/afero"

	"github.com/gohugoio/hugo/markup/converter"
	"github.com/gohugoio/hugo/markup/converter/hooks"

	"github.com/gohugoio/hugo/markup"

	"github.com/gohugoio/hugo/config"
)

var (
	openingPTag        = []byte("<p>")
	closingPTag        = []byte("</p>")
	paragraphIndicator = []byte("<p")
	closingIndicator   = []byte("</")
)

// ContentSpec provides functionality to render markdown content.
type ContentSpec struct {
	Converters          markup.ConverterProvider
	anchorNameSanitizer converter.AnchorNameSanitizer
	getRenderer         func(t hooks.RendererType, id any) any

	Cfg config.AllProvider
}

// NewContentSpec returns a ContentSpec initialized
// with the appropriate fields from the given config.AllProvider.
func NewContentSpec(cfg config.AllProvider, logger loggers.Logger, contentFs afero.Fs, ex *hexec.Exec) (*ContentSpec, error) {
	spec := &ContentSpec{
		Cfg: cfg,
	}

	converterProvider, err := markup.NewConverterProvider(converter.ProviderConfig{
		Conf:      cfg,
		ContentFs: contentFs,
		Logger:    logger,
		Exec:      ex,
	})
	if err != nil {
		return nil, err
	}

	spec.Converters = converterProvider
	p := converterProvider.Get("markdown")
	conv, err := p.New(converter.DocumentContext{})
	if err != nil {
		return nil, err
	}
	if as, ok := conv.(converter.AnchorNameSanitizer); ok {
		spec.anchorNameSanitizer = as
	} else {
		// Use Goldmark's sanitizer.
		p := converterProvider.Get("goldmark")
		conv, err := p.New(converter.DocumentContext{})
		if err != nil {
			return nil, err
		}
		spec.anchorNameSanitizer = conv.(converter.AnchorNameSanitizer)
	}

	return spec, nil
}
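
// Illustrative sketch of wiring up a ContentSpec; the variable names are
// hypothetical and assume a config.AllProvider, a loggers.Logger, an afero.Fs
// rooted at the content directory and an *hexec.Exec are already available:
//
//	spec, err := NewContentSpec(conf, logger, contentFs, exec)
//	if err != nil {
//		// handle the error
//	}
//	name := spec.ResolveMarkup("md") // "markdown"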

// stripEmptyNav strips out empty <nav> tags from content.
func stripEmptyNav(in []byte) []byte {
	return bytes.Replace(in, []byte("<nav>\n</nav>\n\n"), []byte(``), -1)
}

// BytesToHTML converts bytes to type template.HTML.
func BytesToHTML(b []byte) template.HTML {
	return template.HTML(string(b))
}

// ExtractTOC extracts Table of Contents from content.
func ExtractTOC(content []byte) (newcontent []byte, toc []byte) {
	if !bytes.Contains(content, []byte("<nav>")) {
		return content, nil
	}
	origContent := make([]byte, len(content))
	copy(origContent, content)
	first := []byte(`<nav>
<ul>`)

	last := []byte(`</ul>
</nav>`)

	replacement := []byte(`<nav id="TableOfContents">
<ul>`)

	startOfTOC := bytes.Index(content, first)

	peekEnd := len(content)
	if peekEnd > 70+startOfTOC {
		peekEnd = 70 + startOfTOC
	}

	if startOfTOC < 0 {
		return stripEmptyNav(content), toc
	}
	// Need to peek ahead to see if this nav element is actually the right one.
	correctNav := bytes.Index(content[startOfTOC:peekEnd], []byte(`<li><a href="#`))
	if correctNav < 0 { // no match found
		return content, toc
	}
	lengthOfTOC := bytes.Index(content[startOfTOC:], last) + len(last)
	endOfTOC := startOfTOC + lengthOfTOC

	newcontent = append(content[:startOfTOC], content[endOfTOC:]...)
	toc = append(replacement, origContent[startOfTOC+len(first):endOfTOC]...)
	return
}
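
// Illustrative sketch of what ExtractTOC does with a TOC rendered as a <nav>
// block; the exact input below is hypothetical:
//
//	in := []byte("<nav>\n<ul>\n<li><a href=\"#intro\">Intro</a></li>\n</ul>\n</nav>\n<p>Body</p>")
//	body, toc := ExtractTOC(in)
//	// body no longer contains the <nav> block, and toc starts with
//	// `<nav id="TableOfContents">` followed by the list items.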

func (c *ContentSpec) SanitizeAnchorName(s string) string {
	return c.anchorNameSanitizer.SanitizeAnchorName(s)
}

func (c *ContentSpec) ResolveMarkup(in string) string {
	if c == nil {
		panic("nil ContentSpec")
	}
	in = strings.ToLower(in)
	switch in {
	case "md", "markdown", "mdown":
		return "markdown"
	case "html", "htm":
		return "html"
	default:
		if conv := c.Converters.Get(in); conv != nil {
			return conv.Name()
		}
	}
	return ""
}
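
// Illustrative sketch: ResolveMarkup normalizes a markup identifier to the
// converter name used internally (spec here is an assumed *ContentSpec):
//
//	spec.ResolveMarkup("MD")    // "markdown"
//	spec.ResolveMarkup("htm")   // "html"
//	spec.ResolveMarkup("bogus") // "" when no converter is registered for it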

// TotalWords counts the words in s, where a word is a maximal run of
// characters not matching unicode.IsSpace.
// This is a cheaper way of counting words than the obvious len(strings.Fields(s)).
func TotalWords(s string) int {
	n := 0
	inWord := false
	for _, r := range s {
		wasInWord := inWord
		inWord = !unicode.IsSpace(r)
		if inWord && !wasInWord {
			n++
		}
	}
	return n
}
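
// Illustrative sketch: punctuation sticks to the word it touches, so:
//
//	TotalWords("Hello, World!")      // 2
//	TotalWords("  one\ttwo  three ") // 3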

// TruncateWordsByRune truncates words by runes.
func (c *ContentSpec) TruncateWordsByRune(in []string) (string, bool) {
	words := make([]string, len(in))
	copy(words, in)

	count := 0
	for index, word := range words {
		if count >= c.Cfg.SummaryLength() {
			return strings.Join(words[:index], " "), true
		}
		runeCount := utf8.RuneCountInString(word)
		if len(word) == runeCount {
			count++
		} else if count+runeCount < c.Cfg.SummaryLength() {
			count += runeCount
		} else {
			for ri := range word {
				if count >= c.Cfg.SummaryLength() {
					truncatedWords := append(words[:index], word[:ri])
					return strings.Join(truncatedWords, " "), true
				}
				count++
			}
		}
	}

	return strings.Join(words, " "), false
}
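
// Illustrative sketch, assuming the configured summary length is 2:
//
//	spec.TruncateWordsByRune([]string{"first", "second", "third"})
//	// "first second", true
//
// Words containing multi-byte runes (e.g. CJK text) are counted per rune
// rather than per word.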

// TruncateWordsToWholeSentence truncates s to the nearest whole sentence
// after the configured maximum number of words (the summary length).
// It also reports whether the text was truncated.
func (c *ContentSpec) TruncateWordsToWholeSentence(s string) (string, bool) {
	var (
		wordCount     = 0
		lastWordIndex = -1
	)

	for i, r := range s {
		if unicode.IsSpace(r) {
			wordCount++
			lastWordIndex = i

			if wordCount >= c.Cfg.SummaryLength() {
				break
			}
		}
	}

	if lastWordIndex == -1 {
		return s, false
	}

	endIndex := -1

	for j, r := range s[lastWordIndex:] {
		if isEndOfSentence(r) {
			endIndex = j + lastWordIndex + utf8.RuneLen(r)
			break
		}
	}

	if endIndex == -1 {
		return s, false
	}

	return strings.TrimSpace(s[:endIndex]), endIndex < len(s)
}
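
// Illustrative sketch, assuming the configured summary length is 2:
//
//	spec.TruncateWordsToWholeSentence("One two three. Four five.")
//	// "One two three.", true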

// TrimShortHTML removes the <p>/</p> tags from HTML input in the situation
// where said tags are the only <p> tags in the input and enclose the content
// of the input (whitespace excluded).
func (c *ContentSpec) TrimShortHTML(input []byte) []byte {
	firstOpeningP := bytes.Index(input, paragraphIndicator)
	lastOpeningP := bytes.LastIndex(input, paragraphIndicator)

	lastClosingP := bytes.LastIndex(input, closingPTag)
	lastClosing := bytes.LastIndex(input, closingIndicator)

	if firstOpeningP == lastOpeningP && lastClosingP == lastClosing {
		input = bytes.TrimSpace(input)
		input = bytes.TrimPrefix(input, openingPTag)
		input = bytes.TrimSuffix(input, closingPTag)
		input = bytes.TrimSpace(input)
	}
	return input
}
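
// Illustrative sketch: the surrounding <p> tags are only stripped when they
// wrap the sole paragraph in the input.
//
//	spec.TrimShortHTML([]byte("<p>Hello</p>"))           // "Hello"
//	spec.TrimShortHTML([]byte("<p>One</p>\n<p>Two</p>")) // unchanged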

func isEndOfSentence(r rune) bool {
	return r == '.' || r == '?' || r == '!' || r == '"' || r == '\n'
}

// Kept only for benchmark.
func (c *ContentSpec) truncateWordsToWholeSentenceOld(content string) (string, bool) {
	words := strings.Fields(content)

	if c.Cfg.SummaryLength() >= len(words) {
		return strings.Join(words, " "), false
	}

	for counter, word := range words[c.Cfg.SummaryLength():] {
		if strings.HasSuffix(word, ".") ||
			strings.HasSuffix(word, "?") ||
			strings.HasSuffix(word, ".\"") ||
			strings.HasSuffix(word, "!") {
			upper := c.Cfg.SummaryLength() + counter + 1
			return strings.Join(words[:upper], " "), (upper < len(words))
		}
	}

	return strings.Join(words[:c.Cfg.SummaryLength()], " "), true
}