2018-10-18 04:21:23 -04:00
|
|
|
// Copyright 2018 The Hugo Authors. All rights reserved.
|
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
|
|
|
package hugolib
|
|
|
|
|
|
|
|
import (
|
2018-10-21 06:20:21 -04:00
|
|
|
"bytes"
|
2018-10-18 04:21:23 -04:00
|
|
|
"io"
|
|
|
|
|
Move the emoji parsing to pageparser
This avoids double parsing the page content when `enableEmoji=true`.
This commit also adds some general improvements to the parser, making it in general much faster:
```bash
benchmark old ns/op new ns/op delta
BenchmarkShortcodeLexer-4 90258 101730 +12.71%
BenchmarkParse-4 148940 15037 -89.90%
benchmark old allocs new allocs delta
BenchmarkShortcodeLexer-4 456 700 +53.51%
BenchmarkParse-4 28 33 +17.86%
benchmark old bytes new bytes delta
BenchmarkShortcodeLexer-4 69875 81014 +15.94%
BenchmarkParse-4 8128 8304 +2.17%
```
Running some site benchmarks with Emoji support turned on:
```bash
benchmark old ns/op new ns/op delta
BenchmarkSiteBuilding/TOML,num_langs=3,num_pages=5000,tags_per_page=5,shortcodes,render-4 924556797 818115620 -11.51%
benchmark old allocs new allocs delta
BenchmarkSiteBuilding/TOML,num_langs=3,num_pages=5000,tags_per_page=5,shortcodes,render-4 4112613 4133787 +0.51%
benchmark old bytes new bytes delta
BenchmarkSiteBuilding/TOML,num_langs=3,num_pages=5000,tags_per_page=5,shortcodes,render-4 426982864 424363832 -0.61%
```
Fixes #5534
2018-12-17 15:03:23 -05:00
|
|
|
"github.com/gohugoio/hugo/helpers"
|
|
|
|
|
2018-10-21 06:20:21 -04:00
|
|
|
errors "github.com/pkg/errors"
|
|
|
|
|
2018-10-18 04:21:23 -04:00
|
|
|
bp "github.com/gohugoio/hugo/bufferpool"
|
|
|
|
|
2018-10-21 06:20:21 -04:00
|
|
|
"github.com/gohugoio/hugo/common/herrors"
|
2018-11-01 06:28:30 -04:00
|
|
|
"github.com/gohugoio/hugo/common/text"
|
2018-10-18 04:21:23 -04:00
|
|
|
"github.com/gohugoio/hugo/parser/metadecoders"
|
|
|
|
"github.com/gohugoio/hugo/parser/pageparser"
|
|
|
|
)
|
|
|
|
|
2018-10-19 05:30:57 -04:00
|
|
|
var (
	// internalSummaryDividerBase is the internal marker written into the
	// rendered work content where the parser reported a summary divider
	// (see the TypeLeadSummaryDivider case in mapContent).
	internalSummaryDividerBase = "HUGOMORE42"

	// internalSummaryDividerBaseBytes is the marker as a byte slice, for
	// byte-oriented searches. Kept in sync with internalSummaryDividerBase.
	internalSummaryDividerBaseBytes = []byte(internalSummaryDividerBase)

	// internalSummaryDividerPre is the marker padded with blank lines; this
	// is the exact form written to the content buffer in mapContent.
	internalSummaryDividerPre = []byte("\n\n" + internalSummaryDividerBase + "\n\n")
)
|
|
|
|
|
2018-10-18 04:21:23 -04:00
|
|
|
// The content related items on a Page.
type pageContent struct {
	// renderable is true by default; mapContent sets it to false when the
	// source is HTML without front matter (pageparser.TypeHTMLStart).
	renderable bool

	// workContent is a copy of rawContent that may be mutated during site build.
	workContent []byte

	// shortcodeState tracks the shortcodes extracted from this page's
	// content (populated by mapContent via extractShortcode).
	shortcodeState *shortcodeHandler

	// source holds the raw parsed page source (AST plus derived positions).
	source rawPageContent
}
|
|
|
|
|
|
|
|
// rawPageContent holds the parsed representation of a page's source.
type rawPageContent struct {
	// hasSummaryDivider is set when the parser emitted a
	// TypeLeadSummaryDivider item (i.e. the page has an explicit summary).
	hasSummaryDivider bool

	// The AST of the parsed page. Contains information about:
	// shortcodes, front matter, summary indicators.
	parsed pageparser.Result

	// Returns the position in bytes after any front matter.
	// Initialized to -1 in mapContent and left there when there is no
	// content after the front matter.
	posMainContent int
}
|
|
|
|
|
|
|
|
// TODO(bep) lazy consolidate

// mapContent walks the page's parsed AST (p.source.parsed) and builds
// p.workContent from it: front matter is decoded and applied to the page
// metadata, the summary divider is replaced with the internal marker,
// shortcodes are replaced with placeholders (recorded in p.shortcodeState),
// emoji tokens are expanded, and everything else is copied through verbatim.
// It returns early (nil) when the front matter says the page should not be
// built.
func (p *Page) mapContent() error {
	p.shortcodeState = newShortcodeHandler(p)
	s := p.shortcodeState
	p.renderable = true
	p.source.posMainContent = -1

	result := bp.GetBuffer()
	defer bp.PutBuffer(result)

	iter := p.source.parsed.Iterator()

	// fail decorates err with the file position corresponding to item i.
	fail := func(err error, i pageparser.Item) error {
		return p.parseError(err, iter.Input(), i.Pos)
	}

	// the parser is guaranteed to return items in proper order or fail, so …
	// … it's safe to keep some "global" state
	var currShortcode shortcode
	var ordinal int

Loop:
	for {
		it := iter.Next()

		switch {
		case it.Type == pageparser.TypeIgnore:
		case it.Type == pageparser.TypeHTMLStart:
			// This is HTML without front matter. It can still have shortcodes.
			p.renderable = false
			result.Write(it.Val)
		case it.IsFrontMatter():
			f := metadecoders.FormatFromFrontMatterType(it.Type)
			m, err := metadecoders.Default.UnmarshalToMap(it.Val, f)
			if err != nil {
				if fe, ok := err.(herrors.FileError); ok {
					// Front matter starts after the page's first line;
					// shift the reported line accordingly.
					return herrors.ToFileErrorWithOffset(fe, iter.LineNumber()-1)
				} else {
					return err
				}
			}
			if err := p.updateMetaData(m); err != nil {
				return err
			}

			// Record where the main (post-front-matter) content begins.
			next := iter.Peek()
			if !next.IsDone() {
				p.source.posMainContent = next.Pos
			}

			if !p.shouldBuild() {
				// Nothing more to do.
				return nil
			}

		case it.Type == pageparser.TypeLeadSummaryDivider:
			result.Write(internalSummaryDividerPre)
			p.source.hasSummaryDivider = true
			// Need to determine if the page is truncated.
			f := func(item pageparser.Item) bool {
				if item.IsNonWhitespace() {
					p.truncated = true

					// Done
					return false
				}
				return true
			}
			iter.PeekWalk(f)

		// Handle shortcode
		case it.IsLeftShortcodeDelim():
			// let extractShortcode handle left delim (will do so recursively)
			iter.Backup()

			// NOTE(review): ":=" declares a new currShortcode scoped to this
			// case, shadowing the outer variable — so the IsError case below
			// always sees the zero value. Confirm this is intended.
			currShortcode, err := s.extractShortcode(ordinal, iter, p)

			// Register the name even on error so it is known to the handler.
			if currShortcode.name != "" {
				s.nameSet[currShortcode.name] = true
			}

			if err != nil {
				return fail(errors.Wrap(err, "failed to extract shortcode"), it)
			}

			if currShortcode.params == nil {
				currShortcode.params = make([]string, 0)
			}

			// Emit a placeholder; the shortcode is rendered in a later pass.
			placeHolder := s.createShortcodePlaceholder()
			result.WriteString(placeHolder)
			ordinal++
			s.shortcodes.Add(placeHolder, currShortcode)
		case it.Type == pageparser.TypeEmoji:
			// Expand :emoji: tokens; fall back to the raw text when unknown.
			if emoji := helpers.Emoji(it.ValStr()); emoji != nil {
				result.Write(emoji)
			} else {
				result.Write(it.Val)
			}
		case it.IsEOF():
			break Loop
		case it.IsError():
			err := fail(errors.WithStack(errors.New(it.ValStr())), it)
			currShortcode.err = err
			return err

		default:
			result.Write(it.Val)
		}
	}

	// Copy out of the pooled buffer before returning it to the pool.
	resultBytes := make([]byte, result.Len())
	copy(resultBytes, result.Bytes())
	p.workContent = resultBytes

	return nil
}
|
|
|
|
|
|
|
|
// parse reads the raw page source from reader, runs it through the page
// parser (with emoji parsing enabled per the site's "enableEmoji" setting),
// and stores the parse result on the page. It also resolves the page's
// language from its file and attaches Git info when available.
func (p *Page) parse(reader io.Reader) error {
	// NOTE(review): p.s is dereferenced here but nil-checked further down —
	// confirm p.s is always non-nil by the time parse is called.
	parseResult, err := pageparser.Parse(
		reader,
		pageparser.Config{EnableEmoji: p.s.Cfg.GetBool("enableEmoji")},
	)
	if err != nil {
		return err
	}

	p.source = rawPageContent{
		parsed: parseResult,
	}

	p.lang = p.File.Lang()

	if p.s != nil && p.s.owner != nil {
		gi, enabled := p.s.owner.gitInfo.forPage(p)
		if gi != nil {
			p.GitInfo = gi
		} else if enabled {
			// Git info is enabled but this page has none; informational only.
			p.s.Log.INFO.Printf("Failed to find GitInfo for page %q", p.Path())
		}
	}

	return nil
}
|
2018-10-21 06:20:21 -04:00
|
|
|
|
2018-11-01 05:39:44 -04:00
|
|
|
func (p *Page) parseError(err error, input []byte, offset int) error {
|
2018-10-21 06:20:21 -04:00
|
|
|
if herrors.UnwrapFileError(err) != nil {
|
|
|
|
// Use the most specific location.
|
|
|
|
return err
|
|
|
|
}
|
2018-11-01 05:39:44 -04:00
|
|
|
pos := p.posFromInput(input, offset)
|
|
|
|
return herrors.NewFileError("md", -1, pos.LineNumber, pos.ColumnNumber, err)
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2018-11-01 06:28:30 -04:00
|
|
|
func (p *Page) posFromInput(input []byte, offset int) text.Position {
|
2018-10-21 06:20:21 -04:00
|
|
|
lf := []byte("\n")
|
2018-11-01 05:39:44 -04:00
|
|
|
input = input[:offset]
|
2018-10-21 06:20:21 -04:00
|
|
|
lineNumber := bytes.Count(input, lf) + 1
|
|
|
|
endOfLastLine := bytes.LastIndex(input, lf)
|
|
|
|
|
2018-11-01 06:28:30 -04:00
|
|
|
return text.Position{
|
2018-11-01 05:39:44 -04:00
|
|
|
Filename: p.pathOrTitle(),
|
|
|
|
LineNumber: lineNumber,
|
|
|
|
ColumnNumber: offset - endOfLastLine,
|
|
|
|
Offset: offset,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-11-01 06:28:30 -04:00
|
|
|
// posFromPage maps a byte offset within this page's own parsed input to a
// text.Position. Convenience wrapper around posFromInput.
func (p *Page) posFromPage(offset int) text.Position {
	return p.posFromInput(p.source.parsed.Input(), offset)
}
|