2018-10-18 04:21:23 -04:00
|
|
|
// Copyright 2018 The Hugo Authors. All rights reserved.
|
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
|
|
|
package hugolib
|
|
|
|
|
|
|
|
import (
|
2018-10-21 06:20:21 -04:00
|
|
|
"bytes"
|
2018-10-18 04:21:23 -04:00
|
|
|
"io"
|
|
|
|
|
2018-10-21 06:20:21 -04:00
|
|
|
errors "github.com/pkg/errors"
|
|
|
|
|
2018-10-18 04:21:23 -04:00
|
|
|
bp "github.com/gohugoio/hugo/bufferpool"
|
|
|
|
|
2018-10-21 06:20:21 -04:00
|
|
|
"github.com/gohugoio/hugo/common/herrors"
|
2018-10-18 04:21:23 -04:00
|
|
|
"github.com/gohugoio/hugo/parser/metadecoders"
|
|
|
|
"github.com/gohugoio/hugo/parser/pageparser"
|
|
|
|
)
|
|
|
|
|
2018-10-19 05:30:57 -04:00
|
|
|
// internalSummaryDivider is the placeholder written into the work content
// where the user's summary divider was found; it is replaced later in the
// build.
var internalSummaryDivider = []byte("HUGOMORE42")
|
|
|
|
|
2018-10-18 04:21:23 -04:00
|
|
|
// The content related items on a Page.
type pageContent struct {
	// renderable is false when the source is plain HTML without front
	// matter (see mapContent's TypeHTMLStart case).
	renderable bool

	// workContent is a copy of rawContent that may be mutated during site build.
	workContent []byte

	// shortcodeState collects the shortcodes found while mapping the
	// content; populated by mapContent.
	shortcodeState *shortcodeHandler

	// source holds the raw parsed page (AST plus derived positions).
	source rawPageContent
}
|
|
|
|
|
|
|
|
type rawPageContent struct {
	// hasSummaryDivider is set when a summary divider was found in the
	// content (see mapContent's TypeLeadSummaryDivider case).
	hasSummaryDivider bool

	// The AST of the parsed page. Contains information about:
	// shortcodes, front matter, summary indicators.
	parsed pageparser.Result

	// The position in bytes after any front matter; -1 when not set
	// (initialized in mapContent).
	posMainContent int
}
|
|
|
|
|
|
|
|
// TODO(bep) lazy consolidate
|
|
|
|
func (p *Page) mapContent() error {
|
|
|
|
p.shortcodeState = newShortcodeHandler(p)
|
|
|
|
s := p.shortcodeState
|
|
|
|
p.renderable = true
|
2018-10-23 08:37:09 -04:00
|
|
|
p.source.posMainContent = -1
|
2018-10-18 04:21:23 -04:00
|
|
|
|
|
|
|
result := bp.GetBuffer()
|
|
|
|
defer bp.PutBuffer(result)
|
|
|
|
|
|
|
|
iter := p.source.parsed.Iterator()
|
|
|
|
|
2018-10-21 06:20:21 -04:00
|
|
|
fail := func(err error, i pageparser.Item) error {
|
|
|
|
return parseError(err, iter.Input(), i.Pos)
|
|
|
|
}
|
|
|
|
|
2018-10-18 04:21:23 -04:00
|
|
|
// the parser is guaranteed to return items in proper order or fail, so …
|
|
|
|
// … it's safe to keep some "global" state
|
|
|
|
var currShortcode shortcode
|
|
|
|
var ordinal int
|
|
|
|
|
|
|
|
Loop:
|
|
|
|
for {
|
|
|
|
it := iter.Next()
|
|
|
|
|
|
|
|
switch {
|
2018-10-19 05:30:57 -04:00
|
|
|
case it.Type == pageparser.TypeIgnore:
|
|
|
|
case it.Type == pageparser.TypeHTMLComment:
|
2018-10-18 04:21:23 -04:00
|
|
|
// Ignore. This is only a leading Front matter comment.
|
2018-10-23 08:37:09 -04:00
|
|
|
case it.Type == pageparser.TypeHTMLStart:
|
|
|
|
// This is HTML without front matter. It can still have shortcodes.
|
2018-10-18 04:21:23 -04:00
|
|
|
p.renderable = false
|
|
|
|
result.Write(it.Val)
|
|
|
|
case it.IsFrontMatter():
|
2018-10-19 05:30:57 -04:00
|
|
|
f := metadecoders.FormatFromFrontMatterType(it.Type)
|
2018-10-18 04:21:23 -04:00
|
|
|
m, err := metadecoders.UnmarshalToMap(it.Val, f)
|
|
|
|
if err != nil {
|
2018-10-23 02:54:10 -04:00
|
|
|
if fe, ok := err.(herrors.FileError); ok {
|
|
|
|
return herrors.ToFileErrorWithOffset(fe, iter.LineNumber()-1)
|
|
|
|
} else {
|
|
|
|
return err
|
|
|
|
}
|
2018-10-18 04:21:23 -04:00
|
|
|
}
|
|
|
|
if err := p.updateMetaData(m); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2018-10-23 08:37:09 -04:00
|
|
|
next := iter.Peek()
|
|
|
|
if !next.IsDone() {
|
|
|
|
p.source.posMainContent = next.Pos
|
|
|
|
}
|
|
|
|
|
2018-10-18 04:21:23 -04:00
|
|
|
if !p.shouldBuild() {
|
|
|
|
// Nothing more to do.
|
|
|
|
return nil
|
2018-10-19 05:30:57 -04:00
|
|
|
}
|
2018-10-18 04:21:23 -04:00
|
|
|
|
2018-10-23 08:37:09 -04:00
|
|
|
case it.Type == pageparser.TypeLeadSummaryDivider:
|
2018-10-19 05:30:57 -04:00
|
|
|
result.Write(internalSummaryDivider)
|
|
|
|
p.source.hasSummaryDivider = true
|
|
|
|
// Need to determine if the page is truncated.
|
|
|
|
f := func(item pageparser.Item) bool {
|
|
|
|
if item.IsNonWhitespace() {
|
|
|
|
p.truncated = true
|
|
|
|
|
|
|
|
// Done
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
return true
|
2018-10-18 04:21:23 -04:00
|
|
|
}
|
2018-10-19 05:30:57 -04:00
|
|
|
iter.PeekWalk(f)
|
2018-10-18 04:21:23 -04:00
|
|
|
|
|
|
|
// Handle shortcode
|
|
|
|
case it.IsLeftShortcodeDelim():
|
|
|
|
// let extractShortcode handle left delim (will do so recursively)
|
|
|
|
iter.Backup()
|
|
|
|
|
|
|
|
currShortcode, err := s.extractShortcode(ordinal, iter, p)
|
|
|
|
|
|
|
|
if currShortcode.name != "" {
|
|
|
|
s.nameSet[currShortcode.name] = true
|
|
|
|
}
|
|
|
|
|
|
|
|
if err != nil {
|
2018-10-21 06:20:21 -04:00
|
|
|
return fail(errors.Wrap(err, "failed to extract shortcode"), it)
|
2018-10-18 04:21:23 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
if currShortcode.params == nil {
|
|
|
|
currShortcode.params = make([]string, 0)
|
|
|
|
}
|
|
|
|
|
|
|
|
placeHolder := s.createShortcodePlaceholder()
|
|
|
|
result.WriteString(placeHolder)
|
|
|
|
ordinal++
|
|
|
|
s.shortcodes.Add(placeHolder, currShortcode)
|
|
|
|
case it.IsEOF():
|
|
|
|
break Loop
|
|
|
|
case it.IsError():
|
2018-10-21 06:20:21 -04:00
|
|
|
err := fail(errors.WithStack(errors.New(it.ValStr())), it)
|
2018-10-18 04:21:23 -04:00
|
|
|
currShortcode.err = err
|
|
|
|
return err
|
2018-10-21 06:20:21 -04:00
|
|
|
|
2018-10-18 04:21:23 -04:00
|
|
|
default:
|
|
|
|
result.Write(it.Val)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
resultBytes := make([]byte, result.Len())
|
|
|
|
copy(resultBytes, result.Bytes())
|
|
|
|
p.workContent = resultBytes
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (p *Page) parse(reader io.Reader) error {
|
|
|
|
|
|
|
|
parseResult, err := pageparser.Parse(reader)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
p.source = rawPageContent{
|
|
|
|
parsed: parseResult,
|
|
|
|
}
|
|
|
|
|
2018-10-20 13:09:03 -04:00
|
|
|
p.lang = p.File.Lang()
|
2018-10-18 04:21:23 -04:00
|
|
|
|
|
|
|
if p.s != nil && p.s.owner != nil {
|
|
|
|
gi, enabled := p.s.owner.gitInfo.forPage(p)
|
|
|
|
if gi != nil {
|
|
|
|
p.GitInfo = gi
|
|
|
|
} else if enabled {
|
|
|
|
p.s.Log.WARN.Printf("Failed to find GitInfo for page %q", p.Path())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
2018-10-21 06:20:21 -04:00
|
|
|
|
|
|
|
func parseError(err error, input []byte, pos int) error {
|
|
|
|
if herrors.UnwrapFileError(err) != nil {
|
|
|
|
// Use the most specific location.
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
lf := []byte("\n")
|
|
|
|
input = input[:pos]
|
|
|
|
lineNumber := bytes.Count(input, lf) + 1
|
|
|
|
endOfLastLine := bytes.LastIndex(input, lf)
|
2018-10-23 02:54:10 -04:00
|
|
|
return herrors.NewFileError("md", -1, lineNumber, pos-endOfLastLine, err)
|
2018-10-21 06:20:21 -04:00
|
|
|
|
|
|
|
}
|