2015-12-07 13:57:01 -05:00
|
|
|
// Copyright 2015 The Hugo Authors. All rights reserved.
|
2013-07-04 11:32:55 -04:00
|
|
|
//
|
2015-11-23 22:16:36 -05:00
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
2013-07-04 11:32:55 -04:00
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
2015-11-23 22:16:36 -05:00
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
2013-07-04 11:32:55 -04:00
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
|
|
|
package hugolib
|
|
|
|
|
|
|
|
import (
|
2014-01-29 17:50:31 -05:00
|
|
|
"bytes"
|
|
|
|
"errors"
|
|
|
|
"fmt"
|
2015-01-25 06:08:02 -05:00
|
|
|
"reflect"
|
|
|
|
|
|
|
|
"github.com/mitchellh/mapstructure"
|
2014-11-20 12:32:21 -05:00
|
|
|
"github.com/spf13/hugo/helpers"
|
|
|
|
"github.com/spf13/hugo/parser"
|
2014-11-28 15:16:57 -05:00
|
|
|
|
2014-12-07 13:48:00 -05:00
|
|
|
"html/template"
|
|
|
|
"io"
|
|
|
|
"net/url"
|
|
|
|
"path"
|
|
|
|
"path/filepath"
|
2015-09-03 06:22:20 -04:00
|
|
|
"regexp"
|
2014-12-07 13:48:00 -05:00
|
|
|
"strings"
|
2015-01-21 09:28:05 -05:00
|
|
|
"sync"
|
2014-12-07 13:48:00 -05:00
|
|
|
"time"
|
2015-09-03 06:22:20 -04:00
|
|
|
"unicode/utf8"
|
2015-01-25 06:08:02 -05:00
|
|
|
|
|
|
|
"github.com/spf13/cast"
|
2015-01-30 14:42:02 -05:00
|
|
|
bp "github.com/spf13/hugo/bufferpool"
|
2015-01-25 06:08:02 -05:00
|
|
|
"github.com/spf13/hugo/hugofs"
|
|
|
|
"github.com/spf13/hugo/source"
|
|
|
|
"github.com/spf13/hugo/tpl"
|
|
|
|
jww "github.com/spf13/jwalterweatherman"
|
|
|
|
"github.com/spf13/viper"
|
2013-07-04 11:32:55 -04:00
|
|
|
)
|
|
|
|
|
2015-09-03 06:22:20 -04:00
|
|
|
var (
	// cjk matches any Han, Hangul, Hiragana or Katakana rune; used to
	// detect CJK content so word counting and summaries use rune-based rules.
	cjk = regexp.MustCompile(`\p{Han}|\p{Hangul}|\p{Hiragana}|\p{Katakana}`)
)
|
|
|
|
|
2013-07-04 11:32:55 -04:00
|
|
|
// Page represents a single content page: the rendered output, the metadata
// parsed from front matter, and the embedded Source/Position/Node data.
type Page struct {
	Params map[string]interface{} // front matter parameters, keyed lowercase
	Content template.HTML // fully rendered page content
	Summary template.HTML // rendered summary (explicit divider or auto-truncated)
	Aliases []string // alternative URLs that should point at this page
	Status string
	Images []Image
	Videos []Video
	TableOfContents template.HTML
	Truncated bool // true when Summary omits part of Content
	Draft bool
	PublishDate time.Time
	Markup string // markup format override from front matter
	extension string // output file extension override from front matter
	contentType string // explicit content type; Type() falls back to section, then "page"
	renderable bool
	Layout string // layout override from front matter
	layoutsCalculated []string // pre-computed layout candidates; wins over Layout when set
	linkTitle string // front matter linktitle; LinkTitle() falls back to Title
	frontmatter []byte // raw front matter bytes
	rawContent []byte // raw content bytes (shortcodes already replaced by placeholders)
	contentShortCodes map[string]string // TODO(bep) this shouldn't be needed.
	shortcodes map[string]shortcode
	plain string // TODO should be []byte
	plainWords []string
	plainInit sync.Once // guards plain/plainWords initialization
	renderingConfig *helpers.Blackfriday
	renderingConfigInit sync.Once // guards renderingConfig initialization
	pageMenus PageMenus
	pageMenusInit sync.Once // guards pageMenus initialization
	isCJKLanguage bool // whether CJK word-counting rules apply
	PageMeta
	Source
	Position `json:"-"`
	Node
	rendered bool
}
|
|
|
|
|
2014-10-16 20:20:09 -04:00
|
|
|
// Source holds the raw source of a page: the front matter and content
// bytes, plus the underlying source file metadata.
type Source struct {
	Frontmatter []byte // raw front matter bytes
	Content []byte // raw content bytes
	source.File
}
|
|
|
|
// PageMeta holds derived statistics and ordering metadata for a page.
type PageMeta struct {
	WordCount int // exact word count (rune-based for CJK content)
	FuzzyWordCount int // WordCount rounded to a multiple of 100
	ReadingTime int // estimated reading time in minutes
	Weight int // manual ordering weight from front matter
}
|
|
|
|
|
|
|
|
// Position links a page to its neighbors in site-wide and section-local
// page order.
type Position struct {
	Prev *Page
	Next *Page
	PrevInSection *Page
	NextInSection *Page
}
|
|
|
|
|
|
|
|
// Pages is a list of pages.
type Pages []*Page
|
2016-01-07 21:48:13 -05:00
|
|
|
|
2016-01-11 12:06:52 -05:00
|
|
|
func (ps Pages) FindPagePosByFilePath(inPath string) int {
|
|
|
|
for i, x := range ps {
|
|
|
|
if x.Source.Path() == inPath {
|
|
|
|
return i
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return -1
|
|
|
|
}
|
2016-01-07 21:48:13 -05:00
|
|
|
|
|
|
|
// FindPagePos Given a page, it will find the position in Pages
|
|
|
|
// will return -1 if not found
|
|
|
|
func (ps Pages) FindPagePos(page *Page) int {
|
|
|
|
for i, x := range ps {
|
2016-01-11 12:06:52 -05:00
|
|
|
if x.Source.Path() == page.Source.Path() {
|
2016-01-07 21:48:13 -05:00
|
|
|
return i
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return -1
|
|
|
|
}
|
|
|
|
|
2014-01-27 17:16:28 -05:00
|
|
|
func (p *Page) Plain() string {
|
2015-02-04 18:38:50 -05:00
|
|
|
p.initPlain()
|
2014-01-29 17:50:31 -05:00
|
|
|
return p.plain
|
2013-10-15 09:15:52 -04:00
|
|
|
}
|
|
|
|
|
2015-02-04 18:38:50 -05:00
|
|
|
func (p *Page) PlainWords() []string {
|
|
|
|
p.initPlain()
|
|
|
|
return p.plainWords
|
|
|
|
}
|
|
|
|
|
|
|
|
func (p *Page) initPlain() {
|
|
|
|
p.plainInit.Do(func() {
|
|
|
|
p.plain = helpers.StripHTML(string(p.Content))
|
|
|
|
p.plainWords = strings.Fields(p.plain)
|
2015-07-12 05:28:19 -04:00
|
|
|
return
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2014-08-19 21:27:13 -04:00
|
|
|
func (p *Page) IsNode() bool {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
func (p *Page) IsPage() bool {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
2015-10-11 23:51:04 -04:00
|
|
|
// Param is a convenience method to do lookups in Page's and Site's Params map,
|
|
|
|
// in that order.
|
|
|
|
//
|
|
|
|
// This method is also implemented on Node.
|
|
|
|
func (p *Page) Param(key interface{}) (interface{}, error) {
|
|
|
|
keyStr, err := cast.ToStringE(key)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
if val, ok := p.Params[keyStr]; ok {
|
|
|
|
return val, nil
|
|
|
|
}
|
|
|
|
return p.Site.Params[keyStr], nil
|
|
|
|
}
|
|
|
|
|
2014-12-09 13:33:55 -05:00
|
|
|
func (p *Page) Author() Author {
|
|
|
|
authors := p.Authors()
|
|
|
|
|
|
|
|
for _, author := range authors {
|
|
|
|
return author
|
|
|
|
}
|
|
|
|
return Author{}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (p *Page) Authors() AuthorList {
|
|
|
|
authorKeys, ok := p.Params["authors"]
|
|
|
|
authors := authorKeys.([]string)
|
|
|
|
if !ok || len(authors) < 1 || len(p.Site.Authors) < 1 {
|
|
|
|
return AuthorList{}
|
|
|
|
}
|
|
|
|
|
|
|
|
al := make(AuthorList)
|
|
|
|
for _, author := range authors {
|
|
|
|
a, ok := p.Site.Authors[author]
|
|
|
|
if ok {
|
|
|
|
al[author] = a
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return al
|
|
|
|
}
|
|
|
|
|
2015-03-11 13:34:57 -04:00
|
|
|
func (p *Page) UniqueID() string {
|
|
|
|
return p.Source.UniqueID()
|
2014-10-01 14:26:43 -04:00
|
|
|
}
|
|
|
|
|
Provide (relative) reference funcs & shortcodes.
- `.Ref` and `.RelRef` take a reference (the logical filename for a
page, including extension and/or a document fragment ID) and return
a permalink (or relative permalink) to the referenced document.
- If the reference is a page name (such as `about.md`), the page
will be discovered and the permalink will be returned: `/about/`
- If the reference is a page name with a fragment (such as
`about.md#who`), the page will be discovered and used to add the
`page.UniqueID()` to the resulting fragment and permalink:
`/about/#who:deadbeef`.
- If the reference is a fragment and `.*Ref` has been called from
a `Node` or `SiteInfo`, it will be returned as is: `#who`.
- If the reference is a fragment and `.*Ref` has been called from
a `Page`, it will be returned with the page’s unique ID:
`#who:deadbeef`.
- `.*Ref` can be called from either `Node`, `SiteInfo` (e.g.,
`Node.Site`), `Page` objects, or `ShortcodeWithPage` objects in
templates.
- `.*Ref` cannot be used in content, so two shortcodes have been
created to provide the functionality to content: `ref` and `relref`.
These are intended to be used within markup, like `[Who]({{% ref
about.md#who %}})` or `<a href="{{% ref about.md#who %}}">Who</a>`.
- There are also `ref` and `relref` template functions (used to create
the shortcodes) that expect a `Page` or `Node` object and the
reference string (e.g., `{{ relref . "about.md" }}` or `{{
"about.md" | ref . }}`). It actually looks for `.*Ref` as defined on
`Node` or `Page` objects.
- Shortcode handling had to use a *differently unique* wrapper in
`createShortcodePlaceholder` because of the way that the `ref` and
`relref` are intended to be used in content.
2014-11-24 01:15:34 -05:00
|
|
|
func (p *Page) Ref(ref string) (string, error) {
|
|
|
|
return p.Node.Site.Ref(ref, p)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (p *Page) RelRef(ref string) (string, error) {
|
|
|
|
return p.Node.Site.RelRef(ref, p)
|
|
|
|
}
|
|
|
|
|
Shortcode rewrite, take 2
This commit contains a restructuring and partial rewrite of the shortcode handling.
Prior to this commit rendering of the page content was mingled with handling of the shortcodes. This led to several oddities.
The new flow is:
1. Shortcodes are extracted from page and replaced with placeholders.
2. Shortcodes are processed and rendered
3. Page is processed
4. The placeholders are replaced with the rendered shortcodes
The handling of summaries is also made simpler by this.
This commit also introduces some other chenges:
1. distinction between shortcodes that need further processing and those who do not:
* `{{< >}}`: Typically raw HTML. Will not be processed.
* `{{% %}}`: Will be processed by the page's markup engine (Markdown or (infuture) Asciidoctor)
The above also involves a new shortcode-parser, with lexical scanning inspired by Rob Pike's talk called "Lexical Scanning in Go",
which should be easier to understand, give better error messages and perform better.
2. If you want to exclude a shortcode from being processed (for documentation etc.), the inner part of the shorcode must be commented out, i.e. `{{%/* movie 47238zzb */%}}`. See the updated shortcode section in the documentation for further examples.
The new parser supports nested shortcodes. This isn't new, but has two related design choices worth mentioning:
* The shortcodes will be rendered individually, so If both `{{< >}}` and `{{% %}}` are used in the nested hierarchy, one will be passed through the page's markdown processor, the other not.
* To avoid potential costly overhead of always looking far ahead for a possible closing tag, this implementation looks at the template itself, and is branded as a container with inner content if it contains a reference to `.Inner`
Fixes #565
Fixes #480
Fixes #461
And probably some others.
2014-10-27 16:48:30 -04:00
|
|
|
// for logging
|
|
|
|
func (p *Page) lineNumRawContentStart() int {
|
|
|
|
return bytes.Count(p.frontmatter, []byte("\n")) + 1
|
|
|
|
}
|
|
|
|
|
2014-01-27 17:16:28 -05:00
|
|
|
// setSummary populates p.Summary and p.Truncated. If the raw content has
// an explicit summary divider, everything above it is rendered as the
// summary (with shortcode placeholders replaced); otherwise the summary is
// auto-truncated from the plain-text words.
func (p *Page) setSummary() {

	// at this point, p.rawContent contains placeholders for the short codes,
	// rendered and ready in p.contentShortcodes

	if bytes.Contains(p.rawContent, helpers.SummaryDivider) {
		sections := bytes.Split(p.rawContent, helpers.SummaryDivider)
		header := sections[0]
		p.Truncated = true
		if len(sections[1]) < 20 {
			// only whitespace?
			p.Truncated = len(bytes.Trim(sections[1], " \n\r")) > 0
		}

		// TODO(bep) consider doing this once only
		renderedHeader := p.renderBytes(header)
		if len(p.contentShortCodes) > 0 {
			// Swap the shortcode placeholders in the rendered header for
			// their pre-rendered output.
			tmpContentWithTokensReplaced, err :=
				replaceShortcodeTokens(renderedHeader, shortcodePlaceholderPrefix, p.contentShortCodes)
			if err != nil {
				jww.FATAL.Printf("Failed to replace short code tokens in Summary for %s:\n%s", p.BaseFileName(), err.Error())
			} else {
				renderedHeader = tmpContentWithTokensReplaced
			}
		}
		p.Summary = helpers.BytesToHTML(renderedHeader)
	} else {
		// If hugo defines split:
		// render, strip html, then split
		var summary string
		var truncated bool
		// CJK content is truncated by rune count, other content by whole
		// sentences, both capped at helpers.SummaryLength.
		if p.isCJKLanguage {
			summary, truncated = helpers.TruncateWordsByRune(p.PlainWords(), helpers.SummaryLength)
		} else {
			summary, truncated = helpers.TruncateWordsToWholeSentence(p.PlainWords(), helpers.SummaryLength)
		}
		p.Summary = template.HTML(summary)
		p.Truncated = truncated
	}
}
|
|
|
|
|
|
|
|
// renderBytes renders content using the page's markup format and rendering
// config. When SourceRelativeLinksEval is enabled, link and file resolver
// callbacks are installed that resolve source-relative references through
// the site.
func (p *Page) renderBytes(content []byte) []byte {
	var fn helpers.LinkResolverFunc
	var fileFn helpers.FileResolverFunc
	if p.getRenderingConfig().SourceRelativeLinksEval {
		fn = func(ref string) (string, error) {
			return p.Node.Site.GitHub(ref, p)
		}
		fileFn = func(ref string) (string, error) {
			return p.Node.Site.GitHubFileLink(ref, p)
		}
	}
	return helpers.RenderBytes(
		&helpers.RenderingContext{Content: content, PageFmt: p.guessMarkupType(),
			DocumentID: p.UniqueID(), Config: p.getRenderingConfig(), LinkResolver: fn, FileResolver: fileFn})
}
|
|
|
|
|
2014-01-28 23:11:05 -05:00
|
|
|
// renderContent renders content like renderBytes, but additionally
// produces the table of contents (via RenderBytesWithTOC). The resolver
// setup is intentionally identical to renderBytes.
func (p *Page) renderContent(content []byte) []byte {
	var fn helpers.LinkResolverFunc
	var fileFn helpers.FileResolverFunc
	if p.getRenderingConfig().SourceRelativeLinksEval {
		fn = func(ref string) (string, error) {
			return p.Node.Site.GitHub(ref, p)
		}
		fileFn = func(ref string) (string, error) {
			return p.Node.Site.GitHubFileLink(ref, p)
		}
	}
	return helpers.RenderBytesWithTOC(&helpers.RenderingContext{Content: content, PageFmt: p.guessMarkupType(),
		DocumentID: p.UniqueID(), Config: p.getRenderingConfig(), LinkResolver: fn, FileResolver: fileFn})
}
|
|
|
|
|
2015-01-25 06:08:02 -05:00
|
|
|
// getRenderingConfig returns the page's Blackfriday rendering config,
// lazily built exactly once: the defaults from helpers.NewBlackfriday are
// overlaid with the page's "blackfriday" front matter parameter.
func (p *Page) getRenderingConfig() *helpers.Blackfriday {

	p.renderingConfigInit.Do(func() {
		pageParam := cast.ToStringMap(p.GetParam("blackfriday"))

		p.renderingConfig = helpers.NewBlackfriday()
		if err := mapstructure.Decode(pageParam, p.renderingConfig); err != nil {
			jww.FATAL.Printf("Failed to get rendering config for %s:\n%s", p.BaseFileName(), err.Error())
		}
	})

	return p.renderingConfig
}
|
|
|
|
|
2013-09-04 19:57:17 -04:00
|
|
|
func newPage(filename string) *Page {
|
2014-01-29 17:50:31 -05:00
|
|
|
page := Page{contentType: "",
|
2014-10-16 20:20:09 -04:00
|
|
|
Source: Source{File: *source.NewFile(filename)},
|
2014-05-06 11:02:56 -04:00
|
|
|
Node: Node{Keywords: []string{}, Sitemap: Sitemap{Priority: -1}},
|
2014-01-29 17:50:31 -05:00
|
|
|
Params: make(map[string]interface{})}
|
2014-03-31 13:23:34 -04:00
|
|
|
|
2014-10-16 20:20:09 -04:00
|
|
|
jww.DEBUG.Println("Reading from", page.File.Path())
|
2014-01-29 17:50:31 -05:00
|
|
|
return &page
|
2013-08-13 19:39:24 -04:00
|
|
|
}
|
2013-07-04 11:32:55 -04:00
|
|
|
|
2013-09-18 13:17:43 -04:00
|
|
|
func (p *Page) IsRenderable() bool {
|
2014-01-29 17:50:31 -05:00
|
|
|
return p.renderable
|
2013-09-18 13:17:43 -04:00
|
|
|
}
|
|
|
|
|
2015-03-07 06:25:38 -05:00
|
|
|
func (p *Page) Type() string {
|
|
|
|
if p.contentType != "" {
|
|
|
|
return p.contentType
|
2014-01-29 17:50:31 -05:00
|
|
|
}
|
2014-10-16 20:20:09 -04:00
|
|
|
|
2015-03-07 06:25:38 -05:00
|
|
|
if x := p.Section(); x != "" {
|
2014-01-29 17:50:31 -05:00
|
|
|
return x
|
|
|
|
}
|
2013-07-04 11:32:55 -04:00
|
|
|
|
2014-01-29 17:50:31 -05:00
|
|
|
return "page"
|
2013-07-04 11:32:55 -04:00
|
|
|
}
|
|
|
|
|
2015-03-07 06:25:38 -05:00
|
|
|
func (p *Page) Section() string {
|
|
|
|
return p.Source.Section()
|
2014-10-16 20:20:09 -04:00
|
|
|
}
|
|
|
|
|
2015-11-02 11:24:50 -05:00
|
|
|
func (p *Page) layouts(l ...string) []string {
|
2015-12-18 03:54:46 -05:00
|
|
|
if len(p.layoutsCalculated) > 0 {
|
|
|
|
return p.layoutsCalculated
|
|
|
|
}
|
|
|
|
|
2015-11-02 11:24:50 -05:00
|
|
|
if p.Layout != "" {
|
|
|
|
return layouts(p.Type(), p.Layout)
|
2014-01-29 17:50:31 -05:00
|
|
|
}
|
2013-10-07 00:57:45 -04:00
|
|
|
|
2014-01-29 17:50:31 -05:00
|
|
|
layout := ""
|
|
|
|
if len(l) == 0 {
|
|
|
|
layout = "single"
|
|
|
|
} else {
|
|
|
|
layout = l[0]
|
|
|
|
}
|
2013-07-04 11:32:55 -04:00
|
|
|
|
2015-03-07 06:25:38 -05:00
|
|
|
return layouts(p.Type(), layout)
|
2013-10-07 00:57:45 -04:00
|
|
|
}
|
2013-07-04 11:32:55 -04:00
|
|
|
|
2013-10-07 00:57:45 -04:00
|
|
|
// layouts builds the ordered layout-template candidates for a (possibly
// nested) type and a layout name: most-specific type path first, then
// _default, then theme-prefixed copies of all of the above.
func layouts(types string, layout string) (layouts []string) {
	parts := strings.Split(types, "/")

	// Add type/layout.html, from most specific to least specific.
	for n := len(parts); n > 0; n-- {
		layouts = append(layouts, fmt.Sprintf("%s/%s.html", strings.ToLower(path.Join(parts[:n]...)), layout))
	}

	// Add _default/layout.html
	layouts = append(layouts, fmt.Sprintf("_default/%s.html", layout))

	// Add theme/type/layout.html & theme/_default/layout.html.
	// Range captures the pre-append length, so the theme copies are not
	// themselves re-prefixed.
	for _, candidate := range layouts {
		layouts = append(layouts, "theme/"+candidate)
	}

	return
}
|
|
|
|
|
2015-03-07 06:25:38 -05:00
|
|
|
func NewPageFrom(buf io.Reader, name string) (*Page, error) {
|
2014-05-01 14:11:56 -04:00
|
|
|
p, err := NewPage(name)
|
|
|
|
if err != nil {
|
|
|
|
return p, err
|
|
|
|
}
|
2015-04-03 15:41:12 -04:00
|
|
|
_, err = p.ReadFrom(buf)
|
2014-05-01 14:11:56 -04:00
|
|
|
|
|
|
|
return p, err
|
|
|
|
}
|
|
|
|
|
2015-03-07 06:25:38 -05:00
|
|
|
func NewPage(name string) (*Page, error) {
|
2014-01-29 17:50:31 -05:00
|
|
|
if len(name) == 0 {
|
|
|
|
return nil, errors.New("Zero length page name")
|
|
|
|
}
|
2013-08-05 10:53:58 -04:00
|
|
|
|
2014-01-29 17:50:31 -05:00
|
|
|
// Create new page
|
|
|
|
p := newPage(name)
|
2013-08-05 10:53:58 -04:00
|
|
|
|
2014-05-01 13:20:58 -04:00
|
|
|
return p, nil
|
|
|
|
}
|
|
|
|
|
2015-04-03 15:41:12 -04:00
|
|
|
func (p *Page) ReadFrom(buf io.Reader) (int64, error) {
|
2014-01-29 17:50:31 -05:00
|
|
|
// Parse for metadata & body
|
2015-04-03 15:41:12 -04:00
|
|
|
if err := p.parse(buf); err != nil {
|
2014-03-31 13:23:34 -04:00
|
|
|
jww.ERROR.Print(err)
|
2015-04-03 15:41:12 -04:00
|
|
|
return 0, err
|
2014-01-29 17:50:31 -05:00
|
|
|
}
|
2013-08-05 10:53:58 -04:00
|
|
|
|
2015-04-03 15:41:12 -04:00
|
|
|
return int64(len(p.rawContent)), nil
|
2013-07-04 11:32:55 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// analyzePage computes the derived word statistics: WordCount,
// FuzzyWordCount and ReadingTime.
func (p *Page) analyzePage() {
	if p.isCJKLanguage {
		// For CJK content, count runes rather than whitespace-separated
		// words. A token whose byte length equals its rune count is pure
		// single-byte text and counts as one word; otherwise every rune
		// counts.
		p.WordCount = 0
		for _, word := range p.PlainWords() {
			runeCount := utf8.RuneCountInString(word)
			if len(word) == runeCount {
				p.WordCount++
			} else {
				p.WordCount += runeCount
			}
		}
	} else {
		p.WordCount = len(p.PlainWords())
	}

	// Coarse word count, rounded to a multiple of 100 (never below 100).
	p.FuzzyWordCount = (p.WordCount + 100) / 100 * 100

	// Reading time in minutes, rounded up: roughly 500 words/minute for
	// CJK content, 213 words/minute otherwise (divisor is rate+1 so a
	// partial minute rounds up).
	if p.isCJKLanguage {
		p.ReadingTime = (p.WordCount + 500) / 501
	} else {
		p.ReadingTime = (p.WordCount + 212) / 213
	}
}
|
|
|
|
|
2013-10-02 19:33:51 -04:00
|
|
|
// permalink builds the page's permalink URL. Precedence: an explicit front
// matter url wins outright; then a section permalink pattern from the site
// config is expanded; otherwise the link is derived from the source
// directory plus the slug, or the source filename when no slug is set.
func (p *Page) permalink() (*url.URL, error) {
	baseURL := string(p.Site.BaseURL)
	dir := strings.TrimSpace(helpers.MakePath(filepath.ToSlash(strings.ToLower(p.Source.Dir()))))
	pSlug := strings.TrimSpace(helpers.URLize(p.Slug))
	pURL := strings.TrimSpace(helpers.URLize(p.URL))
	var permalink string
	var err error

	// Explicit front matter URL short-circuits everything else.
	if len(pURL) > 0 {
		return helpers.MakePermalink(baseURL, pURL), nil
	}

	if override, ok := p.Site.Permalinks[p.Section()]; ok {
		permalink, err = override.Expand(p)

		if err != nil {
			return nil, err
		}
		// fmt.Printf("have a section override for %q in section %s → %s\n", p.Title, p.Section, permalink)
	} else {
		if len(pSlug) > 0 {
			permalink = helpers.URLPrep(viper.GetBool("UglyURLs"), path.Join(dir, p.Slug+"."+p.Extension()))
		} else {
			// No slug: fall back to the source file name with the
			// extension swapped for the page's output extension.
			_, t := filepath.Split(p.Source.LogicalName())
			permalink = helpers.URLPrep(viper.GetBool("UglyURLs"), path.Join(dir, helpers.ReplaceExtension(strings.TrimSpace(t), p.Extension())))
		}
	}

	return helpers.MakePermalink(baseURL, permalink), nil
}
|
|
|
|
|
2014-10-16 20:20:09 -04:00
|
|
|
func (p *Page) Extension() string {
|
|
|
|
if p.extension != "" {
|
|
|
|
return p.extension
|
|
|
|
}
|
2015-03-07 06:25:38 -05:00
|
|
|
return viper.GetString("DefaultExtension")
|
2014-10-16 20:20:09 -04:00
|
|
|
}
|
|
|
|
|
2013-10-25 18:37:53 -04:00
|
|
|
func (p *Page) LinkTitle() string {
|
2014-01-29 17:50:31 -05:00
|
|
|
if len(p.linkTitle) > 0 {
|
|
|
|
return p.linkTitle
|
|
|
|
}
|
2015-03-07 06:25:38 -05:00
|
|
|
return p.Title
|
2013-10-25 18:37:53 -04:00
|
|
|
}
|
|
|
|
|
2015-03-07 06:25:38 -05:00
|
|
|
func (p *Page) ShouldBuild() bool {
|
|
|
|
if viper.GetBool("BuildFuture") || p.PublishDate.IsZero() || p.PublishDate.Before(time.Now()) {
|
|
|
|
if viper.GetBool("BuildDrafts") || !p.Draft {
|
2014-05-29 00:48:40 -04:00
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2015-03-07 06:25:38 -05:00
|
|
|
func (p *Page) IsDraft() bool {
|
|
|
|
return p.Draft
|
2014-08-20 11:09:35 -04:00
|
|
|
}
|
|
|
|
|
2015-03-07 06:25:38 -05:00
|
|
|
func (p *Page) IsFuture() bool {
|
|
|
|
if p.PublishDate.Before(time.Now()) {
|
2014-08-20 11:09:35 -04:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
2013-10-02 19:33:51 -04:00
|
|
|
func (p *Page) Permalink() (string, error) {
|
2014-01-29 17:50:31 -05:00
|
|
|
link, err := p.permalink()
|
|
|
|
if err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
return link.String(), nil
|
2013-07-04 11:32:55 -04:00
|
|
|
}
|
|
|
|
|
2013-10-02 20:00:21 -04:00
|
|
|
// RelPermalink returns the page's permalink relative to the site root.
// With CanonifyURLs enabled the path is computed relative to the baseURL
// (which may itself contain a sub-path); otherwise the scheme/host/user
// components are simply stripped from the absolute permalink.
func (p *Page) RelPermalink() (string, error) {
	link, err := p.permalink()
	if err != nil {
		return "", err
	}

	if viper.GetBool("CanonifyURLs") {
		// replacements for relpermalink with baseURL on the form http://myhost.com/sub/ will fail later on
		// have to return the URL relative from baseURL
		relpath, err := helpers.GetRelativePath(link.String(), string(p.Site.BaseURL))
		if err != nil {
			return "", err
		}
		return "/" + filepath.ToSlash(relpath), nil
	}

	// Strip the absolute parts so only the path (and query/fragment, if
	// any) remain.
	link.Scheme = ""
	link.Host = ""
	link.User = nil
	link.Opaque = ""
	return link.String(), nil
}
|
|
|
|
|
2015-08-02 02:02:20 -04:00
|
|
|
// ErrHasDraftAndPublished signals that a page's front matter sets both the
// draft and published parameters, which conflict.
var ErrHasDraftAndPublished = errors.New("both draft and published parameters were found in page's frontmatter")
|
|
|
|
|
2015-03-07 06:25:38 -05:00
|
|
|
func (p *Page) update(f interface{}) error {
|
2014-05-01 14:11:56 -04:00
|
|
|
if f == nil {
|
|
|
|
return fmt.Errorf("no metadata found")
|
|
|
|
}
|
2014-01-29 17:50:31 -05:00
|
|
|
m := f.(map[string]interface{})
|
2015-01-05 06:44:41 -05:00
|
|
|
var err error
|
2015-09-03 06:22:20 -04:00
|
|
|
var draft, published, isCJKLanguage *bool
|
2014-01-29 17:50:31 -05:00
|
|
|
for k, v := range m {
|
|
|
|
loki := strings.ToLower(k)
|
|
|
|
switch loki {
|
|
|
|
case "title":
|
2015-03-07 06:25:38 -05:00
|
|
|
p.Title = cast.ToString(v)
|
2014-01-29 17:50:31 -05:00
|
|
|
case "linktitle":
|
2015-03-07 06:25:38 -05:00
|
|
|
p.linkTitle = cast.ToString(v)
|
2014-01-29 17:50:31 -05:00
|
|
|
case "description":
|
2015-03-07 06:25:38 -05:00
|
|
|
p.Description = cast.ToString(v)
|
2015-12-12 16:20:26 -05:00
|
|
|
p.Params["description"] = p.Description
|
2014-01-29 17:50:31 -05:00
|
|
|
case "slug":
|
2015-04-09 12:14:26 -04:00
|
|
|
p.Slug = cast.ToString(v)
|
2014-01-29 17:50:31 -05:00
|
|
|
case "url":
|
2014-04-05 01:26:43 -04:00
|
|
|
if url := cast.ToString(v); strings.HasPrefix(url, "http://") || strings.HasPrefix(url, "https://") {
|
2015-03-18 01:16:54 -04:00
|
|
|
return fmt.Errorf("Only relative URLs are supported, %v provided", url)
|
2014-01-29 17:50:31 -05:00
|
|
|
}
|
2015-04-09 12:14:26 -04:00
|
|
|
p.URL = cast.ToString(v)
|
2014-01-29 17:50:31 -05:00
|
|
|
case "type":
|
2015-03-07 06:25:38 -05:00
|
|
|
p.contentType = cast.ToString(v)
|
2014-10-16 20:20:09 -04:00
|
|
|
case "extension", "ext":
|
2015-03-07 06:25:38 -05:00
|
|
|
p.extension = cast.ToString(v)
|
2014-01-29 17:50:31 -05:00
|
|
|
case "keywords":
|
2015-03-07 06:25:38 -05:00
|
|
|
p.Keywords = cast.ToStringSlice(v)
|
2014-05-29 00:48:40 -04:00
|
|
|
case "date":
|
2015-03-07 06:25:38 -05:00
|
|
|
p.Date, err = cast.ToTimeE(v)
|
2015-01-05 06:44:41 -05:00
|
|
|
if err != nil {
|
2015-03-07 06:25:38 -05:00
|
|
|
jww.ERROR.Printf("Failed to parse date '%v' in page %s", v, p.File.Path())
|
2015-01-05 06:44:41 -05:00
|
|
|
}
|
2015-05-14 16:06:36 -04:00
|
|
|
case "lastmod":
|
|
|
|
p.Lastmod, err = cast.ToTimeE(v)
|
|
|
|
if err != nil {
|
|
|
|
jww.ERROR.Printf("Failed to parse lastmod '%v' in page %s", v, p.File.Path())
|
|
|
|
}
|
2014-05-29 00:48:40 -04:00
|
|
|
case "publishdate", "pubdate":
|
2015-03-07 06:25:38 -05:00
|
|
|
p.PublishDate, err = cast.ToTimeE(v)
|
2015-01-05 06:44:41 -05:00
|
|
|
if err != nil {
|
2015-03-07 06:25:38 -05:00
|
|
|
jww.ERROR.Printf("Failed to parse publishdate '%v' in page %s", v, p.File.Path())
|
2015-01-05 06:44:41 -05:00
|
|
|
}
|
2014-01-29 17:50:31 -05:00
|
|
|
case "draft":
|
2015-08-02 02:02:20 -04:00
|
|
|
draft = new(bool)
|
|
|
|
*draft = cast.ToBool(v)
|
|
|
|
case "published": // Intentionally undocumented
|
|
|
|
published = new(bool)
|
2015-08-30 18:51:25 -04:00
|
|
|
*published = cast.ToBool(v)
|
2014-01-29 17:50:31 -05:00
|
|
|
case "layout":
|
2015-11-02 11:24:50 -05:00
|
|
|
p.Layout = cast.ToString(v)
|
2014-01-29 17:50:31 -05:00
|
|
|
case "markup":
|
2015-03-07 06:25:38 -05:00
|
|
|
p.Markup = cast.ToString(v)
|
2014-01-29 17:50:31 -05:00
|
|
|
case "weight":
|
2015-03-07 06:25:38 -05:00
|
|
|
p.Weight = cast.ToInt(v)
|
2014-01-29 17:50:31 -05:00
|
|
|
case "aliases":
|
2015-03-07 06:25:38 -05:00
|
|
|
p.Aliases = cast.ToStringSlice(v)
|
|
|
|
for _, alias := range p.Aliases {
|
2014-01-29 17:50:31 -05:00
|
|
|
if strings.HasPrefix(alias, "http://") || strings.HasPrefix(alias, "https://") {
|
|
|
|
return fmt.Errorf("Only relative aliases are supported, %v provided", alias)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
case "status":
|
2015-03-07 06:25:38 -05:00
|
|
|
p.Status = cast.ToString(v)
|
2014-05-06 11:02:56 -04:00
|
|
|
case "sitemap":
|
2015-03-07 06:25:38 -05:00
|
|
|
p.Sitemap = parseSitemap(cast.ToStringMap(v))
|
2015-09-03 06:22:20 -04:00
|
|
|
case "iscjklanguage":
|
|
|
|
isCJKLanguage = new(bool)
|
|
|
|
*isCJKLanguage = cast.ToBool(v)
|
2014-01-29 17:50:31 -05:00
|
|
|
default:
|
|
|
|
// If not one of the explicit values, store in Params
|
|
|
|
switch vv := v.(type) {
|
|
|
|
case bool:
|
2015-03-07 06:25:38 -05:00
|
|
|
p.Params[loki] = vv
|
2014-01-29 17:50:31 -05:00
|
|
|
case string:
|
2015-03-07 06:25:38 -05:00
|
|
|
p.Params[loki] = vv
|
2014-01-29 17:50:31 -05:00
|
|
|
case int64, int32, int16, int8, int:
|
2015-03-07 06:25:38 -05:00
|
|
|
p.Params[loki] = vv
|
2014-01-29 17:50:31 -05:00
|
|
|
case float64, float32:
|
2015-03-07 06:25:38 -05:00
|
|
|
p.Params[loki] = vv
|
2014-01-29 17:50:31 -05:00
|
|
|
case time.Time:
|
2015-03-07 06:25:38 -05:00
|
|
|
p.Params[loki] = vv
|
2014-01-29 17:50:31 -05:00
|
|
|
default: // handle array of strings as well
|
|
|
|
switch vvv := vv.(type) {
|
|
|
|
case []interface{}:
|
2015-06-25 05:46:09 -04:00
|
|
|
if len(vvv) > 0 {
|
|
|
|
switch vvv[0].(type) {
|
2015-07-26 09:28:56 -04:00
|
|
|
case map[interface{}]interface{}: // Proper parsing structured array from YAML based FrontMatter
|
|
|
|
p.Params[loki] = vvv
|
|
|
|
case map[string]interface{}: // Proper parsing structured array from JSON based FrontMatter
|
2015-06-25 05:46:09 -04:00
|
|
|
p.Params[loki] = vvv
|
|
|
|
default:
|
|
|
|
a := make([]string, len(vvv))
|
|
|
|
for i, u := range vvv {
|
|
|
|
a[i] = cast.ToString(u)
|
|
|
|
}
|
|
|
|
|
|
|
|
p.Params[loki] = a
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
p.Params[loki] = []string{}
|
2014-01-29 17:50:31 -05:00
|
|
|
}
|
2014-04-23 02:55:43 -04:00
|
|
|
default:
|
2015-03-07 06:25:38 -05:00
|
|
|
p.Params[loki] = vv
|
2014-01-29 17:50:31 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2015-05-14 16:06:36 -04:00
|
|
|
|
2015-08-02 02:02:20 -04:00
|
|
|
if draft != nil && published != nil {
|
|
|
|
p.Draft = *draft
|
|
|
|
jww.ERROR.Printf("page %s has both draft and published settings in its frontmatter. Using draft.", p.File.Path())
|
|
|
|
return ErrHasDraftAndPublished
|
|
|
|
} else if draft != nil {
|
|
|
|
p.Draft = *draft
|
|
|
|
} else if published != nil {
|
|
|
|
p.Draft = !*published
|
|
|
|
}
|
|
|
|
|
2015-05-14 16:06:36 -04:00
|
|
|
if p.Lastmod.IsZero() {
|
|
|
|
p.Lastmod = p.Date
|
|
|
|
}
|
|
|
|
|
2015-09-03 06:22:20 -04:00
|
|
|
if isCJKLanguage != nil {
|
|
|
|
p.isCJKLanguage = *isCJKLanguage
|
|
|
|
} else if viper.GetBool("HasCJKLanguage") {
|
|
|
|
if cjk.Match(p.rawContent) {
|
|
|
|
p.isCJKLanguage = true
|
|
|
|
} else {
|
|
|
|
p.isCJKLanguage = false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-01-29 17:50:31 -05:00
|
|
|
return nil
|
2013-07-07 00:49:57 -04:00
|
|
|
|
2013-07-04 11:32:55 -04:00
|
|
|
}
|
|
|
|
|
2015-03-07 06:25:38 -05:00
|
|
|
// GetParam returns the typed front matter parameter for key, lower-casing
// any string values along the way.
func (p *Page) GetParam(key string) interface{} {
	return p.getParam(key, true)
}
|
|
|
|
|
|
|
|
func (p *Page) getParam(key string, stringToLower bool) interface{} {
|
2015-03-07 06:25:38 -05:00
|
|
|
v := p.Params[strings.ToLower(key)]
|
2014-01-29 17:50:31 -05:00
|
|
|
|
|
|
|
if v == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
switch v.(type) {
|
|
|
|
case bool:
|
2014-04-05 01:26:43 -04:00
|
|
|
return cast.ToBool(v)
|
2014-01-29 17:50:31 -05:00
|
|
|
case string:
|
2015-05-31 14:30:53 -04:00
|
|
|
if stringToLower {
|
|
|
|
return strings.ToLower(cast.ToString(v))
|
|
|
|
}
|
|
|
|
return cast.ToString(v)
|
2014-01-29 17:50:31 -05:00
|
|
|
case int64, int32, int16, int8, int:
|
2014-04-05 01:26:43 -04:00
|
|
|
return cast.ToInt(v)
|
2014-01-29 17:50:31 -05:00
|
|
|
case float64, float32:
|
2014-04-05 01:26:43 -04:00
|
|
|
return cast.ToFloat64(v)
|
2014-01-29 17:50:31 -05:00
|
|
|
case time.Time:
|
2014-04-05 01:26:43 -04:00
|
|
|
return cast.ToTime(v)
|
2014-01-29 17:50:31 -05:00
|
|
|
case []string:
|
2015-05-31 14:30:53 -04:00
|
|
|
if stringToLower {
|
|
|
|
return helpers.SliceToLower(v.([]string))
|
|
|
|
}
|
|
|
|
return v.([]string)
|
2015-01-24 06:44:35 -05:00
|
|
|
case map[string]interface{}: // JSON and TOML
|
|
|
|
return v
|
|
|
|
case map[interface{}]interface{}: // YAML
|
2014-11-28 15:16:57 -05:00
|
|
|
return v
|
2014-01-29 17:50:31 -05:00
|
|
|
}
|
2015-01-24 06:44:35 -05:00
|
|
|
|
|
|
|
jww.ERROR.Printf("GetParam(\"%s\"): Unknown type %s\n", key, reflect.TypeOf(v))
|
2014-01-29 17:50:31 -05:00
|
|
|
return nil
|
2013-07-04 11:32:55 -04:00
|
|
|
}
|
|
|
|
|
2015-03-07 06:25:38 -05:00
|
|
|
func (p *Page) HasMenuCurrent(menu string, me *MenuEntry) bool {
|
|
|
|
menus := p.Menus()
|
2015-01-06 12:11:06 -05:00
|
|
|
sectionPagesMenu := viper.GetString("SectionPagesMenu")
|
|
|
|
|
|
|
|
// page is labeled as "shadow-member" of the menu with the same identifier as the section
|
|
|
|
if sectionPagesMenu != "" && p.Section() != "" && sectionPagesMenu == menu && p.Section() == me.Identifier {
|
|
|
|
return true
|
|
|
|
}
|
2014-04-23 02:59:19 -04:00
|
|
|
|
|
|
|
if m, ok := menus[menu]; ok {
|
|
|
|
if me.HasChildren() {
|
|
|
|
for _, child := range me.Children {
|
2014-05-14 18:01:13 -04:00
|
|
|
if child.IsEqual(m) {
|
2014-04-23 02:59:19 -04:00
|
|
|
return true
|
|
|
|
}
|
2015-06-25 06:26:48 -04:00
|
|
|
if p.HasMenuCurrent(menu, child) {
|
|
|
|
return true
|
|
|
|
}
|
2014-04-23 02:59:19 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2015-03-07 06:25:38 -05:00
|
|
|
func (p *Page) IsMenuCurrent(menu string, inme *MenuEntry) bool {
|
|
|
|
menus := p.Menus()
|
2014-04-23 02:59:19 -04:00
|
|
|
|
|
|
|
if me, ok := menus[menu]; ok {
|
2014-05-14 18:01:13 -04:00
|
|
|
return me.IsEqual(inme)
|
2014-04-23 02:59:19 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2015-03-07 06:25:38 -05:00
|
|
|
func (p *Page) Menus() PageMenus {
|
|
|
|
p.pageMenusInit.Do(func() {
|
|
|
|
p.pageMenus = PageMenus{}
|
2014-04-23 02:59:19 -04:00
|
|
|
|
2015-03-07 06:25:38 -05:00
|
|
|
if ms, ok := p.Params["menu"]; ok {
|
|
|
|
link, _ := p.RelPermalink()
|
2014-04-23 02:59:19 -04:00
|
|
|
|
2015-03-18 01:16:54 -04:00
|
|
|
me := MenuEntry{Name: p.LinkTitle(), Weight: p.Weight, URL: link}
|
2014-04-23 02:59:19 -04:00
|
|
|
|
2015-02-04 15:27:27 -05:00
|
|
|
// Could be the name of the menu to attach it to
|
|
|
|
mname, err := cast.ToStringE(ms)
|
2014-04-23 02:59:19 -04:00
|
|
|
|
2015-02-04 15:27:27 -05:00
|
|
|
if err == nil {
|
2014-04-23 02:59:19 -04:00
|
|
|
me.Menu = mname
|
2015-03-07 06:25:38 -05:00
|
|
|
p.pageMenus[mname] = &me
|
2015-02-04 15:27:27 -05:00
|
|
|
return
|
2014-04-23 02:59:19 -04:00
|
|
|
}
|
|
|
|
|
2015-02-04 15:27:27 -05:00
|
|
|
// Could be a slice of strings
|
|
|
|
mnames, err := cast.ToStringSliceE(ms)
|
2014-04-23 02:59:19 -04:00
|
|
|
|
2015-02-04 15:27:27 -05:00
|
|
|
if err == nil {
|
|
|
|
for _, mname := range mnames {
|
|
|
|
me.Menu = mname
|
2015-03-07 06:25:38 -05:00
|
|
|
p.pageMenus[mname] = &me
|
2015-02-04 15:27:27 -05:00
|
|
|
}
|
2016-03-10 04:31:12 -05:00
|
|
|
return
|
2015-02-04 15:27:27 -05:00
|
|
|
}
|
2014-04-23 02:59:19 -04:00
|
|
|
|
2015-02-04 15:27:27 -05:00
|
|
|
// Could be a structured menu entry
|
|
|
|
menus, err := cast.ToStringMapE(ms)
|
2014-04-23 02:59:19 -04:00
|
|
|
|
|
|
|
if err != nil {
|
2015-03-07 06:25:38 -05:00
|
|
|
jww.ERROR.Printf("unable to process menus for %q\n", p.Title)
|
2014-04-23 02:59:19 -04:00
|
|
|
}
|
|
|
|
|
2015-02-04 15:27:27 -05:00
|
|
|
for name, menu := range menus {
|
2015-03-18 01:16:54 -04:00
|
|
|
menuEntry := MenuEntry{Name: p.LinkTitle(), URL: link, Weight: p.Weight, Menu: name}
|
2016-03-10 04:31:12 -05:00
|
|
|
if menu != nil {
|
|
|
|
jww.DEBUG.Printf("found menu: %q, in %q\n", name, p.Title)
|
|
|
|
ime, err := cast.ToStringMapE(menu)
|
|
|
|
if err != nil {
|
|
|
|
jww.ERROR.Printf("unable to process menus for %q: %s", p.Title, err)
|
|
|
|
}
|
2015-02-04 15:27:27 -05:00
|
|
|
|
2016-03-22 19:29:39 -04:00
|
|
|
menuEntry.marshallMap(ime)
|
2015-02-04 15:27:27 -05:00
|
|
|
}
|
2015-03-07 06:25:38 -05:00
|
|
|
p.pageMenus[name] = &menuEntry
|
2015-02-04 15:27:27 -05:00
|
|
|
}
|
2014-04-23 02:59:19 -04:00
|
|
|
}
|
2015-02-04 15:27:27 -05:00
|
|
|
})
|
2014-04-23 02:59:19 -04:00
|
|
|
|
2015-03-07 06:25:38 -05:00
|
|
|
return p.pageMenus
|
2014-04-23 02:59:19 -04:00
|
|
|
}
|
|
|
|
|
2013-09-03 15:41:13 -04:00
|
|
|
func (p *Page) Render(layout ...string) template.HTML {
|
2015-07-28 19:19:29 -04:00
|
|
|
var l []string
|
2013-07-04 11:32:55 -04:00
|
|
|
|
2014-01-29 17:50:31 -05:00
|
|
|
if len(layout) > 0 {
|
2015-07-28 19:19:29 -04:00
|
|
|
l = layouts(p.Type(), layout[0])
|
|
|
|
} else {
|
2015-11-02 11:24:50 -05:00
|
|
|
l = p.layouts()
|
2014-01-29 17:50:31 -05:00
|
|
|
}
|
2013-07-04 11:32:55 -04:00
|
|
|
|
2015-07-28 19:19:29 -04:00
|
|
|
return tpl.ExecuteTemplateToHTML(p, l...)
|
2013-07-04 11:32:55 -04:00
|
|
|
}
|
|
|
|
|
2015-03-07 06:25:38 -05:00
|
|
|
func (p *Page) guessMarkupType() string {
|
2014-01-29 17:50:31 -05:00
|
|
|
// First try the explicitly set markup from the frontmatter
|
2015-03-07 06:25:38 -05:00
|
|
|
if p.Markup != "" {
|
|
|
|
format := helpers.GuessType(p.Markup)
|
2014-01-29 17:50:31 -05:00
|
|
|
if format != "unknown" {
|
|
|
|
return format
|
|
|
|
}
|
|
|
|
}
|
2013-09-18 17:21:27 -04:00
|
|
|
|
2015-03-07 06:25:38 -05:00
|
|
|
return helpers.GuessType(p.Source.Ext())
|
2013-12-06 23:56:51 -05:00
|
|
|
}
|
|
|
|
|
2015-03-07 06:25:38 -05:00
|
|
|
func (p *Page) parse(reader io.Reader) error {
|
2014-05-01 13:19:51 -04:00
|
|
|
psr, err := parser.ReadFrom(reader)
|
2014-01-29 17:50:31 -05:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2013-07-04 11:32:55 -04:00
|
|
|
|
2015-03-07 06:25:38 -05:00
|
|
|
p.renderable = psr.IsRenderable()
|
|
|
|
p.frontmatter = psr.FrontMatter()
|
2015-09-03 06:22:20 -04:00
|
|
|
p.rawContent = psr.Content()
|
|
|
|
|
2014-05-01 13:19:51 -04:00
|
|
|
meta, err := psr.Metadata()
|
2014-05-01 14:11:56 -04:00
|
|
|
if meta != nil {
|
|
|
|
if err != nil {
|
2015-03-07 06:25:38 -05:00
|
|
|
jww.ERROR.Printf("Error parsing page meta data for %s", p.File.Path())
|
2014-05-01 14:11:56 -04:00
|
|
|
jww.ERROR.Println(err)
|
|
|
|
return err
|
|
|
|
}
|
2015-03-07 06:25:38 -05:00
|
|
|
if err = p.update(meta); err != nil {
|
2014-05-01 14:11:56 -04:00
|
|
|
return err
|
|
|
|
}
|
2014-05-01 13:19:51 -04:00
|
|
|
}
|
2013-09-18 13:17:43 -04:00
|
|
|
|
2014-05-01 13:19:51 -04:00
|
|
|
return nil
|
|
|
|
}
|
2013-09-18 13:17:43 -04:00
|
|
|
|
2015-07-02 09:32:57 -04:00
|
|
|
// RawContent returns the page's raw source content (without front matter)
// as a string.
func (p *Page) RawContent() string {
	return string(p.rawContent)
}
|
|
|
|
|
2015-03-07 06:25:38 -05:00
|
|
|
// SetSourceContent replaces the page's source content (the part after the
// front matter).
func (p *Page) SetSourceContent(content []byte) {
	p.Source.Content = content
}
|
2013-12-06 23:32:00 -05:00
|
|
|
|
2015-03-07 06:25:38 -05:00
|
|
|
func (p *Page) SetSourceMetaData(in interface{}, mark rune) (err error) {
|
2014-05-01 13:19:51 -04:00
|
|
|
by, err := parser.InterfaceToFrontMatter(in, mark)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
2014-01-29 17:50:31 -05:00
|
|
|
}
|
2014-05-11 22:34:44 -04:00
|
|
|
by = append(by, '\n')
|
2014-05-01 13:19:51 -04:00
|
|
|
|
2015-03-07 06:25:38 -05:00
|
|
|
p.Source.Frontmatter = by
|
2013-12-06 23:32:00 -05:00
|
|
|
|
2014-01-29 17:50:31 -05:00
|
|
|
return nil
|
2013-12-06 23:32:00 -05:00
|
|
|
}
|
2013-08-25 00:27:41 -04:00
|
|
|
|
2015-03-07 06:25:38 -05:00
|
|
|
// SafeSaveSourceAs writes the page's source to path, refusing to overwrite
// an existing file.
func (p *Page) SafeSaveSourceAs(path string) error {
	return p.saveSourceAs(path, true)
}
|
|
|
|
|
2015-03-07 06:25:38 -05:00
|
|
|
// SaveSourceAs writes the page's source to path, overwriting any existing
// file.
func (p *Page) SaveSourceAs(path string) error {
	return p.saveSourceAs(path, false)
}
|
|
|
|
|
2015-03-07 06:25:38 -05:00
|
|
|
func (p *Page) saveSourceAs(path string, safe bool) error {
|
2015-01-30 14:42:02 -05:00
|
|
|
b := bp.GetBuffer()
|
|
|
|
defer bp.PutBuffer(b)
|
|
|
|
|
2015-03-07 06:25:38 -05:00
|
|
|
b.Write(p.Source.Frontmatter)
|
|
|
|
b.Write(p.Source.Content)
|
2014-05-01 13:21:37 -04:00
|
|
|
|
2015-01-30 14:42:02 -05:00
|
|
|
bc := make([]byte, b.Len(), b.Len())
|
|
|
|
copy(bc, b.Bytes())
|
|
|
|
|
2015-03-07 06:25:38 -05:00
|
|
|
err := p.saveSource(bc, path, safe)
|
2014-05-02 01:04:48 -04:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return nil
|
2014-05-01 13:21:37 -04:00
|
|
|
}
|
|
|
|
|
2015-03-07 06:25:38 -05:00
|
|
|
func (p *Page) saveSource(by []byte, inpath string, safe bool) (err error) {
|
2014-11-06 11:52:01 -05:00
|
|
|
if !filepath.IsAbs(inpath) {
|
2014-05-01 13:21:37 -04:00
|
|
|
inpath = helpers.AbsPathify(inpath)
|
|
|
|
}
|
|
|
|
jww.INFO.Println("creating", inpath)
|
2014-05-02 01:04:48 -04:00
|
|
|
|
|
|
|
if safe {
|
2014-11-01 11:57:29 -04:00
|
|
|
err = helpers.SafeWriteToDisk(inpath, bytes.NewReader(by), hugofs.SourceFs)
|
2014-05-02 01:04:48 -04:00
|
|
|
} else {
|
2014-11-01 11:57:29 -04:00
|
|
|
err = helpers.WriteToDisk(inpath, bytes.NewReader(by), hugofs.SourceFs)
|
2014-05-02 01:04:48 -04:00
|
|
|
}
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
2014-05-01 13:21:37 -04:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2015-03-07 06:25:38 -05:00
|
|
|
// SaveSource writes the page's source back to its own file path.
func (p *Page) SaveSource() error {
	return p.SaveSourceAs(p.FullFilePath())
}
|
|
|
|
|
2014-11-20 12:32:21 -05:00
|
|
|
// ProcessShortcodes extracts and renders the page's shortcodes with the
// given template set, storing the rendered results for substitution into
// the page output later in the build.
func (p *Page) ProcessShortcodes(t tpl.Template) {

	// these short codes aren't used until after Page render,
	// but processed here to avoid coupling
	// TODO(bep) Move this and remove p.contentShortCodes
	if !p.rendered {
		// First pass: replace shortcodes in the raw content with placeholders
		// and keep the rendered bodies keyed by placeholder.
		// NOTE(review): the error from extractAndRenderShortcodes is
		// discarded here — confirm that is intentional.
		tmpContent, tmpContentShortCodes, _ := extractAndRenderShortcodes(string(p.rawContent), p, t)
		p.rawContent = []byte(tmpContent)
		p.contentShortCodes = tmpContentShortCodes
	} else {
		// shortcode template may have changed, rerender
		p.contentShortCodes = renderShortcodes(p.shortcodes, p, t)
	}

}
|
|
|
|
|
2014-11-20 12:39:09 -05:00
|
|
|
// TODO(spf13): Remove this entirely
|
|
|
|
// Here for backwards compatibility & testing. Only works in isolation
|
2015-03-07 06:25:38 -05:00
|
|
|
func (p *Page) Convert() error {
|
2014-11-20 12:39:09 -05:00
|
|
|
var h Handler
|
2015-03-07 06:25:38 -05:00
|
|
|
if p.Markup != "" {
|
|
|
|
h = FindHandler(p.Markup)
|
2014-11-20 12:39:09 -05:00
|
|
|
} else {
|
2015-03-07 06:25:38 -05:00
|
|
|
h = FindHandler(p.File.Extension())
|
2014-11-20 12:39:09 -05:00
|
|
|
}
|
|
|
|
if h != nil {
|
2015-03-07 06:25:38 -05:00
|
|
|
h.PageConvert(p, tpl.T())
|
2014-01-29 17:50:31 -05:00
|
|
|
}
|
Shortcode rewrite, take 2
This commit contains a restructuring and partial rewrite of the shortcode handling.
Prior to this commit rendering of the page content was mingled with handling of the shortcodes. This led to several oddities.
The new flow is:
1. Shortcodes are extracted from page and replaced with placeholders.
2. Shortcodes are processed and rendered
3. Page is processed
4. The placeholders are replaced with the rendered shortcodes
The handling of summaries is also made simpler by this.
This commit also introduces some other chenges:
1. distinction between shortcodes that need further processing and those who do not:
* `{{< >}}`: Typically raw HTML. Will not be processed.
* `{{% %}}`: Will be processed by the page's markup engine (Markdown or (infuture) Asciidoctor)
The above also involves a new shortcode-parser, with lexical scanning inspired by Rob Pike's talk called "Lexical Scanning in Go",
which should be easier to understand, give better error messages and perform better.
2. If you want to exclude a shortcode from being processed (for documentation etc.), the inner part of the shorcode must be commented out, i.e. `{{%/* movie 47238zzb */%}}`. See the updated shortcode section in the documentation for further examples.
The new parser supports nested shortcodes. This isn't new, but has two related design choices worth mentioning:
* The shortcodes will be rendered individually, so If both `{{< >}}` and `{{% %}}` are used in the nested hierarchy, one will be passed through the page's markdown processor, the other not.
* To avoid potential costly overhead of always looking far ahead for a possible closing tag, this implementation looks at the template itself, and is branded as a container with inner content if it contains a reference to `.Inner`
Fixes #565
Fixes #480
Fixes #461
And probably some others.
2014-10-27 16:48:30 -04:00
|
|
|
|
2014-11-20 12:39:09 -05:00
|
|
|
//// now we know enough to create a summary of the page and count some words
|
2015-03-07 06:25:38 -05:00
|
|
|
p.setSummary()
|
Shortcode rewrite, take 2
This commit contains a restructuring and partial rewrite of the shortcode handling.
Prior to this commit rendering of the page content was mingled with handling of the shortcodes. This led to several oddities.
The new flow is:
1. Shortcodes are extracted from page and replaced with placeholders.
2. Shortcodes are processed and rendered
3. Page is processed
4. The placeholders are replaced with the rendered shortcodes
The handling of summaries is also made simpler by this.
This commit also introduces some other chenges:
1. distinction between shortcodes that need further processing and those who do not:
* `{{< >}}`: Typically raw HTML. Will not be processed.
* `{{% %}}`: Will be processed by the page's markup engine (Markdown or (infuture) Asciidoctor)
The above also involves a new shortcode-parser, with lexical scanning inspired by Rob Pike's talk called "Lexical Scanning in Go",
which should be easier to understand, give better error messages and perform better.
2. If you want to exclude a shortcode from being processed (for documentation etc.), the inner part of the shorcode must be commented out, i.e. `{{%/* movie 47238zzb */%}}`. See the updated shortcode section in the documentation for further examples.
The new parser supports nested shortcodes. This isn't new, but has two related design choices worth mentioning:
* The shortcodes will be rendered individually, so If both `{{< >}}` and `{{% %}}` are used in the nested hierarchy, one will be passed through the page's markdown processor, the other not.
* To avoid potential costly overhead of always looking far ahead for a possible closing tag, this implementation looks at the template itself, and is branded as a container with inner content if it contains a reference to `.Inner`
Fixes #565
Fixes #480
Fixes #461
And probably some others.
2014-10-27 16:48:30 -04:00
|
|
|
//analyze for raw stats
|
2015-03-07 06:25:38 -05:00
|
|
|
p.analyzePage()
|
Shortcode rewrite, take 2
This commit contains a restructuring and partial rewrite of the shortcode handling.
Prior to this commit rendering of the page content was mingled with handling of the shortcodes. This led to several oddities.
The new flow is:
1. Shortcodes are extracted from page and replaced with placeholders.
2. Shortcodes are processed and rendered
3. Page is processed
4. The placeholders are replaced with the rendered shortcodes
The handling of summaries is also made simpler by this.
This commit also introduces some other chenges:
1. distinction between shortcodes that need further processing and those who do not:
* `{{< >}}`: Typically raw HTML. Will not be processed.
* `{{% %}}`: Will be processed by the page's markup engine (Markdown or (infuture) Asciidoctor)
The above also involves a new shortcode-parser, with lexical scanning inspired by Rob Pike's talk called "Lexical Scanning in Go",
which should be easier to understand, give better error messages and perform better.
2. If you want to exclude a shortcode from being processed (for documentation etc.), the inner part of the shorcode must be commented out, i.e. `{{%/* movie 47238zzb */%}}`. See the updated shortcode section in the documentation for further examples.
The new parser supports nested shortcodes. This isn't new, but has two related design choices worth mentioning:
* The shortcodes will be rendered individually, so If both `{{< >}}` and `{{% %}}` are used in the nested hierarchy, one will be passed through the page's markdown processor, the other not.
* To avoid potential costly overhead of always looking far ahead for a possible closing tag, this implementation looks at the template itself, and is branded as a container with inner content if it contains a reference to `.Inner`
Fixes #565
Fixes #480
Fixes #461
And probably some others.
2014-10-27 16:48:30 -04:00
|
|
|
|
2014-01-29 17:50:31 -05:00
|
|
|
return nil
|
2013-07-04 11:32:55 -04:00
|
|
|
}
|
|
|
|
|
2014-05-01 13:21:37 -04:00
|
|
|
// FullFilePath returns the page's source file path: its directory joined
// with its logical file name.
func (p *Page) FullFilePath() string {
	return filepath.Join(p.Dir(), p.LogicalName())
}
|
|
|
|
|
2013-09-20 20:24:25 -04:00
|
|
|
func (p *Page) TargetPath() (outfile string) {
|
|
|
|
|
2015-03-18 01:16:54 -04:00
|
|
|
// Always use URL if it's specified
|
|
|
|
if len(strings.TrimSpace(p.URL)) > 2 {
|
|
|
|
outfile = strings.TrimSpace(p.URL)
|
2014-01-29 17:50:31 -05:00
|
|
|
|
|
|
|
if strings.HasSuffix(outfile, "/") {
|
|
|
|
outfile = outfile + "index.html"
|
|
|
|
}
|
2014-12-07 13:48:00 -05:00
|
|
|
outfile = filepath.FromSlash(outfile)
|
2014-01-29 17:50:31 -05:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// If there's a Permalink specification, we use that
|
2014-10-16 20:20:09 -04:00
|
|
|
if override, ok := p.Site.Permalinks[p.Section()]; ok {
|
2014-01-29 17:50:31 -05:00
|
|
|
var err error
|
|
|
|
outfile, err = override.Expand(p)
|
|
|
|
if err == nil {
|
2015-04-09 12:14:26 -04:00
|
|
|
outfile, _ = url.QueryUnescape(outfile)
|
2014-01-29 17:50:31 -05:00
|
|
|
if strings.HasSuffix(outfile, "/") {
|
|
|
|
outfile += "index.html"
|
|
|
|
}
|
2014-12-07 13:48:00 -05:00
|
|
|
outfile = filepath.FromSlash(outfile)
|
2014-01-29 17:50:31 -05:00
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(strings.TrimSpace(p.Slug)) > 0 {
|
2014-10-16 20:20:09 -04:00
|
|
|
outfile = strings.TrimSpace(p.Slug) + "." + p.Extension()
|
2014-01-29 17:50:31 -05:00
|
|
|
} else {
|
|
|
|
// Fall back to filename
|
2014-10-16 20:20:09 -04:00
|
|
|
outfile = helpers.ReplaceExtension(p.Source.LogicalName(), p.Extension())
|
2014-09-09 16:58:02 -04:00
|
|
|
}
|
|
|
|
|
2015-05-28 17:05:13 -04:00
|
|
|
return filepath.Join(strings.ToLower(helpers.MakePath(p.Source.Dir())), strings.TrimSpace(outfile))
|
2014-10-01 14:26:43 -04:00
|
|
|
}
|