hugo/hugolib/page.go

// Copyright 2016 The Hugo Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package hugolib

import (
	"bytes"
	"errors"
	"fmt"
	"html/template"
	"io"
	"net/url"
	"path"
	"path/filepath"
	"reflect"
	"regexp"
	"strings"
	"sync"
	"time"
	"unicode/utf8"

	"github.com/bep/gitmap"
	"github.com/mitchellh/mapstructure"
	"github.com/spf13/cast"
	bp "github.com/spf13/hugo/bufferpool"
	"github.com/spf13/hugo/helpers"
	"github.com/spf13/hugo/hugofs"
	"github.com/spf13/hugo/parser"
	"github.com/spf13/hugo/source"
	"github.com/spf13/hugo/tpl"
	jww "github.com/spf13/jwalterweatherman"
	"github.com/spf13/viper"
)
var (
cjk = regexp.MustCompile(`\p{Han}|\p{Hangul}|\p{Hiragana}|\p{Katakana}`)
)
// Kind is the discriminator that identifies the different page types
// in the different page collections. This can, as an example, be used
// to filter regular pages, find sections etc.
// NOTE: The exported constants below are used to filter pages from
// templates in the wild, so do not change the values!
type Kind string
const (
KindPage Kind = "page"
// The rest are node types; home page, sections etc.
KindHome Kind = "home"
KindSection Kind = "section"
KindTaxonomy Kind = "taxonomy"
KindTaxonomyTerm Kind = "taxonomyTerm"
// Temporary state.
kindUnknown Kind = "unknown"
// The following are (currently) temporary nodes,
// i.e. nodes we create just to render in isolation.
kindSitemap Kind = "sitemap"
kindRobotsTXT Kind = "robotsTXT"
kind404 Kind = "404"
)
// IsNode returns whether this is an item of one of the list types in Hugo,
// i.e. not a regular content page.
func (k Kind) IsNode() bool {
return k != KindPage
}
// IsHome returns whether this is the home page.
func (k Kind) IsHome() bool {
return k == KindHome
}
// IsPage returns whether this is a regular content page.
func (k Kind) IsPage() bool {
return k == KindPage
}
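
// For example, templates can use these exported constants to filter the site's
// page collections, roughly like this (an illustrative sketch, assuming the
// standard "where" template func):
//
//   {{ range where .Site.Pages "Kind" "section" }}
//     {{ .Title }}
//   {{ end }}
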
type Page struct {
// Kind will, for the pages available to the templates, be one of:
// page, home, section, taxonomy and taxonomyTerm.
Kind
// Since Hugo 0.18 we got rid of the Node type. So now all pages are ...
// pages (regular pages, home page, sections etc.).
// Sections etc. will have child pages. These were earlier placed in .Data.Pages,
// but can now also be fetched more intuitively directly from .Pages.
// This collection will be nil for regular pages.
Pages Pages
Params map[string]interface{}
Content template.HTML
Summary template.HTML
Aliases []string
Status string
Images []Image
Videos []Video
TableOfContents template.HTML
Truncated bool
Draft bool
PublishDate time.Time
ExpiryDate time.Time
Markup string
translations Pages
extension string
contentType string
renderable bool
Layout string
layoutsCalculated []string
linkTitle string
frontmatter []byte
// rawContent is not "raw" in the sense of being identical to the content file.
// Hugo cares about memory consumption, so we make changes to it to do
// markdown rendering etc., but it is "raw enough" so we can do rebuilds
// when shortcodes change etc.
rawContent []byte
// state telling if this is a "new page" or if we have rendered it previously.
rendered bool
contentShortCodes map[string]func() (string, error)
shortcodes map[string]shortcode
plain string // TODO should be []byte
plainWords []string
plainInit sync.Once
plainWordsInit sync.Once
renderingConfig *helpers.Blackfriday
renderingConfigInit sync.Once
pageMenus PageMenus
pageMenusInit sync.Once
isCJKLanguage bool
PageMeta
Source
Position `json:"-"`
// TODO(bep) np pointer, or remove
Node
GitInfo *gitmap.GitInfo
// This was added as part of getting the Nodes (taxonomies etc.) to work as
// Pages in Hugo 0.18.
// It is deliberately named similar to Section, but not exported (for now).
// We currently have only one level of section in Hugo, but the page can live
// any number of levels down the file path.
// To support taxonomies like /categories/hugo etc. we will need to keep track
// of that information in a general way.
// So, sections represents the path to the content, i.e. a content file or a
// virtual content file in the situations where a taxonomy or a section etc.
// isn't accompanied by one.
sections []string
// TODO(bep) np Site added to page, keep?
site *Site
}
type Source struct {
Frontmatter []byte
Content []byte
source.File
}
type PageMeta struct {
wordCount int
fuzzyWordCount int
readingTime int
pageMetaInit sync.Once
Weight int
}
func (*PageMeta) WordCount() int {
helpers.Deprecated("PageMeta", "WordCount", ".WordCount (on Page)")
return 0
}
func (*PageMeta) FuzzyWordCount() int {
helpers.Deprecated("PageMeta", "FuzzyWordCount", ".FuzzyWordCount (on Page)")
return 0
}
func (*PageMeta) ReadingTime() int {
helpers.Deprecated("PageMeta", "ReadingTime", ".ReadingTime (on Page)")
return 0
}
type Position struct {
Prev *Page
Next *Page
PrevInSection *Page
NextInSection *Page
}
type Pages []*Page
func (ps Pages) FindPagePosByFilePath(inPath string) int {
for i, x := range ps {
if x.Source.Path() == inPath {
return i
}
}
return -1
}
// FindPagePos returns the position of the given page in Pages,
// or -1 if it is not found.
func (ps Pages) FindPagePos(page *Page) int {
for i, x := range ps {
if x.Source.Path() == page.Source.Path() {
return i
}
}
return -1
}
func (p *Page) Plain() string {
p.initPlain()
return p.plain
}
func (p *Page) PlainWords() []string {
p.initPlainWords()
return p.plainWords
}
func (p *Page) initPlain() {
p.plainInit.Do(func() {
p.plain = helpers.StripHTML(string(p.Content))
return
})
}
func (p *Page) initPlainWords() {
p.plainWordsInit.Do(func() {
p.plainWords = strings.Fields(p.Plain())
return
})
}
// Param is a convenience method to do lookups in Page's and Site's Params map,
// in that order.
//
// This method is also implemented on Node and SiteInfo.
func (p *Page) Param(key interface{}) (interface{}, error) {
keyStr, err := cast.ToStringE(key)
if err != nil {
return nil, err
}
keyStr = strings.ToLower(keyStr)
if val, ok := p.Params[keyStr]; ok {
return val, nil
}
return p.Site.Params[keyStr], nil
}
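
// A minimal template example of the lookup order described above:
//
//   {{ with .Param "author" }}Written by {{ . }}{{ end }}
//
// This prints the page-level "author" front matter value if set, otherwise
// the site-wide value from the configuration's params (if any).
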
func (p *Page) Author() Author {
authors := p.Authors()
for _, author := range authors {
return author
}
return Author{}
}
func (p *Page) Authors() AuthorList {
authorKeys, ok := p.Params["authors"]
if !ok {
return AuthorList{}
}
authors := authorKeys.([]string)
if len(authors) < 1 || len(p.Site.Authors) < 1 {
return AuthorList{}
}
al := make(AuthorList)
for _, author := range authors {
a, ok := p.Site.Authors[author]
if ok {
al[author] = a
}
}
return al
}
func (p *Page) UniqueID() string {
return p.Source.UniqueID()
}
// Ref returns the permalink of the document referenced by ref. The reference
// is the logical filename of a page (e.g. "about.md"), optionally with a
// fragment ID (e.g. "about.md#who"); fragments get the referenced page's
// UniqueID appended.
func (p *Page) Ref(ref string) (string, error) {
return p.Node.Site.Ref(ref, p)
}

// RelRef is the relative permalink variant of Ref.
func (p *Page) RelRef(ref string) (string, error) {
return p.Node.Site.RelRef(ref, p)
}
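
// Typical usage, as wired up through the accompanying "ref"/"relref"
// shortcodes and template funcs:
//
//   In content:    [Who]({{% ref about.md#who %}})
//   In a template: {{ relref . "about.md" }}
//
// A reference like "about.md" resolves to the target page's (relative)
// permalink, e.g. "/about/"; "about.md#who" additionally gets the target
// page's UniqueID appended to the fragment.
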
// lineNumRawContentStart returns the line number in the source file where the
// raw content starts (i.e. just after the front matter). Used for logging.
func (p *Page) lineNumRawContentStart() int {
return bytes.Count(p.frontmatter, []byte("\n")) + 1
}
var (
internalSummaryDivider = []byte("HUGOMORE42")
)
// setUserDefinedSummaryIfProvided splits the rendered content into summary and
// main content if a user-defined summary divider is present; it returns nil
// if no divider is found.
func (p *Page) setUserDefinedSummaryIfProvided(rawContentCopy []byte) (*summaryContent, error) {
sc, err := splitUserDefinedSummaryAndContent(p.Markup, rawContentCopy)
if err != nil {
return nil, err
}
if sc == nil {
// No divider found
return nil, nil
}
p.Truncated = true
if len(sc.content) < 20 {
// only whitespace?
p.Truncated = len(bytes.Trim(sc.content, " \n\r")) > 0
}
p.Summary = helpers.BytesToHTML(sc.summary)
return sc, nil
}
// Make this explicit so there is no doubt about what is what.
type summaryContent struct {
summary []byte
content []byte
contentWithoutSummary []byte
}
func splitUserDefinedSummaryAndContent(markup string, c []byte) (sc *summaryContent, err error) {
defer func() {
if r := recover(); r != nil {
err = fmt.Errorf("summary split failed: %s", r)
}
}()
c = bytes.TrimSpace(c)
startDivider := bytes.Index(c, internalSummaryDivider)
if startDivider == -1 {
return
}
endDivider := startDivider + len(internalSummaryDivider)
endSummary := startDivider
var (
startMarkup []byte
endMarkup []byte
addDiv bool
divStart = []byte("<div class=\"document\">")
)
switch markup {
default:
startMarkup = []byte("<p>")
endMarkup = []byte("</p>")
case "asciidoc":
startMarkup = []byte("<div class=\"paragraph\">")
endMarkup = []byte("</div>")
case "rst":
startMarkup = []byte("<p>")
endMarkup = []byte("</p>")
addDiv = true
}
// Find the closest end/start markup string to the divider
fromStart := -1
fromIdx := bytes.LastIndex(c[:startDivider], startMarkup)
if fromIdx != -1 {
fromStart = startDivider - fromIdx - len(startMarkup)
}
fromEnd := bytes.Index(c[endDivider:], endMarkup)
if fromEnd != -1 && fromEnd <= fromStart {
endSummary = startDivider + fromEnd + len(endMarkup)
} else if fromStart != -1 && fromEnd != -1 {
endSummary = startDivider - fromStart - len(startMarkup)
}
withoutDivider := bytes.TrimSpace(append(c[:startDivider], c[endDivider:]...))
var (
contentWithoutSummary []byte
summary []byte
)
if len(withoutDivider) > 0 {
contentWithoutSummary = bytes.TrimSpace(withoutDivider[endSummary:])
summary = bytes.TrimSpace(withoutDivider[:endSummary])
}
if addDiv {
// For the rst
summary = append(append([]byte(nil), summary...), []byte("</div>")...)
// TODO(bep) include the document class, maybe
contentWithoutSummary = append(divStart, contentWithoutSummary...)
}
if err != nil {
return
}
sc = &summaryContent{
summary: summary,
content: withoutDivider,
contentWithoutSummary: contentWithoutSummary,
}
return
}
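
// Roughly: the user-facing <!--more--> divider in the content file is replaced
// by the internal marker before rendering, and the summary is everything
// rendered before that marker, snapped to the nearest surrounding paragraph
// (or equivalent) boundary for the markup in use; the marker itself is
// dropped from the content.
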
func (p *Page) setAutoSummary() error {
var summary string
var truncated bool
if p.isCJKLanguage {
summary, truncated = helpers.TruncateWordsByRune(p.PlainWords(), helpers.SummaryLength)
} else {
summary, truncated = helpers.TruncateWordsToWholeSentence(p.Plain(), helpers.SummaryLength)
}
p.Summary = template.HTML(summary)
p.Truncated = truncated
return nil
}
func (p *Page) renderContent(content []byte) []byte {
var fn helpers.LinkResolverFunc
var fileFn helpers.FileResolverFunc
if p.getRenderingConfig().SourceRelativeLinksEval {
fn = func(ref string) (string, error) {
return p.Node.Site.SourceRelativeLink(ref, p)
}
fileFn = func(ref string) (string, error) {
return p.Node.Site.SourceRelativeLinkFile(ref, p)
}
}
return helpers.RenderBytes(&helpers.RenderingContext{
Content: content, RenderTOC: true, PageFmt: p.determineMarkupType(),
ConfigProvider: p.Language(),
DocumentID: p.UniqueID(), DocumentName: p.Path(),
Config: p.getRenderingConfig(), LinkResolver: fn, FileResolver: fileFn})
}
func (p *Page) getRenderingConfig() *helpers.Blackfriday {
p.renderingConfigInit.Do(func() {
pageParam := cast.ToStringMap(p.GetParam("blackfriday"))
if p.Language() == nil {
panic(fmt.Sprintf("nil language for %s with source lang %s", p.BaseFileName(), p.lang))
}
p.renderingConfig = helpers.NewBlackfriday(p.Language())
if err := mapstructure.Decode(pageParam, p.renderingConfig); err != nil {
jww.FATAL.Printf("Failed to get rendering config for %s:\n%s", p.BaseFileName(), err.Error())
}
})
return p.renderingConfig
}
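
// Per-page overrides are read from a "blackfriday" map in the front matter,
// for example (YAML, option names as defined in helpers.Blackfriday):
//
//   blackfriday:
//     angledQuotes: true
//     hrefTargetBlank: true
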
func newPage(filename string) *Page {
page := Page{
Kind: nodeTypeFromFilename(filename),
contentType: "",
Source: Source{File: *source.NewFile(filename)},
Node: Node{Keywords: []string{}, Sitemap: Sitemap{Priority: -1}},
Params: make(map[string]interface{}),
translations: make(Pages, 0),
sections: sectionsFromFilename(filename),
}
jww.DEBUG.Println("Reading from", page.File.Path())
return &page
}

func (p *Page) IsRenderable() bool {
return p.renderable
}

func (p *Page) Type() string {
if p.contentType != "" {
return p.contentType
}

if x := p.Section(); x != "" {
return x
}

return "page"
}
func (p *Page) Section() string {
return p.Source.Section()
}
func (p *Page) layouts(l ...string) []string {
if len(p.layoutsCalculated) > 0 {
return p.layoutsCalculated
}
// TODO(bep) np taxonomy etc.
switch p.Kind {
case KindHome:
return []string{"index.html", "_default/list.html"}
case KindSection:
section := p.sections[0]
return []string{"section/" + section + ".html", "_default/section.html", "_default/list.html", "indexes/" + section + ".html", "_default/indexes.html"}
case KindTaxonomy:
singular := p.site.taxonomiesPluralSingular[p.sections[0]]
return []string{"taxonomy/" + singular + ".html", "indexes/" + singular + ".html", "_default/taxonomy.html", "_default/list.html"}
case KindTaxonomyTerm:
singular := p.site.taxonomiesPluralSingular[p.sections[0]]
return []string{"taxonomy/" + singular + ".terms.html", "_default/terms.html", "indexes/indexes.html"}
}
// Regular Page handled below
if p.Layout != "" {
return layouts(p.Type(), p.Layout)
}

layout := ""
if len(l) == 0 {
layout = "single"
} else {
layout = l[0]
}

return layouts(p.Type(), layout)
}
// TODO(bep) np consolidate and test these NodeType switches
// rssLayouts returns the RSS layouts to use for the RSS version of this page,
// or nil if no RSS should be rendered.
func (p *Page) rssLayouts() []string {
switch p.Kind {
case KindHome:
return []string{"rss.xml", "_default/rss.xml", "_internal/_default/rss.xml"}
case KindSection:
section := p.sections[0]
return []string{"section/" + section + ".rss.xml", "_default/rss.xml", "rss.xml", "_internal/_default/rss.xml"}
case KindTaxonomy:
singular := p.site.taxonomiesPluralSingular[p.sections[0]]
return []string{"taxonomy/" + singular + ".rss.xml", "_default/rss.xml", "rss.xml", "_internal/_default/rss.xml"}
case KindTaxonomyTerm:
// No RSS for taxonomy terms
case KindPage:
// No RSS for regular pages
}

return nil
}
func layouts(types string, layout string) (layouts []string) {
t := strings.Split(types, "/")

// Add type/layout.html
for i := range t {
search := t[:len(t)-i]
layouts = append(layouts, fmt.Sprintf("%s/%s.html", strings.ToLower(path.Join(search...)), layout))
}

// Add _default/layout.html
layouts = append(layouts, fmt.Sprintf("_default/%s.html", layout))

// Add theme/type/layout.html & theme/_default/layout.html
for _, l := range layouts {
layouts = append(layouts, "theme/"+l)
}

return
}
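
// As a worked example, layouts("post", "single") produces, in order:
//
//   post/single.html
//   _default/single.html
//   theme/post/single.html
//   theme/_default/single.html
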
func NewPageFrom(buf io.Reader, name string) (*Page, error) {
p, err := NewPage(name)
if err != nil {
return p, err
}
_, err = p.ReadFrom(buf)
return p, err
}
func NewPage(name string) (*Page, error) {
if len(name) == 0 {
return nil, errors.New("Zero length page name")
}

// Create new page
p := newPage(name)
return p, nil
}

func (p *Page) ReadFrom(buf io.Reader) (int64, error) {
// Parse for metadata & body
if err := p.parse(buf); err != nil {
jww.ERROR.Print(err)
return 0, err
}

return int64(len(p.rawContent)), nil
}
func (p *Page) WordCount() int {
p.analyzePage()
return p.wordCount
}
func (p *Page) ReadingTime() int {
p.analyzePage()
return p.readingTime
}
func (p *Page) FuzzyWordCount() int {
p.analyzePage()
return p.fuzzyWordCount
}
func (p *Page) analyzePage() {
p.pageMetaInit.Do(func() {
if p.isCJKLanguage {
p.wordCount = 0
for _, word := range p.PlainWords() {
runeCount := utf8.RuneCountInString(word)
if len(word) == runeCount {
p.wordCount++
} else {
p.wordCount += runeCount
}
}
} else {
p.wordCount = helpers.TotalWords(p.Plain())
}
// TODO(bep) is set in a test. Fix that.
if p.fuzzyWordCount == 0 {
p.fuzzyWordCount = (p.wordCount + 100) / 100 * 100
}
if p.isCJKLanguage {
p.readingTime = (p.wordCount + 500) / 501
} else {
p.readingTime = (p.wordCount + 212) / 213
}
})
}
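
// The divisors above amount to roughly 213 words per minute for non-CJK
// content and 501 characters per minute for CJK content; for example, a
// 1000-word English page gets a reading time of (1000+212)/213 = 5 minutes.
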
func (p *Page) permalink() (*url.URL, error) {
baseURL := string(p.Site.BaseURL)
dir := strings.TrimSpace(p.Site.pathSpec.MakePath(filepath.ToSlash(strings.ToLower(p.Source.Dir()))))
pSlug := strings.TrimSpace(p.Site.pathSpec.URLize(p.Slug))
pURL := strings.TrimSpace(p.Site.pathSpec.URLize(p.URLPath.URL))
var permalink string
var err error

if len(pURL) > 0 {
return helpers.MakePermalink(baseURL, pURL), nil
}

if override, ok := p.Site.Permalinks[p.Section()]; ok {
permalink, err = override.Expand(p)
if err != nil {
return nil, err
}
} else {
if len(pSlug) > 0 {
permalink = helpers.URLPrep(viper.GetBool("uglyURLs"), path.Join(dir, p.Slug+"."+p.Extension()))
} else {
t := p.Source.TranslationBaseName()
permalink = helpers.URLPrep(viper.GetBool("uglyURLs"), path.Join(dir, (strings.TrimSpace(t)+"."+p.Extension())))
}
}
permalink = p.addLangPathPrefix(permalink)
return helpers.MakePermalink(baseURL, permalink), nil
}
func (p *Page) Extension() string {
if p.extension != "" {
return p.extension
}
return viper.GetString("defaultExtension")
}
// AllTranslations returns all translations, including the current Page.
func (p *Page) AllTranslations() Pages {
return p.translations
}
// IsTranslated returns whether this content file is translated to
// other language(s).
func (p *Page) IsTranslated() bool {
return len(p.translations) > 1
}
// Translations returns the translations excluding the current Page.
func (p *Page) Translations() Pages {
translations := make(Pages, 0)
for _, t := range p.translations {
if t != p {
translations = append(translations, t)
}
}
return translations
}
func (p *Page) LinkTitle() string {
if len(p.linkTitle) > 0 {
return p.linkTitle
}
return p.Title
}
func (p *Page) shouldBuild() bool {
return shouldBuild(viper.GetBool("buildFuture"), viper.GetBool("buildExpired"),
viper.GetBool("buildDrafts"), p.Draft, p.PublishDate, p.ExpiryDate)
}
func shouldBuild(buildFuture bool, buildExpired bool, buildDrafts bool, Draft bool,
publishDate time.Time, expiryDate time.Time) bool {
if !(buildDrafts || !Draft) {
return false
}
if !buildFuture && !publishDate.IsZero() && publishDate.After(time.Now()) {
return false
}
if !buildExpired && !expiryDate.IsZero() && expiryDate.Before(time.Now()) {
return false
}
return true
}
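
// In other words, with buildDrafts, buildFuture and buildExpired all false
// (the defaults), a page is skipped when it is a draft, when its publish date
// lies in the future, or when its expiry date has passed; each flag
// re-enables the corresponding case.
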
func (p *Page) IsDraft() bool {
return p.Draft
}
func (p *Page) IsFuture() bool {
if p.PublishDate.IsZero() {
return false
}
return p.PublishDate.After(time.Now())
}
func (p *Page) IsExpired() bool {
if p.ExpiryDate.IsZero() {
return false
}
return p.ExpiryDate.Before(time.Now())
}
func (p *Page) Permalink() (string, error) {
// TODO(bep) np permalink
if p.IsNode() {
return p.Node.Permalink(), nil
}
link, err := p.permalink()
if err != nil {
return "", err
}
return link.String(), nil
}
func (p *Page) URL() string {
if p.URLPath.URL != "" {
// This is the url set in front matter
return p.URLPath.URL
}
// Fall back to the relative permalink.
u, _ := p.RelPermalink()
return u
}
func (p *Page) RelPermalink() (string, error) {
link, err := p.permalink()
if err != nil {
return "", err
}

if viper.GetBool("canonifyURLs") {
// With a baseURL of the form http://myhost.com/sub/, URL replacements for
// the relative permalink will fail later on, so we have to return the URL
// relative to the baseURL.
relpath, err := helpers.GetRelativePath(link.String(), string(p.Site.BaseURL))
if err != nil {
return "", err
}
return "/" + filepath.ToSlash(relpath), nil
}

link.Scheme = ""
link.Host = ""
link.User = nil
link.Opaque = ""
return link.String(), nil
}
var ErrHasDraftAndPublished = errors.New("both draft and published parameters were found in page's frontmatter")
func (p *Page) update(f interface{}) error {
if f == nil {
return fmt.Errorf("no metadata found")
}
m := f.(map[string]interface{})
var err error
var draft, published, isCJKLanguage *bool

for k, v := range m {
loki := strings.ToLower(k)
switch loki {
case "title":
p.Title = cast.ToString(v)
case "linktitle":
p.linkTitle = cast.ToString(v)
case "description":
p.Description = cast.ToString(v)
p.Params["description"] = p.Description
case "slug":
p.Slug = cast.ToString(v)
case "url":
if url := cast.ToString(v); strings.HasPrefix(url, "http://") || strings.HasPrefix(url, "https://") {
return fmt.Errorf("Only relative URLs are supported, %v provided", url)
}
p.URLPath.URL = cast.ToString(v)
case "type":
p.contentType = cast.ToString(v)
case "extension", "ext":
p.extension = cast.ToString(v)
case "keywords":
p.Keywords = cast.ToStringSlice(v)
case "date":
p.Date, err = cast.ToTimeE(v)
if err != nil {
jww.ERROR.Printf("Failed to parse date '%v' in page %s", v, p.File.Path())
}
case "lastmod":
p.Lastmod, err = cast.ToTimeE(v)
if err != nil {
jww.ERROR.Printf("Failed to parse lastmod '%v' in page %s", v, p.File.Path())
}
case "publishdate", "pubdate":
p.PublishDate, err = cast.ToTimeE(v)
if err != nil {
jww.ERROR.Printf("Failed to parse publishdate '%v' in page %s", v, p.File.Path())
}
case "expirydate", "unpublishdate":
p.ExpiryDate, err = cast.ToTimeE(v)
if err != nil {
jww.ERROR.Printf("Failed to parse expirydate '%v' in page %s", v, p.File.Path())
}
case "draft":
draft = new(bool)
*draft = cast.ToBool(v)
case "published": // Intentionally undocumented
published = new(bool)
*published = cast.ToBool(v)
case "layout":
p.Layout = cast.ToString(v)
case "markup":
p.Markup = cast.ToString(v)
case "weight":
p.Weight = cast.ToInt(v)
case "aliases":
p.Aliases = cast.ToStringSlice(v)
for _, alias := range p.Aliases {
if strings.HasPrefix(alias, "http://") || strings.HasPrefix(alias, "https://") {
return fmt.Errorf("Only relative aliases are supported, %v provided", alias)
}
}
case "status":
p.Status = cast.ToString(v)
case "sitemap":
p.Sitemap = parseSitemap(cast.ToStringMap(v))
case "iscjklanguage":
isCJKLanguage = new(bool)
*isCJKLanguage = cast.ToBool(v)
default:
// If not one of the explicit values, store in Params
switch vv := v.(type) {
case bool:
p.Params[loki] = vv
case string:
p.Params[loki] = vv
case int64, int32, int16, int8, int:
p.Params[loki] = vv
case float64, float32:
p.Params[loki] = vv
case time.Time:
p.Params[loki] = vv
default: // handle array of strings as well
switch vvv := vv.(type) {
case []interface{}:
if len(vvv) > 0 {
switch vvv[0].(type) {
case map[interface{}]interface{}: // structured array from YAML-based front matter
p.Params[loki] = vvv
case map[string]interface{}: // structured array from JSON-based front matter
p.Params[loki] = vvv
default:
a := make([]string, len(vvv))
for i, u := range vvv {
a[i] = cast.ToString(u)
}
p.Params[loki] = a
}
} else {
p.Params[loki] = []string{}
}
default:
p.Params[loki] = vv
}
}
}
}
if draft != nil && published != nil {
p.Draft = *draft
jww.ERROR.Printf("page %s has both draft and published settings in its frontmatter. Using draft.", p.File.Path())
return ErrHasDraftAndPublished
} else if draft != nil {
p.Draft = *draft
} else if published != nil {
p.Draft = !*published
}
if p.Date.IsZero() && viper.GetBool("useModTimeAsFallback") {
fi, err := hugofs.Source().Stat(filepath.Join(helpers.AbsPathify(viper.GetString("contentDir")), p.File.Path()))
if err == nil {
p.Date = fi.ModTime()
}
}
if p.Lastmod.IsZero() {
p.Lastmod = p.Date
}
if isCJKLanguage != nil {
p.isCJKLanguage = *isCJKLanguage
} else if viper.GetBool("hasCJKLanguage") {
if cjk.Match(p.rawContent) {
p.isCJKLanguage = true
} else {
p.isCJKLanguage = false
}
}
return nil
}
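
// A small front matter example (YAML) covering some of the keys handled above,
// purely for illustration:
//
//   title: "My Post"
//   slug: my-post
//   date: 2016-01-02
//   draft: true
//   weight: 10
//   categories: ["hugo"]
//
// The known keys set the corresponding Page fields; anything else, such as
// "categories" here, ends up (lower-cased) in .Params.
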
func (p *Page) GetParam(key string) interface{} {
return p.getParam(key, true)
}
func (p *Page) getParam(key string, stringToLower bool) interface{} {
v := p.Params[strings.ToLower(key)]
if v == nil {
return nil
}

switch val := v.(type) {
case bool:
return val
case string:
if stringToLower {
return strings.ToLower(val)
}
return val
case int64, int32, int16, int8, int:
return cast.ToInt(v)
case float64, float32:
return cast.ToFloat64(v)
case time.Time:
return val
case []string:
if stringToLower {
return helpers.SliceToLower(val)
}
return v
case map[string]interface{}: // JSON and TOML
return v
case map[interface{}]interface{}: // YAML
return v
}

jww.ERROR.Printf("GetParam(\"%s\"): Unknown type %s\n", key, reflect.TypeOf(v))
return nil
}
func (p *Page) HasMenuCurrent(menu string, me *MenuEntry) bool {
// TODO(bep) np menu
if p.IsNode() {
return p.Node.HasMenuCurrent(menu, me)
}
menus := p.Menus()
sectionPagesMenu := helpers.Config().GetString("SectionPagesMenu")
// page is labeled as "shadow-member" of the menu with the same identifier as the section
if sectionPagesMenu != "" && p.Section() != "" && sectionPagesMenu == menu && p.Section() == me.Identifier {
return true
}
if m, ok := menus[menu]; ok {
if me.HasChildren() {
for _, child := range me.Children {
if child.IsEqual(m) {
return true
}
if p.HasMenuCurrent(menu, child) {
return true
}
}
}
}
return false
}
func (p *Page) IsMenuCurrent(menu string, inme *MenuEntry) bool {
// TODO(bep) np menu
if p.IsNode() {
return p.Node.IsMenuCurrent(menu, inme)
}
menus := p.Menus()
if me, ok := menus[menu]; ok {
return me.IsEqual(inme)
}
return false
}
func (p *Page) Menus() PageMenus {
p.pageMenusInit.Do(func() {
p.pageMenus = PageMenus{}
if ms, ok := p.Params["menu"]; ok {
link, _ := p.RelPermalink()
me := MenuEntry{Name: p.LinkTitle(), Weight: p.Weight, URL: link}
// Could be the name of the menu to attach it to
mname, err := cast.ToStringE(ms)
if err == nil {
me.Menu = mname
p.pageMenus[mname] = &me
return
}

// Could be a slice of strings
mnames, err := cast.ToStringSliceE(ms)
if err == nil {
for _, mname := range mnames {
me.Menu = mname
p.pageMenus[mname] = &me
}
return
}

// Could be a structured menu entry
menus, err := cast.ToStringMapE(ms)
if err != nil {
jww.ERROR.Printf("unable to process menus for %q\n", p.Title)
}

for name, menu := range menus {
menuEntry := MenuEntry{Name: p.LinkTitle(), URL: link, Weight: p.Weight, Menu: name}
if menu != nil {
jww.DEBUG.Printf("found menu: %q, in %q\n", name, p.Title)
ime, err := cast.ToStringMapE(menu)
if err != nil {
jww.ERROR.Printf("unable to process menus for %q: %s", p.Title, err)
}

menuEntry.marshallMap(ime)
}
p.pageMenus[name] = &menuEntry
}
}
})
return p.pageMenus
}
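
// The "menu" front matter key can thus take three shapes, e.g. in YAML:
//
//   menu: main                # a single menu name
//   menu: ["main", "footer"]  # several menu names
//   menu:                     # a structured entry
//     main:
//       weight: 20
//       parent: docs
//
// In each case the entry's name defaults to the page's LinkTitle and its
// weight to the page's Weight; a structured entry can override them.
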
func (p *Page) Render(layout ...string) template.HTML {
var l []string
if len(layout) > 0 {
l = layouts(p.Type(), layout[0])
} else {
l = p.layouts()
}

return tpl.ExecuteTemplateToHTML(p, l...)
}
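
// Typical template usage is rendering list items with an alternative layout,
// for example:
//
//   {{ range first 10 .Data.Pages }}
//     {{ .Render "summary" }}
//   {{ end }}
//
// which looks up layouts of the page's type with the "summary" layout name,
// falling back to _default/summary.html (and the theme variants).
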
func (p *Page) determineMarkupType() string {
// Try markup explicitly set in the frontmatter
p.Markup = helpers.GuessType(p.Markup)
if p.Markup == "unknown" {
// Fall back to file extension (might also return "unknown")
p.Markup = helpers.GuessType(p.Source.Ext())
}
return p.Markup
}
func (p *Page) parse(reader io.Reader) error {
psr, err := parser.ReadFrom(reader)
if err != nil {
return err
}
p.renderable = psr.IsRenderable()
p.frontmatter = psr.FrontMatter()
p.rawContent = psr.Content()
p.lang = p.Source.File.Lang()
meta, err := psr.Metadata()
if meta != nil {
if err != nil {
jww.ERROR.Printf("Error parsing page meta data for %s", p.File.Path())
jww.ERROR.Println(err)
return err
}
if err = p.update(meta); err != nil {
return err
}
}
return nil
}
func (p *Page) RawContent() string {
return string(p.rawContent)
}
func (p *Page) SetSourceContent(content []byte) {
p.Source.Content = content
}
func (p *Page) SetSourceMetaData(in interface{}, mark rune) (err error) {
// See https://github.com/spf13/hugo/issues/2458
defer func() {
if r := recover(); r != nil {
var ok bool
err, ok = r.(error)
if !ok {
err = fmt.Errorf("error from marshal: %v", r)
}
}
}()
var by []byte
by, err = parser.InterfaceToFrontMatter(in, mark)
if err != nil {
return
}
by = append(by, '\n')
p.Source.Frontmatter = by
return
}
func (p *Page) SafeSaveSourceAs(path string) error {
return p.saveSourceAs(path, true)
}

func (p *Page) SaveSourceAs(path string) error {
return p.saveSourceAs(path, false)
}

func (p *Page) saveSourceAs(path string, safe bool) error {
b := bp.GetBuffer()
defer bp.PutBuffer(b)

b.Write(p.Source.Frontmatter)
b.Write(p.Source.Content)

bc := make([]byte, b.Len(), b.Len())
copy(bc, b.Bytes())

err := p.saveSource(bc, path, safe)
if err != nil {
return err
}
return nil
}

func (p *Page) saveSource(by []byte, inpath string, safe bool) (err error) {
if !filepath.IsAbs(inpath) {
inpath = helpers.AbsPathify(inpath)
}
jww.INFO.Println("creating", inpath)

if safe {
err = helpers.SafeWriteToDisk(inpath, bytes.NewReader(by), hugofs.Source())
} else {
err = helpers.WriteToDisk(inpath, bytes.NewReader(by), hugofs.Source())
}
if err != nil {
return
}
return nil
}
func (p *Page) SaveSource() error {
return p.SaveSourceAs(p.FullFilePath())
}
func (p *Page) ProcessShortcodes(t tpl.Template) {
tmpContent, tmpContentShortCodes, _ := extractAndRenderShortcodes(string(p.rawContent), p, t)
p.rawContent = []byte(tmpContent)
p.contentShortCodes = tmpContentShortCodes
}
func (p *Page) FullFilePath() string {
return filepath.Join(p.Dir(), p.LogicalName())
}
func (p *Page) TargetPath() (outfile string) {
// TODO(bep) np
switch p.Kind {
case KindHome:
return p.addLangFilepathPrefix(helpers.FilePathSeparator)
case KindSection:
return p.addLangFilepathPrefix(p.sections[0])
case KindTaxonomy:
return p.addLangFilepathPrefix(filepath.Join(p.sections...))
case KindTaxonomyTerm:
return p.addLangFilepathPrefix(filepath.Join(p.sections...))
}
// Always use URL if it's specified
if len(strings.TrimSpace(p.URLPath.URL)) > 2 {
outfile = strings.TrimSpace(p.URLPath.URL)
if strings.HasSuffix(outfile, "/") {
outfile = outfile + "index.html"
}
outfile = filepath.FromSlash(outfile)
return
}

// If there's a Permalink specification, we use that
if override, ok := p.Site.Permalinks[p.Section()]; ok {
var err error
outfile, err = override.Expand(p)
if err == nil {
outfile, _ = url.QueryUnescape(outfile)
if strings.HasSuffix(outfile, "/") {
outfile += "index.html"
}
outfile = filepath.FromSlash(outfile)
outfile = p.addLangFilepathPrefix(outfile)
return
}
}

if len(strings.TrimSpace(p.Slug)) > 0 {
outfile = strings.TrimSpace(p.Slug) + "." + p.Extension()
} else {
// Fall back to filename
outfile = (p.Source.TranslationBaseName() + "." + p.Extension())
}
return p.addLangFilepathPrefix(filepath.Join(strings.ToLower(
p.Site.pathSpec.MakePath(p.Source.Dir())), strings.TrimSpace(outfile)))
}
// Pre-render preparation steps
func (p *Page) prepareLayouts() error {
// TODO(bep): Check the IsRenderable logic.
if p.Kind == KindPage {
var layouts []string
if !p.IsRenderable() {
self := "__" + p.TargetPath()
_, err := p.Site.owner.tmpl.GetClone().New(self).Parse(string(p.Content))
if err != nil {
return err
}
layouts = append(layouts, self)
} else {
layouts = append(layouts, p.layouts()...)
layouts = append(layouts, "_default/single.html")
}
p.layoutsCalculated = layouts
}
return nil
}
// TODO(bep) np naming, move some
func (p *Page) prepareData(s *Site) error {
var pages Pages
p.Data = make(map[string]interface{})
switch p.Kind {
case KindPage:
case KindHome:
pages = s.findPagesByNodeTypeNotIn(KindHome, s.Pages)
case KindSection:
sectionData, ok := s.Sections[p.sections[0]]
if !ok {
return fmt.Errorf("Data for section %s not found", p.Section())
}
pages = sectionData.Pages()
case KindTaxonomy:
plural := p.sections[0]
term := p.sections[1]
singular := s.taxonomiesPluralSingular[plural]
taxonomy := s.Taxonomies[plural].Get(term)
p.Data[singular] = taxonomy
p.Data["Singular"] = singular
p.Data["Plural"] = plural
pages = taxonomy.Pages()
case KindTaxonomyTerm:
plural := p.sections[0]
singular := s.taxonomiesPluralSingular[plural]
p.Data["Singular"] = singular
p.Data["Plural"] = plural
p.Data["Terms"] = s.Taxonomies[plural]
// keep the following just for legacy reasons
p.Data["OrderedIndex"] = p.Data["Terms"]
p.Data["Index"] = p.Data["Terms"]
}
p.Data["Pages"] = pages
p.Pages = pages
// Now we know enough to set missing dates on home page etc.
p.updatePageDates()
return nil
}
func (p *Page) updatePageDates() {
// TODO(bep) np there is a potential issue with page sorting for home pages
// etc. without front matter dates set, but let us wrap our heads around
// that another time.
if !p.Kind.IsNode() {
return
}
if !p.Date.IsZero() {
if p.Lastmod.IsZero() {
p.Lastmod = p.Date
}
return
} else if !p.Lastmod.IsZero() {
if p.Date.IsZero() {
p.Date = p.Lastmod
}
return
}
// Set it to the first non-zero date found in the children
var foundDate, foundLastMod bool
for _, child := range p.Pages {
if !child.Date.IsZero() {
p.Date = child.Date
foundDate = true
}
if !child.Lastmod.IsZero() {
p.Lastmod = child.Lastmod
foundLastMod = true
}
if foundDate && foundLastMod {
break
}
}
}
// Page contains some sync.Once fields, which hold mutexes, so we cannot just
// copy a Page by value. So for the situations where we need a copy (the
// paginators etc.), we do it manually here.
// TODO(bep) np do better
func (p *Page) copy() *Page {
c := &Page{Kind: p.Kind, Node: Node{Site: p.Site}}
c.Title = p.Title
c.Data = p.Data
c.Date = p.Date
c.Lastmod = p.Lastmod
c.language = p.language
c.lang = p.lang
c.URLPath = p.URLPath
return c
}