Rewrite markdown rendering to blackfriday v2 and rewrite orgmode rendering to go-org (#8560)

* Rewrite markdown rendering to blackfriday v2.0

* Fix style

* Fix go mod with golang 1.13

* Fix blackfriday v2 import

* Initial orgmode renderer migration to go-org

* Vendor go-org dependency

* Ignore errors :/

* Update go-org to latest version

* Update test

* Fix go-org test

* Remove unneeded code

* Fix comments

* Fix markdown test

* Fix blackfriday regression rendering HTML block
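
For reference, the new rendering entry points look roughly like the sketch below. This is illustrative only and not part of the diff: blackfriday.Run and the org.New().Parse(...).Write(...) chain are the documented public APIs of blackfriday v2 and go-org, while the import paths, the function names renderMarkdown/renderOrg and the raw* inputs are placeholders.

import (
	"bytes"

	"github.com/niklasfasching/go-org/org"
	"github.com/russross/blackfriday/v2"
)

// renderMarkdown uses blackfriday v2, which replaces the v1 Markdown()/MarkdownCommon() helpers with Run().
func renderMarkdown(rawMarkdown []byte) []byte {
	return blackfriday.Run(rawMarkdown)
}

// renderOrg parses Org mode input into an AST and exports it through go-org's HTML writer.
func renderOrg(rawOrg []byte) (string, error) {
	return org.New().Parse(bytes.NewReader(rawOrg), "./").Write(org.NewHTMLWriter())
}
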
Lauris BH 2019-10-31 03:06:25 +02:00 committed by zeripath
parent 690a8ec502
commit 086a46994a
55 changed files with 5769 additions and 3732 deletions

21
vendor/github.com/niklasfasching/go-org/LICENSE generated vendored Normal file

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2018 Niklas Fasching
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

84
vendor/github.com/niklasfasching/go-org/org/block.go generated vendored Normal file

@@ -0,0 +1,84 @@
package org
import (
"regexp"
"strings"
"unicode"
)
type Block struct {
Name string
Parameters []string
Children []Node
}
type Example struct {
Children []Node
}
var exampleLineRegexp = regexp.MustCompile(`^(\s*):(\s(.*)|\s*$)`)
var beginBlockRegexp = regexp.MustCompile(`(?i)^(\s*)#\+BEGIN_(\w+)(.*)`)
var endBlockRegexp = regexp.MustCompile(`(?i)^(\s*)#\+END_(\w+)`)
func lexBlock(line string) (token, bool) {
if m := beginBlockRegexp.FindStringSubmatch(line); m != nil {
return token{"beginBlock", len(m[1]), strings.ToUpper(m[2]), m}, true
} else if m := endBlockRegexp.FindStringSubmatch(line); m != nil {
return token{"endBlock", len(m[1]), strings.ToUpper(m[2]), m}, true
}
return nilToken, false
}
func lexExample(line string) (token, bool) {
if m := exampleLineRegexp.FindStringSubmatch(line); m != nil {
return token{"example", len(m[1]), m[3], m}, true
}
return nilToken, false
}
func isRawTextBlock(name string) bool { return name == "SRC" || name == "EXAMPLE" || name == "EXPORT" }
func (d *Document) parseBlock(i int, parentStop stopFn) (int, Node) {
t, start := d.tokens[i], i
name, parameters := t.content, strings.Fields(t.matches[3])
trim := trimIndentUpTo(d.tokens[i].lvl)
stop := func(d *Document, i int) bool {
return i >= len(d.tokens) || (d.tokens[i].kind == "endBlock" && d.tokens[i].content == name)
}
block, i := Block{name, parameters, nil}, i+1
if isRawTextBlock(name) {
rawText := ""
for ; !stop(d, i); i++ {
rawText += trim(d.tokens[i].matches[0]) + "\n"
}
block.Children = d.parseRawInline(rawText)
} else {
consumed, nodes := d.parseMany(i, stop)
block.Children = nodes
i += consumed
}
if i < len(d.tokens) && d.tokens[i].kind == "endBlock" && d.tokens[i].content == name {
return i + 1 - start, block
}
return 0, nil
}
func (d *Document) parseExample(i int, parentStop stopFn) (int, Node) {
example, start := Example{}, i
for ; !parentStop(d, i) && d.tokens[i].kind == "example"; i++ {
example.Children = append(example.Children, Text{d.tokens[i].content, true})
}
return i - start, example
}
func trimIndentUpTo(max int) func(string) string {
return func(line string) string {
i := 0
for ; i < len(line) && i < max && unicode.IsSpace(rune(line[i])); i++ {
}
return line[i:]
}
}
func (n Example) String() string { return orgWriter.nodesAsString(n) }
func (n Block) String() string { return orgWriter.nodesAsString(n) }

260
vendor/github.com/niklasfasching/go-org/org/document.go generated vendored Normal file

@@ -0,0 +1,260 @@
// Package org is an Org mode syntax processor.
//
// It parses plain text into an AST and can export it as HTML or pretty printed Org mode syntax.
// Further export formats can be defined using the Writer interface.
//
// You probably want to start with something like this:
// input := strings.NewReader("Your Org mode input")
// html, err := org.New().Parse(input, "./").Write(org.NewHTMLWriter())
// if err != nil {
// log.Fatalf("Something went wrong: %s", err)
// }
// log.Print(html)
package org
import (
"bufio"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"strings"
)
type Configuration struct {
MaxEmphasisNewLines int // Maximum number of newlines inside an emphasis. See org-emphasis-regexp-components newline.
AutoLink bool // Try to convert text passages that look like hyperlinks into hyperlinks.
DefaultSettings map[string]string // Default values for settings that are overridden by setting the same key in BufferSettings.
Log *log.Logger // Log is used to print warnings during parsing.
ReadFile func(filename string) ([]byte, error) // ReadFile is used to read e.g. #+INCLUDE files.
}
// Document contains the parsing results and a pointer to the Configuration.
type Document struct {
*Configuration
Path string // Path of the file containing the parse input - used to resolve relative paths during parsing (e.g. INCLUDE).
tokens []token
Nodes []Node
NamedNodes map[string]Node
Outline Outline // Outline is a Table Of Contents for the document and contains all sections (headline + content).
BufferSettings map[string]string // BufferSettings contains all settings that were parsed from keywords.
Error error
}
// Node represents a parsed node of the document.
type Node interface {
String() string // String returns the pretty printed Org mode string for the node (see OrgWriter).
}
type lexFn = func(line string) (t token, ok bool)
type parseFn = func(*Document, int, stopFn) (int, Node)
type stopFn = func(*Document, int) bool
type token struct {
kind string
lvl int
content string
matches []string
}
var lexFns = []lexFn{
lexHeadline,
lexDrawer,
lexBlock,
lexList,
lexTable,
lexHorizontalRule,
lexKeywordOrComment,
lexFootnoteDefinition,
lexExample,
lexText,
}
var nilToken = token{"nil", -1, "", nil}
var orgWriter = NewOrgWriter()
// New returns a new Configuration with (hopefully) sane defaults.
func New() *Configuration {
return &Configuration{
AutoLink: true,
MaxEmphasisNewLines: 1,
DefaultSettings: map[string]string{
"TODO": "TODO | DONE",
"EXCLUDE_TAGS": "noexport",
"OPTIONS": "toc:t <:t e:t f:t pri:t todo:t tags:t",
},
Log: log.New(os.Stderr, "go-org: ", 0),
ReadFile: ioutil.ReadFile,
}
}
// String returns the pretty printed Org mode string for the given nodes (see OrgWriter).
func String(nodes []Node) string { return orgWriter.nodesAsString(nodes...) }
// Write is called with an instance of the Writer interface to export a parsed Document into another format.
func (d *Document) Write(w Writer) (out string, err error) {
defer func() {
if recovered := recover(); recovered != nil {
err = fmt.Errorf("could not write output: %s", recovered)
}
}()
if d.Error != nil {
return "", d.Error
} else if d.Nodes == nil {
return "", fmt.Errorf("could not write output: parse was not called")
}
w.Before(d)
WriteNodes(w, d.Nodes...)
w.After(d)
return w.String(), err
}
// Parse parses the input into an AST (and some other helpful fields like Outline).
// To allow method chaining, errors are stored in document.Error rather than being returned.
func (c *Configuration) Parse(input io.Reader, path string) (d *Document) {
outlineSection := &Section{}
d = &Document{
Configuration: c,
Outline: Outline{outlineSection, outlineSection, 0},
BufferSettings: map[string]string{},
NamedNodes: map[string]Node{},
Path: path,
}
defer func() {
if recovered := recover(); recovered != nil {
d.Error = fmt.Errorf("could not parse input: %v", recovered)
}
}()
if d.tokens != nil {
d.Error = fmt.Errorf("parse was called multiple times")
}
d.tokenize(input)
_, nodes := d.parseMany(0, func(d *Document, i int) bool { return i >= len(d.tokens) })
d.Nodes = nodes
return d
}
// Silent disables all logging of warnings during parsing.
func (c *Configuration) Silent() *Configuration {
c.Log = log.New(ioutil.Discard, "", 0)
return c
}
func (d *Document) tokenize(input io.Reader) {
d.tokens = []token{}
scanner := bufio.NewScanner(input)
for scanner.Scan() {
d.tokens = append(d.tokens, tokenize(scanner.Text()))
}
if err := scanner.Err(); err != nil {
d.Error = fmt.Errorf("could not tokenize input: %s", err)
}
}
// Get returns the value for key in BufferSettings or DefaultSettings if key does not exist in the former
func (d *Document) Get(key string) string {
if v, ok := d.BufferSettings[key]; ok {
return v
}
if v, ok := d.DefaultSettings[key]; ok {
return v
}
return ""
}
// GetOption returns the value associated with the export option key.
// Currently supported options:
// - < (export timestamps)
// - e (export org entities)
// - f (export footnotes)
// - toc (export table of contents)
// - todo (export headline todo status)
// - pri (export headline priority)
// - tags (export headline tags)
// see https://orgmode.org/manual/Export-settings.html for more information
func (d *Document) GetOption(key string) bool {
get := func(settings map[string]string) string {
for _, field := range strings.Fields(settings["OPTIONS"]) {
if strings.HasPrefix(field, key+":") {
return field[len(key)+1:]
}
}
return ""
}
value := get(d.BufferSettings)
if value == "" {
value = get(d.DefaultSettings)
}
switch value {
case "t":
return true
case "nil":
return false
default:
d.Log.Printf("Bad value for export option %s (%s)", key, value)
return false
}
}
func (d *Document) parseOne(i int, stop stopFn) (consumed int, node Node) {
switch d.tokens[i].kind {
case "unorderedList", "orderedList":
consumed, node = d.parseList(i, stop)
case "tableRow", "tableSeparator":
consumed, node = d.parseTable(i, stop)
case "beginBlock":
consumed, node = d.parseBlock(i, stop)
case "beginDrawer":
consumed, node = d.parseDrawer(i, stop)
case "text":
consumed, node = d.parseParagraph(i, stop)
case "example":
consumed, node = d.parseExample(i, stop)
case "horizontalRule":
consumed, node = d.parseHorizontalRule(i, stop)
case "comment":
consumed, node = d.parseComment(i, stop)
case "keyword":
consumed, node = d.parseKeyword(i, stop)
case "headline":
consumed, node = d.parseHeadline(i, stop)
case "footnoteDefinition":
consumed, node = d.parseFootnoteDefinition(i, stop)
}
if consumed != 0 {
return consumed, node
}
d.Log.Printf("Could not parse token %#v: Falling back to treating it as plain text.", d.tokens[i])
m := plainTextRegexp.FindStringSubmatch(d.tokens[i].matches[0])
d.tokens[i] = token{"text", len(m[1]), m[2], m}
return d.parseOne(i, stop)
}
func (d *Document) parseMany(i int, stop stopFn) (int, []Node) {
start, nodes := i, []Node{}
for i < len(d.tokens) && !stop(d, i) {
consumed, node := d.parseOne(i, stop)
i += consumed
nodes = append(nodes, node)
}
return i - start, nodes
}
func (d *Document) addHeadline(headline *Headline) int {
current := &Section{Headline: headline}
d.Outline.last.add(current)
d.Outline.count++
d.Outline.last = current
return d.Outline.count
}
func tokenize(line string) token {
for _, lexFn := range lexFns {
if token, ok := lexFn(line); ok {
return token
}
}
panic(fmt.Sprintf("could not lex line: %s", line))
}
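
As a side note (not code from the vendored file above), keyword parsing, BufferSettings and GetOption fit together roughly as in this sketch; the input string is made up for illustration:

package main

import (
	"fmt"
	"strings"

	"github.com/niklasfasching/go-org/org"
)

func main() {
	input := "#+OPTIONS: toc:nil todo:t\n* TODO Try go-org\n"
	doc := org.New().Parse(strings.NewReader(input), "./")
	// The #+OPTIONS keyword is stored in BufferSettings and queried per key by GetOption.
	fmt.Println(doc.GetOption("toc"))  // false, because of toc:nil
	fmt.Println(doc.GetOption("todo")) // true, because of todo:t
}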

97
vendor/github.com/niklasfasching/go-org/org/drawer.go generated vendored Normal file

@@ -0,0 +1,97 @@
package org
import (
"regexp"
"strings"
)
type Drawer struct {
Name string
Children []Node
}
type PropertyDrawer struct {
Properties [][]string
}
var beginDrawerRegexp = regexp.MustCompile(`^(\s*):(\S+):\s*$`)
var endDrawerRegexp = regexp.MustCompile(`^(\s*):END:\s*$`)
var propertyRegexp = regexp.MustCompile(`^(\s*):(\S+):(\s+(.*)$|$)`)
func lexDrawer(line string) (token, bool) {
if m := endDrawerRegexp.FindStringSubmatch(line); m != nil {
return token{"endDrawer", len(m[1]), "", m}, true
} else if m := beginDrawerRegexp.FindStringSubmatch(line); m != nil {
return token{"beginDrawer", len(m[1]), strings.ToUpper(m[2]), m}, true
}
return nilToken, false
}
func (d *Document) parseDrawer(i int, parentStop stopFn) (int, Node) {
name := strings.ToUpper(d.tokens[i].content)
if name == "PROPERTIES" {
return d.parsePropertyDrawer(i, parentStop)
}
drawer, start := Drawer{Name: name}, i
i++
stop := func(d *Document, i int) bool {
if parentStop(d, i) {
return true
}
kind := d.tokens[i].kind
return kind == "beginDrawer" || kind == "endDrawer" || kind == "headline"
}
for {
consumed, nodes := d.parseMany(i, stop)
i += consumed
drawer.Children = append(drawer.Children, nodes...)
if i < len(d.tokens) && d.tokens[i].kind == "beginDrawer" {
p := Paragraph{[]Node{Text{":" + d.tokens[i].content + ":", false}}}
drawer.Children = append(drawer.Children, p)
i++
} else {
break
}
}
if i < len(d.tokens) && d.tokens[i].kind == "endDrawer" {
i++
}
return i - start, drawer
}
func (d *Document) parsePropertyDrawer(i int, parentStop stopFn) (int, Node) {
drawer, start := PropertyDrawer{}, i
i++
stop := func(d *Document, i int) bool {
return parentStop(d, i) || (d.tokens[i].kind != "text" && d.tokens[i].kind != "beginDrawer")
}
for ; !stop(d, i); i++ {
m := propertyRegexp.FindStringSubmatch(d.tokens[i].matches[0])
if m == nil {
return 0, nil
}
k, v := strings.ToUpper(m[2]), strings.TrimSpace(m[4])
drawer.Properties = append(drawer.Properties, []string{k, v})
}
if i < len(d.tokens) && d.tokens[i].kind == "endDrawer" {
i++
} else {
return 0, nil
}
return i - start, drawer
}
func (d *PropertyDrawer) Get(key string) (string, bool) {
if d == nil {
return "", false
}
for _, kvPair := range d.Properties {
if kvPair[0] == key {
return kvPair[1], true
}
}
return "", false
}
func (n Drawer) String() string { return orgWriter.nodesAsString(n) }
func (n PropertyDrawer) String() string { return orgWriter.nodesAsString(n) }

35
vendor/github.com/niklasfasching/go-org/org/footnote.go generated vendored Normal file

@@ -0,0 +1,35 @@
package org
import (
"regexp"
)
type FootnoteDefinition struct {
Name string
Children []Node
Inline bool
}
var footnoteDefinitionRegexp = regexp.MustCompile(`^\[fn:([\w-]+)\](\s+(.+)|\s*$)`)
func lexFootnoteDefinition(line string) (token, bool) {
if m := footnoteDefinitionRegexp.FindStringSubmatch(line); m != nil {
return token{"footnoteDefinition", 0, m[1], m}, true
}
return nilToken, false
}
func (d *Document) parseFootnoteDefinition(i int, parentStop stopFn) (int, Node) {
start, name := i, d.tokens[i].content
d.tokens[i] = tokenize(d.tokens[i].matches[2])
stop := func(d *Document, i int) bool {
return parentStop(d, i) ||
(isSecondBlankLine(d, i) && i > start+1) ||
d.tokens[i].kind == "headline" || d.tokens[i].kind == "footnoteDefinition"
}
consumed, nodes := d.parseMany(i, stop)
definition := FootnoteDefinition{name, nodes, false}
return consumed, definition
}
func (n FootnoteDefinition) String() string { return orgWriter.nodesAsString(n) }

27
vendor/github.com/niklasfasching/go-org/org/fuzz.go generated vendored Normal file

@@ -0,0 +1,27 @@
// +build gofuzz
package org
import (
"bytes"
"strings"
)
// Fuzz function to be used by https://github.com/dvyukov/go-fuzz
func Fuzz(input []byte) int {
conf := New().Silent()
d := conf.Parse(bytes.NewReader(input), "")
orgOutput, err := d.Write(NewOrgWriter())
if err != nil {
panic(err)
}
htmlOutputA, err := d.Write(NewHTMLWriter())
if err != nil {
panic(err)
}
htmlOutputB, err := conf.Parse(strings.NewReader(orgOutput), "").Write(NewHTMLWriter())
if htmlOutputA != htmlOutputB {
panic("rendered org results in different html than original input")
}
return 0
}

101
vendor/github.com/niklasfasching/go-org/org/headline.go generated vendored Normal file

@@ -0,0 +1,101 @@
package org
import (
"fmt"
"regexp"
"strings"
"unicode"
)
type Outline struct {
*Section
last *Section
count int
}
type Section struct {
Headline *Headline
Parent *Section
Children []*Section
}
type Headline struct {
Index int
Lvl int
Status string
Priority string
Properties *PropertyDrawer
Title []Node
Tags []string
Children []Node
}
var headlineRegexp = regexp.MustCompile(`^([*]+)\s+(.*)`)
var tagRegexp = regexp.MustCompile(`(.*?)\s+(:[A-Za-z0-9_@#%:]+:\s*$)`)
func lexHeadline(line string) (token, bool) {
if m := headlineRegexp.FindStringSubmatch(line); m != nil {
return token{"headline", len(m[1]), m[2], m}, true
}
return nilToken, false
}
func (d *Document) parseHeadline(i int, parentStop stopFn) (int, Node) {
t, headline := d.tokens[i], Headline{}
headline.Lvl = t.lvl
headline.Index = d.addHeadline(&headline)
text := t.content
todoKeywords := strings.FieldsFunc(d.Get("TODO"), func(r rune) bool { return unicode.IsSpace(r) || r == '|' })
for _, k := range todoKeywords {
if strings.HasPrefix(text, k) && len(text) > len(k) && unicode.IsSpace(rune(text[len(k)])) {
headline.Status = k
text = text[len(k)+1:]
break
}
}
if len(text) >= 4 && text[0:2] == "[#" && strings.Contains("ABC", text[2:3]) && text[3] == ']' {
headline.Priority = text[2:3]
text = strings.TrimSpace(text[4:])
}
if m := tagRegexp.FindStringSubmatch(text); m != nil {
text = m[1]
headline.Tags = strings.FieldsFunc(m[2], func(r rune) bool { return r == ':' })
}
headline.Title = d.parseInline(text)
stop := func(d *Document, i int) bool {
return parentStop(d, i) || d.tokens[i].kind == "headline" && d.tokens[i].lvl <= headline.Lvl
}
consumed, nodes := d.parseMany(i+1, stop)
if len(nodes) > 0 {
if d, ok := nodes[0].(PropertyDrawer); ok {
headline.Properties = &d
nodes = nodes[1:]
}
}
headline.Children = nodes
return consumed + 1, headline
}
func (h Headline) ID() string {
if customID, ok := h.Properties.Get("CUSTOM_ID"); ok {
return customID
}
return fmt.Sprintf("headline-%d", h.Index)
}
func (parent *Section) add(current *Section) {
if parent.Headline == nil || parent.Headline.Lvl < current.Headline.Lvl {
parent.Children = append(parent.Children, current)
current.Parent = parent
} else {
parent.Parent.add(current)
}
}
func (n Headline) String() string { return orgWriter.nodesAsString(n) }

437
vendor/github.com/niklasfasching/go-org/org/html_entity.go generated vendored Normal file

@@ -0,0 +1,437 @@
package org
import "strings"
var htmlEntityReplacer *strings.Replacer
func init() {
htmlEntities = append(htmlEntities,
"---", "—",
"--", "",
"...", "…",
)
htmlEntityReplacer = strings.NewReplacer(htmlEntities...)
}
/*
Generated & copied over using the following elisp
(Setting up go generate seems like a waste for now - I call YAGNI on that one)
(insert (mapconcat
(lambda (entity) (concat "`\\" (car entity) "`, `" (nth 6 entity) "`")) ; entity -> utf8
(remove-if-not 'listp org-entities)
",\n"))
*/
var htmlEntities = []string{
`\Agrave`, `À`,
`\agrave`, `à`,
`\Aacute`, `Á`,
`\aacute`, `á`,
`\Acirc`, `Â`,
`\acirc`, `â`,
`\Amacr`, `Ã`,
`\amacr`, `ã`,
`\Atilde`, `Ã`,
`\atilde`, `ã`,
`\Auml`, `Ä`,
`\auml`, `ä`,
`\Aring`, `Å`,
`\AA`, `Å`,
`\aring`, `å`,
`\AElig`, `Æ`,
`\aelig`, `æ`,
`\Ccedil`, `Ç`,
`\ccedil`, `ç`,
`\Egrave`, `È`,
`\egrave`, `è`,
`\Eacute`, `É`,
`\eacute`, `é`,
`\Ecirc`, `Ê`,
`\ecirc`, `ê`,
`\Euml`, `Ë`,
`\euml`, `ë`,
`\Igrave`, `Ì`,
`\igrave`, `ì`,
`\Iacute`, `Í`,
`\iacute`, `í`,
`\Icirc`, `Î`,
`\icirc`, `î`,
`\Iuml`, `Ï`,
`\iuml`, `ï`,
`\Ntilde`, `Ñ`,
`\ntilde`, `ñ`,
`\Ograve`, `Ò`,
`\ograve`, `ò`,
`\Oacute`, `Ó`,
`\oacute`, `ó`,
`\Ocirc`, `Ô`,
`\ocirc`, `ô`,
`\Otilde`, `Õ`,
`\otilde`, `õ`,
`\Ouml`, `Ö`,
`\ouml`, `ö`,
`\Oslash`, `Ø`,
`\oslash`, `ø`,
`\OElig`, `Œ`,
`\oelig`, `œ`,
`\Scaron`, `Š`,
`\scaron`, `š`,
`\szlig`, `ß`,
`\Ugrave`, `Ù`,
`\ugrave`, `ù`,
`\Uacute`, `Ú`,
`\uacute`, `ú`,
`\Ucirc`, `Û`,
`\ucirc`, `û`,
`\Uuml`, `Ü`,
`\uuml`, `ü`,
`\Yacute`, `Ý`,
`\yacute`, `ý`,
`\Yuml`, `Ÿ`,
`\yuml`, `ÿ`,
`\fnof`, `ƒ`,
`\real`, ``,
`\image`, ``,
`\weierp`, ``,
`\ell`, ``,
`\imath`, `ı`,
`\jmath`, `ȷ`,
`\Alpha`, `Α`,
`\alpha`, `α`,
`\Beta`, `Β`,
`\beta`, `β`,
`\Gamma`, `Γ`,
`\gamma`, `γ`,
`\Delta`, `Δ`,
`\delta`, `δ`,
`\Epsilon`, `Ε`,
`\epsilon`, `ε`,
`\varepsilon`, `ε`,
`\Zeta`, `Ζ`,
`\zeta`, `ζ`,
`\Eta`, `Η`,
`\eta`, `η`,
`\Theta`, `Θ`,
`\theta`, `θ`,
`\thetasym`, `ϑ`,
`\vartheta`, `ϑ`,
`\Iota`, `Ι`,
`\iota`, `ι`,
`\Kappa`, `Κ`,
`\kappa`, `κ`,
`\Lambda`, `Λ`,
`\lambda`, `λ`,
`\Mu`, `Μ`,
`\mu`, `μ`,
`\nu`, `ν`,
`\Nu`, `Ν`,
`\Xi`, `Ξ`,
`\xi`, `ξ`,
`\Omicron`, `Ο`,
`\omicron`, `ο`,
`\Pi`, `Π`,
`\pi`, `π`,
`\Rho`, `Ρ`,
`\rho`, `ρ`,
`\Sigma`, `Σ`,
`\sigma`, `σ`,
`\sigmaf`, `ς`,
`\varsigma`, `ς`,
`\Tau`, `Τ`,
`\Upsilon`, `Υ`,
`\upsih`, `ϒ`,
`\upsilon`, `υ`,
`\Phi`, `Φ`,
`\phi`, `ɸ`,
`\varphi`, `φ`,
`\Chi`, `Χ`,
`\chi`, `χ`,
`\acutex`, `𝑥́`,
`\Psi`, `Ψ`,
`\psi`, `ψ`,
`\tau`, `τ`,
`\Omega`, `Ω`,
`\omega`, `ω`,
`\piv`, `ϖ`,
`\varpi`, `ϖ`,
`\partial`, ``,
`\alefsym`, ``,
`\aleph`, ``,
`\gimel`, ``,
`\beth`, `ב`,
`\dalet`, `ד`,
`\ETH`, `Ð`,
`\eth`, `ð`,
`\THORN`, `Þ`,
`\thorn`, `þ`,
`\dots`, ``,
`\cdots`, ``,
`\hellip`, ``,
`\middot`, `·`,
`\iexcl`, `¡`,
`\iquest`, `¿`,
`\shy`, ``,
`\ndash`, ``,
`\mdash`, ``,
`\quot`, `"`,
`\acute`, `´`,
`\ldquo`, ``,
`\rdquo`, ``,
`\bdquo`, ``,
`\lsquo`, ``,
`\rsquo`, ``,
`\sbquo`, ``,
`\laquo`, `«`,
`\raquo`, `»`,
`\lsaquo`, ``,
`\rsaquo`, ``,
`\circ`, ``,
`\vert`, `|`,
`\vbar`, `|`,
`\brvbar`, `¦`,
`\S`, `§`,
`\sect`, `§`,
`\amp`, `&`,
`\lt`, `<`,
`\gt`, `>`,
`\tilde`, `~`,
`\slash`, `/`,
`\plus`, `+`,
`\under`, `_`,
`\equal`, `=`,
`\asciicirc`, `^`,
`\dagger`, ``,
`\dag`, ``,
`\Dagger`, ``,
`\ddag`, ``,
`\nbsp`, ` `,
`\ensp`, ``,
`\emsp`, ``,
`\thinsp`, ``,
`\curren`, `¤`,
`\cent`, `¢`,
`\pound`, `£`,
`\yen`, `¥`,
`\euro`, ``,
`\EUR`, ``,
`\dollar`, `$`,
`\USD`, `$`,
`\copy`, `©`,
`\reg`, `®`,
`\trade`, ``,
`\minus`, ``,
`\pm`, `±`,
`\plusmn`, `±`,
`\times`, `×`,
`\frasl`, ``,
`\colon`, `:`,
`\div`, `÷`,
`\frac12`, `½`,
`\frac14`, `¼`,
`\frac34`, `¾`,
`\permil`, ``,
`\sup1`, `¹`,
`\sup2`, `²`,
`\sup3`, `³`,
`\radic`, ``,
`\sum`, ``,
`\prod`, ``,
`\micro`, `µ`,
`\macr`, `¯`,
`\deg`, `°`,
`\prime`, ``,
`\Prime`, ``,
`\infin`, ``,
`\infty`, ``,
`\prop`, ``,
`\propto`, ``,
`\not`, `¬`,
`\neg`, `¬`,
`\land`, ``,
`\wedge`, ``,
`\lor`, ``,
`\vee`, ``,
`\cap`, ``,
`\cup`, ``,
`\smile`, ``,
`\frown`, ``,
`\int`, ``,
`\therefore`, ``,
`\there4`, ``,
`\because`, ``,
`\sim`, ``,
`\cong`, ``,
`\simeq`, ``,
`\asymp`, ``,
`\approx`, ``,
`\ne`, ``,
`\neq`, ``,
`\equiv`, ``,
`\triangleq`, ``,
`\le`, ``,
`\leq`, ``,
`\ge`, ``,
`\geq`, ``,
`\lessgtr`, ``,
`\lesseqgtr`, ``,
`\ll`, ``,
`\Ll`, ``,
`\lll`, ``,
`\gg`, ``,
`\Gg`, ``,
`\ggg`, ``,
`\prec`, ``,
`\preceq`, ``,
`\preccurlyeq`, ``,
`\succ`, ``,
`\succeq`, ``,
`\succcurlyeq`, ``,
`\sub`, ``,
`\subset`, ``,
`\sup`, ``,
`\supset`, ``,
`\nsub`, ``,
`\sube`, ``,
`\nsup`, ``,
`\supe`, ``,
`\setminus`, ``,
`\forall`, ``,
`\exist`, ``,
`\exists`, ``,
`\nexist`, ``,
`\nexists`, ``,
`\empty`, ``,
`\emptyset`, ``,
`\isin`, ``,
`\in`, ``,
`\notin`, ``,
`\ni`, ``,
`\nabla`, ``,
`\ang`, ``,
`\angle`, ``,
`\perp`, ``,
`\parallel`, ``,
`\sdot`, ``,
`\cdot`, ``,
`\lceil`, ``,
`\rceil`, ``,
`\lfloor`, ``,
`\rfloor`, ``,
`\lang`, ``,
`\rang`, ``,
`\langle`, ``,
`\rangle`, ``,
`\hbar`, ``,
`\mho`, ``,
`\larr`, ``,
`\leftarrow`, ``,
`\gets`, ``,
`\lArr`, ``,
`\Leftarrow`, ``,
`\uarr`, ``,
`\uparrow`, ``,
`\uArr`, ``,
`\Uparrow`, ``,
`\rarr`, ``,
`\to`, ``,
`\rightarrow`, ``,
`\rArr`, ``,
`\Rightarrow`, ``,
`\darr`, ``,
`\downarrow`, ``,
`\dArr`, ``,
`\Downarrow`, ``,
`\harr`, ``,
`\leftrightarrow`, ``,
`\hArr`, ``,
`\Leftrightarrow`, ``,
`\crarr`, ``,
`\hookleftarrow`, ``,
`\arccos`, `arccos`,
`\arcsin`, `arcsin`,
`\arctan`, `arctan`,
`\arg`, `arg`,
`\cos`, `cos`,
`\cosh`, `cosh`,
`\cot`, `cot`,
`\coth`, `coth`,
`\csc`, `csc`,
`\deg`, `deg`,
`\det`, `det`,
`\dim`, `dim`,
`\exp`, `exp`,
`\gcd`, `gcd`,
`\hom`, `hom`,
`\inf`, `inf`,
`\ker`, `ker`,
`\lg`, `lg`,
`\lim`, `lim`,
`\liminf`, `liminf`,
`\limsup`, `limsup`,
`\ln`, `ln`,
`\log`, `log`,
`\max`, `max`,
`\min`, `min`,
`\Pr`, `Pr`,
`\sec`, `sec`,
`\sin`, `sin`,
`\sinh`, `sinh`,
`\sup`, `sup`,
`\tan`, `tan`,
`\tanh`, `tanh`,
`\bull`, ``,
`\bullet`, ``,
`\star`, ``,
`\lowast`, ``,
`\ast`, `*`,
`\odot`, `ʘ`,
`\oplus`, ``,
`\otimes`, ``,
`\check`, ``,
`\checkmark`, ``,
`\para`, ``,
`\ordf`, `ª`,
`\ordm`, `º`,
`\cedil`, `¸`,
`\oline`, ``,
`\uml`, `¨`,
`\zwnj`, ``,
`\zwj`, ``,
`\lrm`, ``,
`\rlm`, ``,
`\smiley`, ``,
`\blacksmile`, ``,
`\sad`, ``,
`\frowny`, ``,
`\clubs`, ``,
`\clubsuit`, ``,
`\spades`, ``,
`\spadesuit`, ``,
`\hearts`, ``,
`\heartsuit`, ``,
`\diams`, ``,
`\diamondsuit`, ``,
`\diamond`, ``,
`\Diamond`, ``,
`\loz`, ``,
`\_ `, ``,
`\_ `, ``,
`\_ `, ``,
`\_ `, ``,
`\_ `, ``,
`\_ `, ``,
`\_ `, ``,
`\_ `, ``,
`\_ `, ``,
`\_ `, ``,
`\_ `, ``,
`\_ `, ``,
`\_ `, ``,
`\_ `, ``,
`\_ `, ``,
`\_ `, ``,
`\_ `, ``,
`\_ `, ``,
`\_ `, ``,
`\_ `, ``,
}

504
vendor/github.com/niklasfasching/go-org/org/html_writer.go generated vendored Normal file

@@ -0,0 +1,504 @@
package org
import (
"fmt"
"html"
"log"
"regexp"
"strings"
"unicode"
h "golang.org/x/net/html"
"golang.org/x/net/html/atom"
)
// HTMLWriter exports an org document into an HTML document.
type HTMLWriter struct {
ExtendingWriter Writer
HighlightCodeBlock func(source, lang string) string
strings.Builder
document *Document
htmlEscape bool
log *log.Logger
footnotes *footnotes
}
type footnotes struct {
mapping map[string]int
list []*FootnoteDefinition
}
var emphasisTags = map[string][]string{
"/": []string{"<em>", "</em>"},
"*": []string{"<strong>", "</strong>"},
"+": []string{"<del>", "</del>"},
"~": []string{"<code>", "</code>"},
"=": []string{`<code class="verbatim">`, "</code>"},
"_": []string{`<span style="text-decoration: underline;">`, "</span>"},
"_{}": []string{"<sub>", "</sub>"},
"^{}": []string{"<sup>", "</sup>"},
}
var listTags = map[string][]string{
"unordered": []string{"<ul>", "</ul>"},
"ordered": []string{"<ol>", "</ol>"},
"descriptive": []string{"<dl>", "</dl>"},
}
var listItemStatuses = map[string]string{
" ": "unchecked",
"-": "indeterminate",
"X": "checked",
}
var cleanHeadlineTitleForHTMLAnchorRegexp = regexp.MustCompile(`</?a[^>]*>`) // nested a tags are not valid HTML
func NewHTMLWriter() *HTMLWriter {
defaultConfig := New()
return &HTMLWriter{
document: &Document{Configuration: defaultConfig},
log: defaultConfig.Log,
htmlEscape: true,
HighlightCodeBlock: func(source, lang string) string {
return fmt.Sprintf("<div class=\"highlight\">\n<pre>\n%s\n</pre>\n</div>", html.EscapeString(source))
},
footnotes: &footnotes{
mapping: map[string]int{},
},
}
}
func (w *HTMLWriter) emptyClone() *HTMLWriter {
wcopy := *w
wcopy.Builder = strings.Builder{}
return &wcopy
}
func (w *HTMLWriter) nodesAsString(nodes ...Node) string {
tmp := w.emptyClone()
WriteNodes(tmp, nodes...)
return tmp.String()
}
func (w *HTMLWriter) WriterWithExtensions() Writer {
if w.ExtendingWriter != nil {
return w.ExtendingWriter
}
return w
}
func (w *HTMLWriter) Before(d *Document) {
w.document = d
w.log = d.Log
w.WriteOutline(d)
}
func (w *HTMLWriter) After(d *Document) {
w.WriteFootnotes(d)
}
func (w *HTMLWriter) WriteComment(Comment) {}
func (w *HTMLWriter) WritePropertyDrawer(PropertyDrawer) {}
func (w *HTMLWriter) WriteBlock(b Block) {
content := ""
if isRawTextBlock(b.Name) {
exportWriter := w.emptyClone()
exportWriter.htmlEscape = false
WriteNodes(exportWriter, b.Children...)
content = strings.TrimRightFunc(exportWriter.String(), unicode.IsSpace)
} else {
content = w.nodesAsString(b.Children...)
}
switch name := b.Name; {
case name == "SRC":
lang := "text"
if len(b.Parameters) >= 1 {
lang = strings.ToLower(b.Parameters[0])
}
content = w.HighlightCodeBlock(content, lang)
w.WriteString(fmt.Sprintf("<div class=\"src src-%s\">\n%s\n</div>\n", lang, content))
case name == "EXAMPLE":
w.WriteString(`<pre class="example">` + "\n" + content + "\n</pre>\n")
case name == "EXPORT" && len(b.Parameters) >= 1 && strings.ToLower(b.Parameters[0]) == "html":
w.WriteString(content + "\n")
case name == "QUOTE":
w.WriteString("<blockquote>\n" + content + "</blockquote>\n")
case name == "CENTER":
w.WriteString(`<div class="center-block" style="text-align: center; margin-left: auto; margin-right: auto;">` + "\n")
w.WriteString(content + "</div>\n")
default:
w.WriteString(fmt.Sprintf(`<div class="%s-block">`, strings.ToLower(b.Name)) + "\n")
w.WriteString(content + "</div>\n")
}
}
func (w *HTMLWriter) WriteDrawer(d Drawer) {
WriteNodes(w, d.Children...)
}
func (w *HTMLWriter) WriteKeyword(k Keyword) {
if k.Key == "HTML" {
w.WriteString(k.Value + "\n")
}
}
func (w *HTMLWriter) WriteInclude(i Include) {
WriteNodes(w, i.Resolve())
}
func (w *HTMLWriter) WriteFootnoteDefinition(f FootnoteDefinition) {
w.footnotes.updateDefinition(f)
}
func (w *HTMLWriter) WriteFootnotes(d *Document) {
if !w.document.GetOption("f") || len(w.footnotes.list) == 0 {
return
}
w.WriteString(`<div class="footnotes">` + "\n")
w.WriteString(`<hr class="footnotes-separatator">` + "\n")
w.WriteString(`<div class="footnote-definitions">` + "\n")
for i, definition := range w.footnotes.list {
id := i + 1
if definition == nil {
name := ""
for k, v := range w.footnotes.mapping {
if v == i {
name = k
}
}
w.log.Printf("Missing footnote definition for [fn:%s] (#%d)", name, id)
continue
}
w.WriteString(`<div class="footnote-definition">` + "\n")
w.WriteString(fmt.Sprintf(`<sup id="footnote-%d"><a href="#footnote-reference-%d">%d</a></sup>`, id, id, id) + "\n")
w.WriteString(`<div class="footnote-body">` + "\n")
WriteNodes(w, definition.Children...)
w.WriteString("</div>\n</div>\n")
}
w.WriteString("</div>\n</div>\n")
}
func (w *HTMLWriter) WriteOutline(d *Document) {
if w.document.GetOption("toc") && len(d.Outline.Children) != 0 {
w.WriteString("<nav>\n<ul>\n")
for _, section := range d.Outline.Children {
w.writeSection(section)
}
w.WriteString("</ul>\n</nav>\n")
}
}
func (w *HTMLWriter) writeSection(section *Section) {
// NOTE: To satisfy hugo ExtractTOC() check we cannot use `<li>\n` here. Doesn't really matter, just a note.
w.WriteString("<li>")
h := section.Headline
title := cleanHeadlineTitleForHTMLAnchorRegexp.ReplaceAllString(w.nodesAsString(h.Title...), "")
w.WriteString(fmt.Sprintf("<a href=\"#%s\">%s</a>\n", h.ID(), title))
if len(section.Children) != 0 {
w.WriteString("<ul>\n")
for _, section := range section.Children {
w.writeSection(section)
}
w.WriteString("</ul>\n")
}
w.WriteString("</li>\n")
}
func (w *HTMLWriter) WriteHeadline(h Headline) {
for _, excludeTag := range strings.Fields(w.document.Get("EXCLUDE_TAGS")) {
for _, tag := range h.Tags {
if excludeTag == tag {
return
}
}
}
w.WriteString(fmt.Sprintf(`<h%d id="%s">`, h.Lvl, h.ID()) + "\n")
if w.document.GetOption("todo") && h.Status != "" {
w.WriteString(fmt.Sprintf(`<span class="todo">%s</span>`, h.Status) + "\n")
}
if w.document.GetOption("pri") && h.Priority != "" {
w.WriteString(fmt.Sprintf(`<span class="priority">[%s]</span>`, h.Priority) + "\n")
}
WriteNodes(w, h.Title...)
if w.document.GetOption("tags") && len(h.Tags) != 0 {
tags := make([]string, len(h.Tags))
for i, tag := range h.Tags {
tags[i] = fmt.Sprintf(`<span>%s</span>`, tag)
}
w.WriteString("&#xa0;&#xa0;&#xa0;")
w.WriteString(fmt.Sprintf(`<span class="tags">%s</span>`, strings.Join(tags, "&#xa0;")))
}
w.WriteString(fmt.Sprintf("\n</h%d>\n", h.Lvl))
WriteNodes(w, h.Children...)
}
func (w *HTMLWriter) WriteText(t Text) {
if !w.htmlEscape {
w.WriteString(t.Content)
} else if !w.document.GetOption("e") || t.IsRaw {
w.WriteString(html.EscapeString(t.Content))
} else {
w.WriteString(html.EscapeString(htmlEntityReplacer.Replace(t.Content)))
}
}
func (w *HTMLWriter) WriteEmphasis(e Emphasis) {
tags, ok := emphasisTags[e.Kind]
if !ok {
panic(fmt.Sprintf("bad emphasis %#v", e))
}
w.WriteString(tags[0])
WriteNodes(w, e.Content...)
w.WriteString(tags[1])
}
func (w *HTMLWriter) WriteLatexFragment(l LatexFragment) {
w.WriteString(l.OpeningPair)
WriteNodes(w, l.Content...)
w.WriteString(l.ClosingPair)
}
func (w *HTMLWriter) WriteStatisticToken(s StatisticToken) {
w.WriteString(fmt.Sprintf(`<code class="statistic">[%s]</code>`, s.Content))
}
func (w *HTMLWriter) WriteLineBreak(l LineBreak) {
w.WriteString(strings.Repeat("\n", l.Count))
}
func (w *HTMLWriter) WriteExplicitLineBreak(l ExplicitLineBreak) {
w.WriteString("<br>\n")
}
func (w *HTMLWriter) WriteFootnoteLink(l FootnoteLink) {
if !w.document.GetOption("f") {
return
}
i := w.footnotes.add(l)
id := i + 1
w.WriteString(fmt.Sprintf(`<sup class="footnote-reference"><a id="footnote-reference-%d" href="#footnote-%d">%d</a></sup>`, id, id, id))
}
func (w *HTMLWriter) WriteTimestamp(t Timestamp) {
if !w.document.GetOption("<") {
return
}
w.WriteString(`<span class="timestamp">&lt;`)
if t.IsDate {
w.WriteString(t.Time.Format(datestampFormat))
} else {
w.WriteString(t.Time.Format(timestampFormat))
}
if t.Interval != "" {
w.WriteString(" " + t.Interval)
}
w.WriteString(`&gt;</span>`)
}
func (w *HTMLWriter) WriteRegularLink(l RegularLink) {
url := html.EscapeString(l.URL)
if l.Protocol == "file" {
url = url[len("file:"):]
}
description := url
if l.Description != nil {
description = w.nodesAsString(l.Description...)
}
switch l.Kind() {
case "image":
w.WriteString(fmt.Sprintf(`<img src="%s" alt="%s" title="%s" />`, url, description, description))
case "video":
w.WriteString(fmt.Sprintf(`<video src="%s" title="%s">%s</video>`, url, description, description))
default:
w.WriteString(fmt.Sprintf(`<a href="%s">%s</a>`, url, description))
}
}
func (w *HTMLWriter) WriteList(l List) {
tags, ok := listTags[l.Kind]
if !ok {
panic(fmt.Sprintf("bad list kind %#v", l))
}
w.WriteString(tags[0] + "\n")
WriteNodes(w, l.Items...)
w.WriteString(tags[1] + "\n")
}
func (w *HTMLWriter) WriteListItem(li ListItem) {
if li.Status != "" {
w.WriteString(fmt.Sprintf("<li class=\"%s\">\n", listItemStatuses[li.Status]))
} else {
w.WriteString("<li>\n")
}
WriteNodes(w, li.Children...)
w.WriteString("</li>\n")
}
func (w *HTMLWriter) WriteDescriptiveListItem(di DescriptiveListItem) {
if di.Status != "" {
w.WriteString(fmt.Sprintf("<dt class=\"%s\">\n", listItemStatuses[di.Status]))
} else {
w.WriteString("<dt>\n")
}
if len(di.Term) != 0 {
WriteNodes(w, di.Term...)
} else {
w.WriteString("?")
}
w.WriteString("\n</dt>\n")
w.WriteString("<dd>\n")
WriteNodes(w, di.Details...)
w.WriteString("</dd>\n")
}
func (w *HTMLWriter) WriteParagraph(p Paragraph) {
if len(p.Children) == 0 {
return
}
w.WriteString("<p>")
if _, ok := p.Children[0].(LineBreak); !ok {
w.WriteString("\n")
}
WriteNodes(w, p.Children...)
w.WriteString("\n</p>\n")
}
func (w *HTMLWriter) WriteExample(e Example) {
w.WriteString(`<pre class="example">` + "\n")
if len(e.Children) != 0 {
for _, n := range e.Children {
WriteNodes(w, n)
w.WriteString("\n")
}
}
w.WriteString("</pre>\n")
}
func (w *HTMLWriter) WriteHorizontalRule(h HorizontalRule) {
w.WriteString("<hr>\n")
}
func (w *HTMLWriter) WriteNodeWithMeta(n NodeWithMeta) {
out := w.nodesAsString(n.Node)
if p, ok := n.Node.(Paragraph); ok {
if len(p.Children) == 1 && isImageOrVideoLink(p.Children[0]) {
out = w.nodesAsString(p.Children[0])
}
}
for _, attributes := range n.Meta.HTMLAttributes {
out = w.withHTMLAttributes(out, attributes...) + "\n"
}
if len(n.Meta.Caption) != 0 {
caption := ""
for i, ns := range n.Meta.Caption {
if i != 0 {
caption += " "
}
caption += w.nodesAsString(ns...)
}
out = fmt.Sprintf("<figure>\n%s<figcaption>\n%s\n</figcaption>\n</figure>\n", out, caption)
}
w.WriteString(out)
}
func (w *HTMLWriter) WriteNodeWithName(n NodeWithName) {
WriteNodes(w, n.Node)
}
func (w *HTMLWriter) WriteTable(t Table) {
w.WriteString("<table>\n")
beforeFirstContentRow := true
for i, row := range t.Rows {
if row.IsSpecial || len(row.Columns) == 0 {
continue
}
if beforeFirstContentRow {
beforeFirstContentRow = false
if i+1 < len(t.Rows) && len(t.Rows[i+1].Columns) == 0 {
w.WriteString("<thead>\n")
w.writeTableColumns(row.Columns, "th")
w.WriteString("</thead>\n<tbody>\n")
continue
} else {
w.WriteString("<tbody>\n")
}
}
w.writeTableColumns(row.Columns, "td")
}
w.WriteString("</tbody>\n</table>\n")
}
func (w *HTMLWriter) writeTableColumns(columns []Column, tag string) {
w.WriteString("<tr>\n")
for _, column := range columns {
if column.Align == "" {
w.WriteString(fmt.Sprintf("<%s>", tag))
} else {
w.WriteString(fmt.Sprintf(`<%s class="align-%s">`, tag, column.Align))
}
WriteNodes(w, column.Children...)
w.WriteString(fmt.Sprintf("</%s>\n", tag))
}
w.WriteString("</tr>\n")
}
func (w *HTMLWriter) withHTMLAttributes(input string, kvs ...string) string {
if len(kvs)%2 != 0 {
w.log.Printf("withHTMLAttributes: Len of kvs must be even: %#v", kvs)
return input
}
context := &h.Node{Type: h.ElementNode, Data: "body", DataAtom: atom.Body}
nodes, err := h.ParseFragment(strings.NewReader(strings.TrimSpace(input)), context)
if err != nil || len(nodes) != 1 {
w.log.Printf("withHTMLAttributes: Could not extend attributes of %s: %v (%s)", input, nodes, err)
return input
}
out, node := strings.Builder{}, nodes[0]
for i := 0; i < len(kvs)-1; i += 2 {
node.Attr = setHTMLAttribute(node.Attr, strings.TrimPrefix(kvs[i], ":"), kvs[i+1])
}
err = h.Render(&out, nodes[0])
if err != nil {
w.log.Printf("withHTMLAttributes: Could not extend attributes of %s: %v (%s)", input, node, err)
return input
}
return out.String()
}
func setHTMLAttribute(attributes []h.Attribute, k, v string) []h.Attribute {
for i, a := range attributes {
if strings.ToLower(a.Key) == strings.ToLower(k) {
switch strings.ToLower(k) {
case "class", "style":
attributes[i].Val += " " + v
default:
attributes[i].Val = v
}
return attributes
}
}
return append(attributes, h.Attribute{Namespace: "", Key: k, Val: v})
}
func (fs *footnotes) add(f FootnoteLink) int {
if i, ok := fs.mapping[f.Name]; ok && f.Name != "" {
return i
}
fs.list = append(fs.list, f.Definition)
i := len(fs.list) - 1
if f.Name != "" {
fs.mapping[f.Name] = i
}
return i
}
func (fs *footnotes) updateDefinition(f FootnoteDefinition) {
if i, ok := fs.mapping[f.Name]; ok {
fs.list[i] = &f
}
}
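
A minimal sketch of how a consumer (for example Gitea's org renderer) might hook into the writer above; HighlightCodeBlock is the field declared on HTMLWriter, while renderOrgHTML, the class names and the input argument are made up for illustration:

import (
	"fmt"
	"html"
	"strings"

	"github.com/niklasfasching/go-org/org"
)

func renderOrgHTML(input string) (string, error) {
	writer := org.NewHTMLWriter()
	// Override the default hook, which only wraps the escaped source in <div class="highlight"><pre>...</pre></div>.
	writer.HighlightCodeBlock = func(source, lang string) string {
		// Placeholder: a real implementation would call a syntax highlighter here.
		return fmt.Sprintf(`<pre class="src src-%s">%s</pre>`, lang, html.EscapeString(source))
	}
	return org.New().Parse(strings.NewReader(input), "./").Write(writer)
}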

357
vendor/github.com/niklasfasching/go-org/org/inline.go generated vendored Normal file

@@ -0,0 +1,357 @@
package org
import (
"fmt"
"path"
"regexp"
"strings"
"time"
"unicode"
)
type Text struct {
Content string
IsRaw bool
}
type LineBreak struct{ Count int }
type ExplicitLineBreak struct{}
type StatisticToken struct{ Content string }
type Timestamp struct {
Time time.Time
IsDate bool
Interval string
}
type Emphasis struct {
Kind string
Content []Node
}
type LatexFragment struct {
OpeningPair string
ClosingPair string
Content []Node
}
type FootnoteLink struct {
Name string
Definition *FootnoteDefinition
}
type RegularLink struct {
Protocol string
Description []Node
URL string
AutoLink bool
}
var validURLCharacters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-._~:/?#[]@!$&'()*+,;="
var autolinkProtocols = regexp.MustCompile(`^(https?|ftp|file)$`)
var imageExtensionRegexp = regexp.MustCompile(`^[.](png|gif|jpe?g|svg|tiff?)$`)
var videoExtensionRegexp = regexp.MustCompile(`^[.](webm|mp4)$`)
var subScriptSuperScriptRegexp = regexp.MustCompile(`^([_^]){([^{}]+?)}`)
var timestampRegexp = regexp.MustCompile(`^<(\d{4}-\d{2}-\d{2})( [A-Za-z]+)?( \d{2}:\d{2})?( \+\d+[dwmy])?>`)
var footnoteRegexp = regexp.MustCompile(`^\[fn:([\w-]*?)(:(.*?))?\]`)
var statisticsTokenRegexp = regexp.MustCompile(`^\[(\d+/\d+|\d+%)\]`)
var latexFragmentRegexp = regexp.MustCompile(`(?s)^\\begin{(\w+)}(.*)\\end{(\w+)}`)
var timestampFormat = "2006-01-02 Mon 15:04"
var datestampFormat = "2006-01-02 Mon"
var latexFragmentPairs = map[string]string{
`\(`: `\)`,
`\[`: `\]`,
`$$`: `$$`,
}
func (d *Document) parseInline(input string) (nodes []Node) {
previous, current := 0, 0
for current < len(input) {
rewind, consumed, node := 0, 0, (Node)(nil)
switch input[current] {
case '^':
consumed, node = d.parseSubOrSuperScript(input, current)
case '_':
consumed, node = d.parseSubScriptOrEmphasis(input, current)
case '*', '/', '+':
consumed, node = d.parseEmphasis(input, current, false)
case '=', '~':
consumed, node = d.parseEmphasis(input, current, true)
case '[':
consumed, node = d.parseOpeningBracket(input, current)
case '<':
consumed, node = d.parseTimestamp(input, current)
case '\\':
consumed, node = d.parseExplicitLineBreakOrLatexFragment(input, current)
case '$':
consumed, node = d.parseLatexFragment(input, current)
case '\n':
consumed, node = d.parseLineBreak(input, current)
case ':':
rewind, consumed, node = d.parseAutoLink(input, current)
current -= rewind
}
if consumed != 0 {
if current > previous {
nodes = append(nodes, Text{input[previous:current], false})
}
if node != nil {
nodes = append(nodes, node)
}
current += consumed
previous = current
} else {
current++
}
}
if previous < len(input) {
nodes = append(nodes, Text{input[previous:], false})
}
return nodes
}
func (d *Document) parseRawInline(input string) (nodes []Node) {
previous, current := 0, 0
for current < len(input) {
if input[current] == '\n' {
consumed, node := d.parseLineBreak(input, current)
if current > previous {
nodes = append(nodes, Text{input[previous:current], true})
}
nodes = append(nodes, node)
current += consumed
previous = current
} else {
current++
}
}
if previous < len(input) {
nodes = append(nodes, Text{input[previous:], true})
}
return nodes
}
func (d *Document) parseLineBreak(input string, start int) (int, Node) {
i := start
for ; i < len(input) && input[i] == '\n'; i++ {
}
return i - start, LineBreak{i - start}
}
func (d *Document) parseExplicitLineBreakOrLatexFragment(input string, start int) (int, Node) {
switch {
case start+2 >= len(input):
case input[start+1] == '\\' && start != 0 && input[start-1] != '\n':
for i := start + 2; unicode.IsSpace(rune(input[i])); i++ {
if i >= len(input) || input[i] == '\n' {
return i + 1 - start, ExplicitLineBreak{}
}
}
case input[start+1] == '(' || input[start+1] == '[':
return d.parseLatexFragment(input, start)
case strings.Index(input[start:], `\begin{`) == 0:
if m := latexFragmentRegexp.FindStringSubmatch(input[start:]); m != nil {
if open, content, close := m[1], m[2], m[3]; open == close {
openingPair, closingPair := `\begin{`+open+`}`, `\end{`+close+`}`
i := strings.Index(input[start:], closingPair)
return i + len(closingPair), LatexFragment{openingPair, closingPair, d.parseRawInline(content)}
}
}
}
return 0, nil
}
func (d *Document) parseLatexFragment(input string, start int) (int, Node) {
if start+2 >= len(input) {
return 0, nil
}
openingPair := input[start : start+2]
closingPair := latexFragmentPairs[openingPair]
if i := strings.Index(input[start+2:], closingPair); i != -1 {
content := d.parseRawInline(input[start+2 : start+2+i])
return i + 2 + 2, LatexFragment{openingPair, closingPair, content}
}
return 0, nil
}
func (d *Document) parseSubOrSuperScript(input string, start int) (int, Node) {
if m := subScriptSuperScriptRegexp.FindStringSubmatch(input[start:]); m != nil {
return len(m[2]) + 3, Emphasis{m[1] + "{}", []Node{Text{m[2], false}}}
}
return 0, nil
}
func (d *Document) parseSubScriptOrEmphasis(input string, start int) (int, Node) {
if consumed, node := d.parseSubOrSuperScript(input, start); consumed != 0 {
return consumed, node
}
return d.parseEmphasis(input, start, false)
}
func (d *Document) parseOpeningBracket(input string, start int) (int, Node) {
if len(input[start:]) >= 2 && input[start] == '[' && input[start+1] == '[' {
return d.parseRegularLink(input, start)
} else if footnoteRegexp.MatchString(input[start:]) {
return d.parseFootnoteReference(input, start)
} else if statisticsTokenRegexp.MatchString(input[start:]) {
return d.parseStatisticToken(input, start)
}
return 0, nil
}
func (d *Document) parseFootnoteReference(input string, start int) (int, Node) {
if m := footnoteRegexp.FindStringSubmatch(input[start:]); m != nil {
name, definition := m[1], m[3]
if name == "" && definition == "" {
return 0, nil
}
link := FootnoteLink{name, nil}
if definition != "" {
link.Definition = &FootnoteDefinition{name, []Node{Paragraph{d.parseInline(definition)}}, true}
}
return len(m[0]), link
}
return 0, nil
}
func (d *Document) parseStatisticToken(input string, start int) (int, Node) {
if m := statisticsTokenRegexp.FindStringSubmatch(input[start:]); m != nil {
return len(m[1]) + 2, StatisticToken{m[1]}
}
return 0, nil
}
func (d *Document) parseAutoLink(input string, start int) (int, int, Node) {
if !d.AutoLink || start == 0 || len(input[start:]) < 3 || input[start:start+3] != "://" {
return 0, 0, nil
}
protocolStart, protocol := start-1, ""
for ; protocolStart > 0; protocolStart-- {
if !unicode.IsLetter(rune(input[protocolStart])) {
protocolStart++
break
}
}
if m := autolinkProtocols.FindStringSubmatch(input[protocolStart:start]); m != nil {
protocol = m[1]
} else {
return 0, 0, nil
}
end := start
for ; end < len(input) && strings.ContainsRune(validURLCharacters, rune(input[end])); end++ {
}
path := input[start:end]
if path == "://" {
return 0, 0, nil
}
return len(protocol), len(path + protocol), RegularLink{protocol, nil, protocol + path, true}
}
func (d *Document) parseRegularLink(input string, start int) (int, Node) {
input = input[start:]
if len(input) < 3 || input[:2] != "[[" || input[2] == '[' {
return 0, nil
}
end := strings.Index(input, "]]")
if end == -1 {
return 0, nil
}
rawLinkParts := strings.Split(input[2:end], "][")
description, link := ([]Node)(nil), rawLinkParts[0]
if len(rawLinkParts) == 2 {
link, description = rawLinkParts[0], d.parseInline(rawLinkParts[1])
}
if strings.ContainsRune(link, '\n') {
return 0, nil
}
consumed := end + 2
protocol, linkParts := "", strings.SplitN(link, ":", 2)
if len(linkParts) == 2 {
protocol = linkParts[0]
}
return consumed, RegularLink{protocol, description, link, false}
}
func (d *Document) parseTimestamp(input string, start int) (int, Node) {
if m := timestampRegexp.FindStringSubmatch(input[start:]); m != nil {
ddmmyy, hhmm, interval, isDate := m[1], m[3], strings.TrimSpace(m[4]), false
if hhmm == "" {
hhmm, isDate = "00:00", true
}
t, err := time.Parse(timestampFormat, fmt.Sprintf("%s Mon %s", ddmmyy, hhmm))
if err != nil {
return 0, nil
}
timestamp := Timestamp{t, isDate, interval}
return len(m[0]), timestamp
}
return 0, nil
}
func (d *Document) parseEmphasis(input string, start int, isRaw bool) (int, Node) {
marker, i := input[start], start
if !hasValidPreAndBorderChars(input, i) {
return 0, nil
}
for i, consumedNewLines := i+1, 0; i < len(input) && consumedNewLines <= d.MaxEmphasisNewLines; i++ {
if input[i] == '\n' {
consumedNewLines++
}
if input[i] == marker && i != start+1 && hasValidPostAndBorderChars(input, i) {
if isRaw {
return i + 1 - start, Emphasis{input[start : start+1], d.parseRawInline(input[start+1 : i])}
}
return i + 1 - start, Emphasis{input[start : start+1], d.parseInline(input[start+1 : i])}
}
}
return 0, nil
}
// see org-emphasis-regexp-components (emacs elisp variable)
func hasValidPreAndBorderChars(input string, i int) bool {
return (i+1 >= len(input) || isValidBorderChar(rune(input[i+1]))) && (i == 0 || isValidPreChar(rune(input[i-1])))
}
func hasValidPostAndBorderChars(input string, i int) bool {
return (i == 0 || isValidBorderChar(rune(input[i-1]))) && (i+1 >= len(input) || isValidPostChar(rune(input[i+1])))
}
func isValidPreChar(r rune) bool {
return unicode.IsSpace(r) || strings.ContainsRune(`-({'"`, r)
}
func isValidPostChar(r rune) bool {
return unicode.IsSpace(r) || strings.ContainsRune(`-.,:!?;'")}[`, r)
}
func isValidBorderChar(r rune) bool { return !unicode.IsSpace(r) }
func (l RegularLink) Kind() string {
if p := l.Protocol; l.Description != nil || (p != "" && p != "file" && p != "http" && p != "https") {
return "regular"
}
if imageExtensionRegexp.MatchString(path.Ext(l.URL)) {
return "image"
}
if videoExtensionRegexp.MatchString(path.Ext(l.URL)) {
return "video"
}
return "regular"
}
func (n Text) String() string { return orgWriter.nodesAsString(n) }
func (n LineBreak) String() string { return orgWriter.nodesAsString(n) }
func (n ExplicitLineBreak) String() string { return orgWriter.nodesAsString(n) }
func (n StatisticToken) String() string { return orgWriter.nodesAsString(n) }
func (n Emphasis) String() string { return orgWriter.nodesAsString(n) }
func (n LatexFragment) String() string { return orgWriter.nodesAsString(n) }
func (n FootnoteLink) String() string { return orgWriter.nodesAsString(n) }
func (n RegularLink) String() string { return orgWriter.nodesAsString(n) }
func (n Timestamp) String() string { return orgWriter.nodesAsString(n) }

184
vendor/github.com/niklasfasching/go-org/org/keyword.go generated vendored Normal file

@@ -0,0 +1,184 @@
package org
import (
"bytes"
"path/filepath"
"regexp"
"strings"
)
type Comment struct{ Content string }
type Keyword struct {
Key string
Value string
}
type NodeWithName struct {
Name string
Node Node
}
type NodeWithMeta struct {
Node Node
Meta Metadata
}
type Metadata struct {
Caption [][]Node
HTMLAttributes [][]string
}
type Include struct {
Keyword
Resolve func() Node
}
var keywordRegexp = regexp.MustCompile(`^(\s*)#\+([^:]+):(\s+(.*)|$)`)
var commentRegexp = regexp.MustCompile(`^(\s*)#(.*)`)
var includeFileRegexp = regexp.MustCompile(`(?i)^"([^"]+)" (src|example|export) (\w+)$`)
var attributeRegexp = regexp.MustCompile(`(?:^|\s+)(:[-\w]+)\s+(.*)$`)
func lexKeywordOrComment(line string) (token, bool) {
if m := keywordRegexp.FindStringSubmatch(line); m != nil {
return token{"keyword", len(m[1]), m[2], m}, true
} else if m := commentRegexp.FindStringSubmatch(line); m != nil {
return token{"comment", len(m[1]), m[2], m}, true
}
return nilToken, false
}
func (d *Document) parseComment(i int, stop stopFn) (int, Node) {
return 1, Comment{d.tokens[i].content}
}
func (d *Document) parseKeyword(i int, stop stopFn) (int, Node) {
k := parseKeyword(d.tokens[i])
switch k.Key {
case "NAME":
return d.parseNodeWithName(k, i, stop)
case "SETUPFILE":
return d.loadSetupFile(k)
case "INCLUDE":
return d.parseInclude(k)
case "CAPTION", "ATTR_HTML":
consumed, node := d.parseAffiliated(i, stop)
if consumed != 0 {
return consumed, node
}
fallthrough
default:
if _, ok := d.BufferSettings[k.Key]; ok {
d.BufferSettings[k.Key] = strings.Join([]string{d.BufferSettings[k.Key], k.Value}, "\n")
} else {
d.BufferSettings[k.Key] = k.Value
}
return 1, k
}
}
func (d *Document) parseNodeWithName(k Keyword, i int, stop stopFn) (int, Node) {
if stop(d, i+1) {
return 0, nil
}
consumed, node := d.parseOne(i+1, stop)
if consumed == 0 || node == nil {
return 0, nil
}
d.NamedNodes[k.Value] = node
return consumed + 1, NodeWithName{k.Value, node}
}
func (d *Document) parseAffiliated(i int, stop stopFn) (int, Node) {
start, meta := i, Metadata{}
for ; !stop(d, i) && d.tokens[i].kind == "keyword"; i++ {
switch k := parseKeyword(d.tokens[i]); k.Key {
case "CAPTION":
meta.Caption = append(meta.Caption, d.parseInline(k.Value))
case "ATTR_HTML":
attributes, rest := []string{}, k.Value
for {
if k, m := "", attributeRegexp.FindStringSubmatch(rest); m != nil {
k, rest = m[1], m[2]
attributes = append(attributes, k)
if v, m := "", attributeRegexp.FindStringSubmatchIndex(rest); m != nil {
v, rest = rest[:m[0]], rest[m[0]:]
attributes = append(attributes, v)
} else {
attributes = append(attributes, strings.TrimSpace(rest))
break
}
} else {
break
}
}
meta.HTMLAttributes = append(meta.HTMLAttributes, attributes)
default:
return 0, nil
}
}
if stop(d, i) {
return 0, nil
}
consumed, node := d.parseOne(i, stop)
if consumed == 0 || node == nil {
return 0, nil
}
i += consumed
return i - start, NodeWithMeta{node, meta}
}
func parseKeyword(t token) Keyword {
k, v := t.matches[2], t.matches[4]
return Keyword{strings.ToUpper(k), strings.TrimSpace(v)}
}
func (d *Document) parseInclude(k Keyword) (int, Node) {
resolve := func() Node {
d.Log.Printf("Bad include %#v", k)
return k
}
if m := includeFileRegexp.FindStringSubmatch(k.Value); m != nil {
path, kind, lang := m[1], m[2], m[3]
if !filepath.IsAbs(path) {
path = filepath.Join(filepath.Dir(d.Path), path)
}
resolve = func() Node {
bs, err := d.ReadFile(path)
if err != nil {
d.Log.Printf("Bad include %#v: %s", k, err)
return k
}
return Block{strings.ToUpper(kind), []string{lang}, d.parseRawInline(string(bs))}
}
}
return 1, Include{k, resolve}
}
func (d *Document) loadSetupFile(k Keyword) (int, Node) {
path := k.Value
if !filepath.IsAbs(path) {
path = filepath.Join(filepath.Dir(d.Path), path)
}
bs, err := d.ReadFile(path)
if err != nil {
d.Log.Printf("Bad setup file: %#v: %s", k, err)
return 1, k
}
setupDocument := d.Configuration.Parse(bytes.NewReader(bs), path)
if err := setupDocument.Error; err != nil {
d.Log.Printf("Bad setup file: %#v: %s", k, err)
return 1, k
}
for k, v := range setupDocument.BufferSettings {
d.BufferSettings[k] = v
}
return 1, k
}
func (n Comment) String() string { return orgWriter.nodesAsString(n) }
func (n Keyword) String() string { return orgWriter.nodesAsString(n) }
func (n NodeWithMeta) String() string { return orgWriter.nodesAsString(n) }
func (n NodeWithName) String() string { return orgWriter.nodesAsString(n) }
func (n Include) String() string { return orgWriter.nodesAsString(n) }
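
The #+INCLUDE and #+SETUPFILE handling above reads files through Configuration.ReadFile (declared in document.go), so callers can restrict or virtualize file access. A hedged sketch; parseOrgRestricted, repoRoot and README.org are made-up names:

import (
	"fmt"
	"io/ioutil"
	"path/filepath"
	"strings"

	"github.com/niklasfasching/go-org/org"
)

func parseOrgRestricted(source, repoRoot string) *org.Document {
	conf := org.New()
	conf.ReadFile = func(filename string) ([]byte, error) {
		// Hypothetical guard: refuse #+INCLUDE/#+SETUPFILE targets outside repoRoot.
		if !strings.HasPrefix(filepath.Clean(filename), filepath.Clean(repoRoot)) {
			return nil, fmt.Errorf("refusing to read %s", filename)
		}
		return ioutil.ReadFile(filename)
	}
	// Relative include paths resolve against the directory of the path passed to Parse.
	return conf.Parse(strings.NewReader(source), filepath.Join(repoRoot, "README.org"))
}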

114
vendor/github.com/niklasfasching/go-org/org/list.go generated vendored Normal file

@@ -0,0 +1,114 @@
package org
import (
"fmt"
"regexp"
"strings"
"unicode"
)
type List struct {
Kind string
Items []Node
}
type ListItem struct {
Bullet string
Status string
Children []Node
}
type DescriptiveListItem struct {
Bullet string
Status string
Term []Node
Details []Node
}
var unorderedListRegexp = regexp.MustCompile(`^(\s*)([+*-])(\s+(.*)|$)`)
var orderedListRegexp = regexp.MustCompile(`^(\s*)(([0-9]+|[a-zA-Z])[.)])(\s+(.*)|$)`)
var descriptiveListItemRegexp = regexp.MustCompile(`\s::(\s|$)`)
var listItemStatusRegexp = regexp.MustCompile(`\[( |X|-)\]\s`)
func lexList(line string) (token, bool) {
if m := unorderedListRegexp.FindStringSubmatch(line); m != nil {
return token{"unorderedList", len(m[1]), m[4], m}, true
} else if m := orderedListRegexp.FindStringSubmatch(line); m != nil {
return token{"orderedList", len(m[1]), m[5], m}, true
}
return nilToken, false
}
func isListToken(t token) bool {
return t.kind == "unorderedList" || t.kind == "orderedList"
}
func listKind(t token) (string, string) {
kind := ""
switch bullet := t.matches[2]; {
case bullet == "*" || bullet == "+" || bullet == "-":
kind = "unordered"
case unicode.IsLetter(rune(bullet[0])), unicode.IsDigit(rune(bullet[0])):
kind = "ordered"
default:
panic(fmt.Sprintf("bad list bullet '%s': %#v", bullet, t))
}
if descriptiveListItemRegexp.MatchString(t.content) {
return kind, "descriptive"
}
return kind, kind
}
func (d *Document) parseList(i int, parentStop stopFn) (int, Node) {
start, lvl := i, d.tokens[i].lvl
listMainKind, kind := listKind(d.tokens[i])
list := List{Kind: kind}
stop := func(*Document, int) bool {
if parentStop(d, i) || d.tokens[i].lvl != lvl || !isListToken(d.tokens[i]) {
return true
}
itemMainKind, _ := listKind(d.tokens[i])
return itemMainKind != listMainKind
}
for !stop(d, i) {
consumed, node := d.parseListItem(list, i, parentStop)
i += consumed
list.Items = append(list.Items, node)
}
return i - start, list
}
func (d *Document) parseListItem(l List, i int, parentStop stopFn) (int, Node) {
start, nodes, bullet := i, []Node{}, d.tokens[i].matches[2]
minIndent, dterm, content, status := d.tokens[i].lvl+len(bullet), "", d.tokens[i].content, ""
if m := listItemStatusRegexp.FindStringSubmatch(content); m != nil {
status, content = m[1], content[len("[ ] "):]
}
if l.Kind == "descriptive" {
if m := descriptiveListItemRegexp.FindStringIndex(content); m != nil {
dterm, content = content[:m[0]], content[m[1]:]
}
}
d.tokens[i] = tokenize(strings.Repeat(" ", minIndent) + content)
stop := func(d *Document, i int) bool {
if parentStop(d, i) {
return true
}
t := d.tokens[i]
return t.lvl < minIndent && !(t.kind == "text" && t.content == "")
}
for !stop(d, i) && (i <= start+1 || !isSecondBlankLine(d, i)) {
consumed, node := d.parseOne(i, stop)
i += consumed
nodes = append(nodes, node)
}
if l.Kind == "descriptive" {
return i - start, DescriptiveListItem{bullet, status, d.parseInline(dterm), nodes}
}
return i - start, ListItem{bullet, status, nodes}
}
func (n List) String() string { return orgWriter.nodesAsString(n) }
func (n ListItem) String() string { return orgWriter.nodesAsString(n) }
func (n DescriptiveListItem) String() string { return orgWriter.nodesAsString(n) }

334
vendor/github.com/niklasfasching/go-org/org/org_writer.go generated vendored Normal file

@@ -0,0 +1,334 @@
package org
import (
"fmt"
"strings"
"unicode"
"unicode/utf8"
)
// OrgWriter exports an org document into a pretty printed org document.
type OrgWriter struct {
ExtendingWriter Writer
TagsColumn int
strings.Builder
indent string
}
var emphasisOrgBorders = map[string][]string{
"_": []string{"_", "_"},
"*": []string{"*", "*"},
"/": []string{"/", "/"},
"+": []string{"+", "+"},
"~": []string{"~", "~"},
"=": []string{"=", "="},
"_{}": []string{"_{", "}"},
"^{}": []string{"^{", "}"},
}
func NewOrgWriter() *OrgWriter {
return &OrgWriter{
TagsColumn: 77,
}
}
func (w *OrgWriter) WriterWithExtensions() Writer {
if w.ExtendingWriter != nil {
return w.ExtendingWriter
}
return w
}
func (w *OrgWriter) Before(d *Document) {}
func (w *OrgWriter) After(d *Document) {}
func (w *OrgWriter) emptyClone() *OrgWriter {
wcopy := *w
wcopy.Builder = strings.Builder{}
return &wcopy
}
func (w *OrgWriter) nodesAsString(nodes ...Node) string {
tmp := w.emptyClone()
WriteNodes(tmp, nodes...)
return tmp.String()
}
func (w *OrgWriter) WriteHeadline(h Headline) {
tmp := w.emptyClone()
tmp.WriteString(strings.Repeat("*", h.Lvl))
if h.Status != "" {
tmp.WriteString(" " + h.Status)
}
if h.Priority != "" {
tmp.WriteString(" [#" + h.Priority + "]")
}
tmp.WriteString(" ")
WriteNodes(tmp, h.Title...)
hString := tmp.String()
if len(h.Tags) != 0 {
tString := ":" + strings.Join(h.Tags, ":") + ":"
if n := w.TagsColumn - len(tString) - len(hString); n > 0 {
w.WriteString(hString + strings.Repeat(" ", n) + tString)
} else {
w.WriteString(hString + " " + tString)
}
} else {
w.WriteString(hString)
}
w.WriteString("\n")
if len(h.Children) != 0 {
w.WriteString(w.indent)
}
if h.Properties != nil {
WriteNodes(w, *h.Properties)
}
WriteNodes(w, h.Children...)
}
func (w *OrgWriter) WriteBlock(b Block) {
w.WriteString(w.indent + "#+BEGIN_" + b.Name)
if len(b.Parameters) != 0 {
w.WriteString(" " + strings.Join(b.Parameters, " "))
}
w.WriteString("\n")
if isRawTextBlock(b.Name) {
w.WriteString(w.indent)
}
WriteNodes(w, b.Children...)
if !isRawTextBlock(b.Name) {
w.WriteString(w.indent)
}
w.WriteString("#+END_" + b.Name + "\n")
}
func (w *OrgWriter) WriteDrawer(d Drawer) {
w.WriteString(w.indent + ":" + d.Name + ":\n")
WriteNodes(w, d.Children...)
w.WriteString(w.indent + ":END:\n")
}
func (w *OrgWriter) WritePropertyDrawer(d PropertyDrawer) {
w.WriteString(":PROPERTIES:\n")
for _, kvPair := range d.Properties {
k, v := kvPair[0], kvPair[1]
if v != "" {
v = " " + v
}
w.WriteString(fmt.Sprintf(":%s:%s\n", k, v))
}
w.WriteString(":END:\n")
}
func (w *OrgWriter) WriteFootnoteDefinition(f FootnoteDefinition) {
w.WriteString(fmt.Sprintf("[fn:%s]", f.Name))
content := w.nodesAsString(f.Children...)
if content != "" && !unicode.IsSpace(rune(content[0])) {
w.WriteString(" ")
}
w.WriteString(content)
}
func (w *OrgWriter) WriteParagraph(p Paragraph) {
content := w.nodesAsString(p.Children...)
if len(content) > 0 && content[0] != '\n' {
w.WriteString(w.indent)
}
w.WriteString(content + "\n")
}
func (w *OrgWriter) WriteExample(e Example) {
for _, n := range e.Children {
w.WriteString(w.indent + ":")
if content := w.nodesAsString(n); content != "" {
w.WriteString(" " + content)
}
w.WriteString("\n")
}
}
func (w *OrgWriter) WriteKeyword(k Keyword) {
w.WriteString(w.indent + "#+" + k.Key + ":")
if k.Value != "" {
w.WriteString(" " + k.Value)
}
w.WriteString("\n")
}
func (w *OrgWriter) WriteInclude(i Include) {
w.WriteKeyword(i.Keyword)
}
func (w *OrgWriter) WriteNodeWithMeta(n NodeWithMeta) {
for _, ns := range n.Meta.Caption {
w.WriteString("#+CAPTION: ")
WriteNodes(w, ns...)
w.WriteString("\n")
}
for _, attributes := range n.Meta.HTMLAttributes {
w.WriteString("#+ATTR_HTML: ")
w.WriteString(strings.Join(attributes, " ") + "\n")
}
WriteNodes(w, n.Node)
}
func (w *OrgWriter) WriteNodeWithName(n NodeWithName) {
w.WriteString(fmt.Sprintf("#+NAME: %s\n", n.Name))
WriteNodes(w, n.Node)
}
func (w *OrgWriter) WriteComment(c Comment) {
w.WriteString(w.indent + "#" + c.Content + "\n")
}
func (w *OrgWriter) WriteList(l List) { WriteNodes(w, l.Items...) }
func (w *OrgWriter) WriteListItem(li ListItem) {
liWriter := w.emptyClone()
liWriter.indent = w.indent + strings.Repeat(" ", len(li.Bullet)+1)
WriteNodes(liWriter, li.Children...)
content := strings.TrimPrefix(liWriter.String(), liWriter.indent)
w.WriteString(w.indent + li.Bullet)
if li.Status != "" {
w.WriteString(fmt.Sprintf(" [%s]", li.Status))
}
if len(content) > 0 && content[0] == '\n' {
w.WriteString(content)
} else {
w.WriteString(" " + content)
}
}
func (w *OrgWriter) WriteDescriptiveListItem(di DescriptiveListItem) {
w.WriteString(w.indent + di.Bullet)
if di.Status != "" {
w.WriteString(fmt.Sprintf(" [%s]", di.Status))
}
indent := w.indent + strings.Repeat(" ", len(di.Bullet)+1)
if len(di.Term) != 0 {
term := w.nodesAsString(di.Term...)
w.WriteString(" " + term + " ::")
indent = indent + strings.Repeat(" ", len(term)+4)
}
diWriter := w.emptyClone()
diWriter.indent = indent
WriteNodes(diWriter, di.Details...)
details := strings.TrimPrefix(diWriter.String(), diWriter.indent)
if len(details) > 0 && details[0] == '\n' {
w.WriteString(details)
} else {
w.WriteString(" " + details)
}
}
func (w *OrgWriter) WriteTable(t Table) {
for _, row := range t.Rows {
w.WriteString(w.indent)
if len(row.Columns) == 0 {
w.WriteString(`|`)
for i := 0; i < len(t.ColumnInfos); i++ {
w.WriteString(strings.Repeat("-", t.ColumnInfos[i].Len+2))
if i < len(t.ColumnInfos)-1 {
w.WriteString("+")
}
}
w.WriteString(`|`)
} else {
w.WriteString(`|`)
for _, column := range row.Columns {
w.WriteString(` `)
content := w.nodesAsString(column.Children...)
if content == "" {
content = " "
}
n := column.Len - utf8.RuneCountInString(content)
if n < 0 {
n = 0
}
if column.Align == "center" {
if n%2 != 0 {
w.WriteString(" ")
}
w.WriteString(strings.Repeat(" ", n/2) + content + strings.Repeat(" ", n/2))
} else if column.Align == "right" {
w.WriteString(strings.Repeat(" ", n) + content)
} else {
w.WriteString(content + strings.Repeat(" ", n))
}
w.WriteString(` |`)
}
}
w.WriteString("\n")
}
}
func (w *OrgWriter) WriteHorizontalRule(hr HorizontalRule) {
w.WriteString(w.indent + "-----\n")
}
func (w *OrgWriter) WriteText(t Text) { w.WriteString(t.Content) }
func (w *OrgWriter) WriteEmphasis(e Emphasis) {
borders, ok := emphasisOrgBorders[e.Kind]
if !ok {
panic(fmt.Sprintf("bad emphasis %#v", e))
}
w.WriteString(borders[0])
WriteNodes(w, e.Content...)
w.WriteString(borders[1])
}
func (w *OrgWriter) WriteLatexFragment(l LatexFragment) {
w.WriteString(l.OpeningPair)
WriteNodes(w, l.Content...)
w.WriteString(l.ClosingPair)
}
func (w *OrgWriter) WriteStatisticToken(s StatisticToken) {
w.WriteString(fmt.Sprintf("[%s]", s.Content))
}
func (w *OrgWriter) WriteLineBreak(l LineBreak) {
w.WriteString(strings.Repeat("\n"+w.indent, l.Count))
}
func (w *OrgWriter) WriteExplicitLineBreak(l ExplicitLineBreak) {
w.WriteString(`\\` + "\n" + w.indent)
}
func (w *OrgWriter) WriteTimestamp(t Timestamp) {
w.WriteString("<")
if t.IsDate {
w.WriteString(t.Time.Format(datestampFormat))
} else {
w.WriteString(t.Time.Format(timestampFormat))
}
if t.Interval != "" {
w.WriteString(" " + t.Interval)
}
w.WriteString(">")
}
func (w *OrgWriter) WriteFootnoteLink(l FootnoteLink) {
w.WriteString("[fn:" + l.Name)
if l.Definition != nil {
w.WriteString(":")
WriteNodes(w, l.Definition.Children[0].(Paragraph).Children...)
}
w.WriteString("]")
}
func (w *OrgWriter) WriteRegularLink(l RegularLink) {
if l.AutoLink {
w.WriteString(l.URL)
} else if l.Description == nil {
w.WriteString(fmt.Sprintf("[[%s]]", l.URL))
} else {
descriptionWriter := w.emptyClone()
WriteNodes(descriptionWriter, l.Description...)
description := descriptionWriter.String()
w.WriteString(fmt.Sprintf("[[%s][%s]]", l.URL, description))
}
}

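A minimal usage sketch for the pretty printer above; it assumes the package's Configuration API (org.New, Parse) and Document.Write, which live in other files of this vendored package:

package main

import (
	"fmt"
	"strings"

	"github.com/niklasfasching/go-org/org"
)

func main() {
	input := "* TODO Heading [#A] :tag:\nSome *bold* text.\n- a list item\n"
	doc := org.New().Parse(strings.NewReader(input), "example.org")
	out, err := doc.Write(org.NewOrgWriter()) // re-render as formatted org text
	if err != nil {
		panic(err)
	}
	fmt.Print(out)
}
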
View file

@ -0,0 +1,46 @@
package org
import (
"regexp"
"strings"
)
type Paragraph struct{ Children []Node }
type HorizontalRule struct{}
var horizontalRuleRegexp = regexp.MustCompile(`^(\s*)-{5,}\s*$`)
var plainTextRegexp = regexp.MustCompile(`^(\s*)(.*)`)
func lexText(line string) (token, bool) {
if m := plainTextRegexp.FindStringSubmatch(line); m != nil {
return token{"text", len(m[1]), m[2], m}, true
}
return nilToken, false
}
func lexHorizontalRule(line string) (token, bool) {
if m := horizontalRuleRegexp.FindStringSubmatch(line); m != nil {
return token{"horizontalRule", len(m[1]), "", m}, true
}
return nilToken, false
}
func (d *Document) parseParagraph(i int, parentStop stopFn) (int, Node) {
lines, start := []string{d.tokens[i].content}, i
i++
stop := func(d *Document, i int) bool {
return parentStop(d, i) || d.tokens[i].kind != "text" || d.tokens[i].content == ""
}
for ; !stop(d, i); i++ {
lines = append(lines, d.tokens[i].content)
}
consumed := i - start
return consumed, Paragraph{d.parseInline(strings.Join(lines, "\n"))}
}
func (d *Document) parseHorizontalRule(i int, parentStop stopFn) (int, Node) {
return 1, HorizontalRule{}
}
func (n Paragraph) String() string { return orgWriter.nodesAsString(n) }
func (n HorizontalRule) String() string { return orgWriter.nodesAsString(n) }

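A small in-package sketch (assumption: an example file in package org) of the two lexers above:

package org

import "fmt"

func Example_paragraphLexers() {
	t, _ := lexText("  plain text")
	fmt.Println(t.kind, t.lvl, t.content) // "text 2 plain text": leading indent becomes lvl

	_, ok := lexHorizontalRule("-----")
	fmt.Println(ok) // true: five or more dashes on their own line
}
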
130
vendor/github.com/niklasfasching/go-org/org/table.go generated vendored Normal file
View file

@ -0,0 +1,130 @@
package org
import (
"regexp"
"strconv"
"strings"
"unicode/utf8"
)
type Table struct {
Rows []Row
ColumnInfos []ColumnInfo
}
type Row struct {
Columns []Column
IsSpecial bool
}
type Column struct {
Children []Node
*ColumnInfo
}
type ColumnInfo struct {
Align string
Len int
}
var tableSeparatorRegexp = regexp.MustCompile(`^(\s*)(\|[-+|]*)\s*$`)
var tableRowRegexp = regexp.MustCompile(`^(\s*)(\|.*)`)
var columnAlignRegexp = regexp.MustCompile(`^<(l|c|r)>$`)
func lexTable(line string) (token, bool) {
if m := tableSeparatorRegexp.FindStringSubmatch(line); m != nil {
return token{"tableSeparator", len(m[1]), m[2], m}, true
} else if m := tableRowRegexp.FindStringSubmatch(line); m != nil {
return token{"tableRow", len(m[1]), m[2], m}, true
}
return nilToken, false
}
func (d *Document) parseTable(i int, parentStop stopFn) (int, Node) {
rawRows, start := [][]string{}, i
for ; !parentStop(d, i); i++ {
if t := d.tokens[i]; t.kind == "tableRow" {
rawRow := strings.FieldsFunc(d.tokens[i].content, func(r rune) bool { return r == '|' })
for i := range rawRow {
rawRow[i] = strings.TrimSpace(rawRow[i])
}
rawRows = append(rawRows, rawRow)
} else if t.kind == "tableSeparator" {
rawRows = append(rawRows, nil)
} else {
break
}
}
table := Table{nil, getColumnInfos(rawRows)}
for _, rawColumns := range rawRows {
row := Row{nil, isSpecialRow(rawColumns)}
if len(rawColumns) != 0 {
for i := range table.ColumnInfos {
column := Column{nil, &table.ColumnInfos[i]}
if i < len(rawColumns) {
column.Children = d.parseInline(rawColumns[i])
}
row.Columns = append(row.Columns, column)
}
}
table.Rows = append(table.Rows, row)
}
return i - start, table
}
func getColumnInfos(rows [][]string) []ColumnInfo {
columnCount := 0
for _, columns := range rows {
if n := len(columns); n > columnCount {
columnCount = n
}
}
columnInfos := make([]ColumnInfo, columnCount)
for i := 0; i < columnCount; i++ {
countNumeric, countNonNumeric := 0, 0
for _, columns := range rows {
if i >= len(columns) {
continue
}
if n := utf8.RuneCountInString(columns[i]); n > columnInfos[i].Len {
columnInfos[i].Len = n
}
if m := columnAlignRegexp.FindStringSubmatch(columns[i]); m != nil && isSpecialRow(columns) {
switch m[1] {
case "l":
columnInfos[i].Align = "left"
case "c":
columnInfos[i].Align = "center"
case "r":
columnInfos[i].Align = "right"
}
} else if _, err := strconv.ParseFloat(columns[i], 32); err == nil {
countNumeric++
} else if strings.TrimSpace(columns[i]) != "" {
countNonNumeric++
}
}
if columnInfos[i].Align == "" && countNumeric >= countNonNumeric {
columnInfos[i].Align = "right"
}
}
return columnInfos
}
func isSpecialRow(rawColumns []string) bool {
isAlignRow := true
for _, rawColumn := range rawColumns {
if !columnAlignRegexp.MatchString(rawColumn) && rawColumn != "" {
isAlignRow = false
}
}
return isAlignRow
}
func (n Table) String() string { return orgWriter.nodesAsString(n) }

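An in-package sketch (assumption) of how getColumnInfos infers widths and alignment from raw cells, including an alignment-only special row:

package org

import "fmt"

func Example_columnInfos() {
	rows := [][]string{
		{"Name", "Price"},
		nil,         // a tableSeparator line parses to a nil row
		{"", "<r>"}, // special row: right-align the second column
		{"Tea", "3.50"},
	}
	for i, info := range getColumnInfos(rows) {
		fmt.Println(i, info.Align, info.Len)
	}
	// Expected (assumption): column 0 has no explicit alignment and Len 4 ("Name"),
	// column 1 is "right" (from <r>) and Len 5 ("Price").
}
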
19
vendor/github.com/niklasfasching/go-org/org/util.go generated vendored Normal file
View file

@ -0,0 +1,19 @@
package org
func isSecondBlankLine(d *Document, i int) bool {
if i-1 <= 0 {
return false
}
t1, t2 := d.tokens[i-1], d.tokens[i]
if t1.kind == "text" && t2.kind == "text" && t1.content == "" && t2.content == "" {
return true
}
return false
}
func isImageOrVideoLink(n Node) bool {
if l, ok := n.(RegularLink); ok && (l.Kind() == "video" || l.Kind() == "image") {
return true
}
return false
}

103
vendor/github.com/niklasfasching/go-org/org/writer.go generated vendored Normal file
View file

@ -0,0 +1,103 @@
package org
import "fmt"
// Writer is the interface that is used to export a parsed document into a new format. See Document.Write().
type Writer interface {
Before(*Document) // Before is called before any nodes are passed to the writer.
After(*Document) // After is called after all nodes have been passed to the writer.
String() string // String is called at the very end to retrieve the final output.
WriterWithExtensions() Writer
WriteKeyword(Keyword)
WriteInclude(Include)
WriteComment(Comment)
WriteNodeWithMeta(NodeWithMeta)
WriteNodeWithName(NodeWithName)
WriteHeadline(Headline)
WriteBlock(Block)
WriteExample(Example)
WriteDrawer(Drawer)
WritePropertyDrawer(PropertyDrawer)
WriteList(List)
WriteListItem(ListItem)
WriteDescriptiveListItem(DescriptiveListItem)
WriteTable(Table)
WriteHorizontalRule(HorizontalRule)
WriteParagraph(Paragraph)
WriteText(Text)
WriteEmphasis(Emphasis)
WriteLatexFragment(LatexFragment)
WriteStatisticToken(StatisticToken)
WriteExplicitLineBreak(ExplicitLineBreak)
WriteLineBreak(LineBreak)
WriteRegularLink(RegularLink)
WriteTimestamp(Timestamp)
WriteFootnoteLink(FootnoteLink)
WriteFootnoteDefinition(FootnoteDefinition)
}
func WriteNodes(w Writer, nodes ...Node) {
w = w.WriterWithExtensions()
for _, n := range nodes {
switch n := n.(type) {
case Keyword:
w.WriteKeyword(n)
case Include:
w.WriteInclude(n)
case Comment:
w.WriteComment(n)
case NodeWithMeta:
w.WriteNodeWithMeta(n)
case NodeWithName:
w.WriteNodeWithName(n)
case Headline:
w.WriteHeadline(n)
case Block:
w.WriteBlock(n)
case Example:
w.WriteExample(n)
case Drawer:
w.WriteDrawer(n)
case PropertyDrawer:
w.WritePropertyDrawer(n)
case List:
w.WriteList(n)
case ListItem:
w.WriteListItem(n)
case DescriptiveListItem:
w.WriteDescriptiveListItem(n)
case Table:
w.WriteTable(n)
case HorizontalRule:
w.WriteHorizontalRule(n)
case Paragraph:
w.WriteParagraph(n)
case Text:
w.WriteText(n)
case Emphasis:
w.WriteEmphasis(n)
case LatexFragment:
w.WriteLatexFragment(n)
case StatisticToken:
w.WriteStatisticToken(n)
case ExplicitLineBreak:
w.WriteExplicitLineBreak(n)
case LineBreak:
w.WriteLineBreak(n)
case RegularLink:
w.WriteRegularLink(n)
case Timestamp:
w.WriteTimestamp(n)
case FootnoteLink:
w.WriteFootnoteLink(n)
case FootnoteDefinition:
w.WriteFootnoteDefinition(n)
default:
if n != nil {
panic(fmt.Sprintf("bad node %T %#v", n, n))
}
}
}
}
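
To close, a sketch of the extension mechanism that WriterWithExtensions enables: a custom writer embeds *OrgWriter, overrides a single method, and registers itself via ExtendingWriter so WriteNodes dispatches back to it. The org.New/Parse/Write calls are assumptions about the package's Configuration API defined in other files:

package main

import (
	"fmt"
	"strings"

	"github.com/niklasfasching/go-org/org"
)

// upperWriter renders plain text in upper case and inherits everything else
// from the embedded OrgWriter.
type upperWriter struct {
	*org.OrgWriter
}

func (w *upperWriter) WriteText(t org.Text) {
	w.WriteString(strings.ToUpper(t.Content))
}

func main() {
	base := org.NewOrgWriter()
	w := &upperWriter{base}
	base.ExtendingWriter = w // WriteNodes now routes every node through w first
	out, err := org.New().Parse(strings.NewReader("hello *world*\n"), "").Write(w)
	if err != nil {
		panic(err)
	}
	fmt.Print(out) // expected (assumption): HELLO *WORLD*
}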