forked from forgejo/forgejo
Change markdown rendering from blackfriday to goldmark (#9533)
* Move to goldmark: Markdown rendering moved from blackfriday to goldmark. Multiple subtle changes were required to the goldmark extensions to keep the current rendering and defaults. We can go further with goldmark's linkify extension and have it work within markdown rendering, making the separate link processor unnecessary. We need to think about how to allow extensions — at present it seems these would be hard to support without recompilation. * linter fixes Co-authored-by: Lauris BH <lauris@nix.lv>
This commit is contained in:
parent
0c07f1de5b
commit
27757714d0
83 changed files with 13838 additions and 6297 deletions
319
vendor/github.com/yuin/goldmark/parser/attribute.go
generated
vendored
Normal file
319
vendor/github.com/yuin/goldmark/parser/attribute.go
generated
vendored
Normal file
|
@ -0,0 +1,319 @@
|
|||
package parser
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"strconv"
|
||||
|
||||
"github.com/yuin/goldmark/text"
|
||||
"github.com/yuin/goldmark/util"
|
||||
)
|
||||
|
||||
var (
	attrNameID    = []byte("id")
	attrNameClass = []byte("class")
)

// An Attribute is an attribute of the markdown elements
type Attribute struct {
	Name  []byte
	Value interface{}
}

// An Attributes is a collection of attributes.
type Attributes []Attribute

// Find returns a (value, true) if an attribute correspond with given name is found, otherwise (nil, false).
func (as Attributes) Find(name []byte) (interface{}, bool) {
	for i := range as {
		if bytes.Equal(as[i].Name, name) {
			return as[i].Value, true
		}
	}
	return nil, false
}

// findUpdate replaces the value of the first attribute whose name equals
// name with cb(old value) and reports whether such an attribute was found.
func (as Attributes) findUpdate(name []byte, cb func(v interface{}) interface{}) bool {
	for i := range as {
		if !bytes.Equal(as[i].Name, name) {
			continue
		}
		as[i].Value = cb(as[i].Value)
		return true
	}
	return false
}
|
||||
|
||||
// ParseAttributes parses a '{...}' attribute list from reader.
// It returns the parsed attributes and true on success; otherwise it
// restores the reader to its starting position and returns nil and false.
func ParseAttributes(reader text.Reader) (Attributes, bool) {
	// Remember where we started so we can rewind on any parse failure.
	savedLine, savedPosition := reader.Position()
	reader.SkipSpaces()
	if reader.Peek() != '{' {
		reader.SetPosition(savedLine, savedPosition)
		return nil, false
	}
	reader.Advance(1)
	attrs := Attributes{}
	for {
		if reader.Peek() == '}' {
			reader.Advance(1)
			return attrs, true
		}
		attr, ok := parseAttribute(reader)
		if !ok {
			reader.SetPosition(savedLine, savedPosition)
			return nil, false
		}
		if bytes.Equal(attr.Name, attrNameClass) {
			// Repeated "class" attributes are merged into one
			// space-separated value instead of being appended.
			if !attrs.findUpdate(attrNameClass, func(v interface{}) interface{} {
				ret := make([]byte, 0, len(v.([]byte))+1+len(attr.Value.([]byte)))
				ret = append(ret, v.([]byte)...)
				return append(append(ret, ' '), attr.Value.([]byte)...)
			}) {
				attrs = append(attrs, attr)
			}
		} else {
			attrs = append(attrs, attr)
		}
		reader.SkipSpaces()
		// Attributes may optionally be separated by a comma.
		if reader.Peek() == ',' {
			reader.Advance(1)
			reader.SkipSpaces()
		}
	}
}
|
||||
|
||||
// parseAttribute parses a single attribute: either the shorthand '#id' /
// '.class' form, or a 'name=value' pair. It returns the parsed Attribute
// and true, or a zero Attribute and false on failure.
func parseAttribute(reader text.Reader) (Attribute, bool) {
	reader.SkipSpaces()
	c := reader.Peek()
	if c == '#' || c == '.' {
		// Shorthand: '#foo' => id="foo", '.foo' => class="foo".
		reader.Advance(1)
		line, _ := reader.PeekLine()
		i := 0
		// Consume until whitespace or punctuation, but allow '_' and '-'.
		for ; i < len(line) && !util.IsSpace(line[i]) && (!util.IsPunct(line[i]) || line[i] == '_' || line[i] == '-'); i++ {
		}
		name := attrNameClass
		if c == '#' {
			name = attrNameID
		}
		reader.Advance(i)
		return Attribute{Name: name, Value: line[0:i]}, true
	}
	line, _ := reader.PeekLine()
	if len(line) == 0 {
		return Attribute{}, false
	}
	// An attribute name must start with a letter, '_' or ':' (XML-like).
	c = line[0]
	if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
		c == '_' || c == ':') {
		return Attribute{}, false
	}
	i := 0
	// Subsequent name characters may also be digits, '.', or '-'.
	for ; i < len(line); i++ {
		c = line[i]
		if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
			(c >= '0' && c <= '9') ||
			c == '_' || c == ':' || c == '.' || c == '-') {
			break
		}
	}
	name := line[:i]
	reader.Advance(i)
	reader.SkipSpaces()
	c = reader.Peek()
	// The long form requires '=' between name and value.
	if c != '=' {
		return Attribute{}, false
	}
	reader.Advance(1)
	reader.SkipSpaces()
	value, ok := parseAttributeValue(reader)
	if !ok {
		return Attribute{}, false
	}
	return Attribute{Name: name, Value: value}, true
}
|
||||
|
||||
func parseAttributeValue(reader text.Reader) (interface{}, bool) {
|
||||
reader.SkipSpaces()
|
||||
c := reader.Peek()
|
||||
var value interface{}
|
||||
ok := false
|
||||
switch c {
|
||||
case text.EOF:
|
||||
return Attribute{}, false
|
||||
case '{':
|
||||
value, ok = ParseAttributes(reader)
|
||||
case '[':
|
||||
value, ok = parseAttributeArray(reader)
|
||||
case '"':
|
||||
value, ok = parseAttributeString(reader)
|
||||
default:
|
||||
if c == '-' || c == '+' || util.IsNumeric(c) {
|
||||
value, ok = parseAttributeNumber(reader)
|
||||
} else {
|
||||
value, ok = parseAttributeOthers(reader)
|
||||
}
|
||||
}
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
return value, true
|
||||
}
|
||||
|
||||
// parseAttributeArray parses a '[v1, v2, ...]' array of attribute values.
// The reader must be positioned on the opening '['.
func parseAttributeArray(reader text.Reader) ([]interface{}, bool) {
	reader.Advance(1) // skip [
	ret := []interface{}{}
	for i := 0; ; i++ {
		c := reader.Peek()
		comma := false
		// Elements after the first may be preceded by a comma.
		if i != 0 && c == ',' {
			reader.Advance(1)
			comma = true
		}
		if c == ']' {
			// NOTE(review): comma can only be true when c == ',', so the
			// rejection branch below appears unreachable; a trailing comma
			// is instead rejected by the value parse failing on ']'.
			if !comma {
				reader.Advance(1)
				return ret, true
			}
			return nil, false
		}
		reader.SkipSpaces()
		value, ok := parseAttributeValue(reader)
		if !ok {
			return nil, false
		}
		ret = append(ret, value)
		reader.SkipSpaces()
	}
}
|
||||
|
||||
// parseAttributeString parses a double-quoted string value with
// JSON-style backslash escapes. The string must open and close on the
// same line; an unterminated string fails with (nil, false).
// The reader must be positioned on the opening '"'.
func parseAttributeString(reader text.Reader) ([]byte, bool) {
	reader.Advance(1) // skip "
	line, _ := reader.PeekLine()
	i := 0
	l := len(line)
	var buf bytes.Buffer
	for i < l {
		c := line[i]
		if c == '\\' && i != l-1 {
			// Decode a JSON-like escape sequence.
			n := line[i+1]
			switch n {
			case '"', '/', '\\':
				buf.WriteByte(n)
				i += 2
			case 'b':
				buf.WriteString("\b")
				i += 2
			case 'f':
				buf.WriteString("\f")
				i += 2
			case 'n':
				buf.WriteString("\n")
				i += 2
			case 'r':
				buf.WriteString("\r")
				i += 2
			case 't':
				buf.WriteString("\t")
				i += 2
			default:
				// Unknown escape: keep the backslash literally.
				buf.WriteByte('\\')
				i++
			}
			continue
		}
		if c == '"' {
			// Closing quote found: consume through it and return the
			// decoded contents.
			reader.Advance(i + 1)
			return buf.Bytes(), true
		}
		buf.WriteByte(c)
		i++
	}
	return nil, false
}
|
||||
|
||||
func scanAttributeDecimal(reader text.Reader, w io.ByteWriter) {
|
||||
for {
|
||||
c := reader.Peek()
|
||||
if util.IsNumeric(c) {
|
||||
w.WriteByte(c)
|
||||
} else {
|
||||
return
|
||||
}
|
||||
reader.Advance(1)
|
||||
}
|
||||
}
|
||||
|
||||
func parseAttributeNumber(reader text.Reader) (float64, bool) {
|
||||
sign := 1
|
||||
c := reader.Peek()
|
||||
if c == '-' {
|
||||
sign = -1
|
||||
reader.Advance(1)
|
||||
} else if c == '+' {
|
||||
reader.Advance(1)
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
if !util.IsNumeric(reader.Peek()) {
|
||||
return 0, false
|
||||
}
|
||||
scanAttributeDecimal(reader, &buf)
|
||||
if buf.Len() == 0 {
|
||||
return 0, false
|
||||
}
|
||||
c = reader.Peek()
|
||||
if c == '.' {
|
||||
buf.WriteByte(c)
|
||||
reader.Advance(1)
|
||||
scanAttributeDecimal(reader, &buf)
|
||||
}
|
||||
c = reader.Peek()
|
||||
if c == 'e' || c == 'E' {
|
||||
buf.WriteByte(c)
|
||||
reader.Advance(1)
|
||||
c = reader.Peek()
|
||||
if c == '-' || c == '+' {
|
||||
buf.WriteByte(c)
|
||||
reader.Advance(1)
|
||||
}
|
||||
scanAttributeDecimal(reader, &buf)
|
||||
}
|
||||
f, err := strconv.ParseFloat(buf.String(), 10)
|
||||
if err != nil {
|
||||
return 0, false
|
||||
}
|
||||
return float64(sign) * f, true
|
||||
}
|
||||
|
||||
// Keyword literals recognized by parseAttributeOthers.
var bytesTrue = []byte("true")
var bytesFalse = []byte("false")
var bytesNull = []byte("null")

// parseAttributeOthers parses a bare-word value: the keywords true/false/
// null (returned as Go bool / nil), or an identifier returned as []byte.
// NOTE(review): line[0] is read unconditionally — this assumes the current
// line is non-empty at the call site; confirm against parseAttributeValue.
func parseAttributeOthers(reader text.Reader) (interface{}, bool) {
	line, _ := reader.PeekLine()
	c := line[0]
	// Must start like an identifier: letter, '_' or ':'.
	if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
		c == '_' || c == ':') {
		return nil, false
	}
	i := 0
	for ; i < len(line); i++ {
		c := line[i]
		if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
			(c >= '0' && c <= '9') ||
			c == '_' || c == ':' || c == '.' || c == '-') {
			break
		}
	}
	value := line[:i]
	reader.Advance(i)
	if bytes.Equal(value, bytesTrue) {
		return true, true
	}
	if bytes.Equal(value, bytesFalse) {
		return false, true
	}
	if bytes.Equal(value, bytesNull) {
		return nil, true
	}
	return value, true
}
|
242
vendor/github.com/yuin/goldmark/parser/atx_heading.go
generated
vendored
Normal file
242
vendor/github.com/yuin/goldmark/parser/atx_heading.go
generated
vendored
Normal file
|
@ -0,0 +1,242 @@
|
|||
package parser
|
||||
|
||||
import (
|
||||
"github.com/yuin/goldmark/ast"
|
||||
"github.com/yuin/goldmark/text"
|
||||
"github.com/yuin/goldmark/util"
|
||||
)
|
||||
|
||||
// A HeadingConfig struct is a data structure that holds configuration of the renderers related to headings.
type HeadingConfig struct {
	AutoHeadingID bool // generate an id attribute for headings that have none
	Attribute     bool // allow '{#id .class}'-style attributes on headings
}

// SetOption implements SetOptioner. Both recognized options are boolean
// flags, so the value argument is intentionally ignored.
func (b *HeadingConfig) SetOption(name OptionName, value interface{}) {
	switch name {
	case optAutoHeadingID:
		b.AutoHeadingID = true
	case optAttribute:
		b.Attribute = true
	}
}
|
||||
|
||||
// A HeadingOption interface sets options for heading parsers.
|
||||
type HeadingOption interface {
|
||||
Option
|
||||
SetHeadingOption(*HeadingConfig)
|
||||
}
|
||||
|
||||
// AutoHeadingID is an option name that enables auto IDs for headings.
|
||||
const optAutoHeadingID OptionName = "AutoHeadingID"
|
||||
|
||||
type withAutoHeadingID struct {
|
||||
}
|
||||
|
||||
func (o *withAutoHeadingID) SetParserOption(c *Config) {
|
||||
c.Options[optAutoHeadingID] = true
|
||||
}
|
||||
|
||||
func (o *withAutoHeadingID) SetHeadingOption(p *HeadingConfig) {
|
||||
p.AutoHeadingID = true
|
||||
}
|
||||
|
||||
// WithAutoHeadingID is a functional option that enables custom heading ids and
|
||||
// auto generated heading ids.
|
||||
func WithAutoHeadingID() HeadingOption {
|
||||
return &withAutoHeadingID{}
|
||||
}
|
||||
|
||||
type withHeadingAttribute struct {
|
||||
Option
|
||||
}
|
||||
|
||||
func (o *withHeadingAttribute) SetHeadingOption(p *HeadingConfig) {
|
||||
p.Attribute = true
|
||||
}
|
||||
|
||||
// WithHeadingAttribute is a functional option that enables custom heading attributes.
|
||||
func WithHeadingAttribute() HeadingOption {
|
||||
return &withHeadingAttribute{WithAttribute()}
|
||||
}
|
||||
|
||||
type atxHeadingParser struct {
|
||||
HeadingConfig
|
||||
}
|
||||
|
||||
// NewATXHeadingParser return a new BlockParser that can parse ATX headings.
|
||||
func NewATXHeadingParser(opts ...HeadingOption) BlockParser {
|
||||
p := &atxHeadingParser{}
|
||||
for _, o := range opts {
|
||||
o.SetHeadingOption(&p.HeadingConfig)
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
func (b *atxHeadingParser) Trigger() []byte {
|
||||
return []byte{'#'}
|
||||
}
|
||||
|
||||
func (b *atxHeadingParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
|
||||
line, segment := reader.PeekLine()
|
||||
pos := pc.BlockOffset()
|
||||
if pos < 0 {
|
||||
return nil, NoChildren
|
||||
}
|
||||
i := pos
|
||||
for ; i < len(line) && line[i] == '#'; i++ {
|
||||
}
|
||||
level := i - pos
|
||||
if i == pos || level > 6 {
|
||||
return nil, NoChildren
|
||||
}
|
||||
l := util.TrimLeftSpaceLength(line[i:])
|
||||
if l == 0 {
|
||||
return nil, NoChildren
|
||||
}
|
||||
start := i + l
|
||||
if start >= len(line) {
|
||||
start = len(line) - 1
|
||||
}
|
||||
origstart := start
|
||||
stop := len(line) - util.TrimRightSpaceLength(line)
|
||||
|
||||
node := ast.NewHeading(level)
|
||||
parsed := false
|
||||
if b.Attribute { // handles special case like ### heading ### {#id}
|
||||
start--
|
||||
closureClose := -1
|
||||
closureOpen := -1
|
||||
for j := start; j < stop; {
|
||||
c := line[j]
|
||||
if util.IsEscapedPunctuation(line, j) {
|
||||
j += 2
|
||||
} else if util.IsSpace(c) && j < stop-1 && line[j+1] == '#' {
|
||||
closureOpen = j + 1
|
||||
k := j + 1
|
||||
for ; k < stop && line[k] == '#'; k++ {
|
||||
}
|
||||
closureClose = k
|
||||
break
|
||||
} else {
|
||||
j++
|
||||
}
|
||||
}
|
||||
if closureClose > 0 {
|
||||
reader.Advance(closureClose)
|
||||
attrs, ok := ParseAttributes(reader)
|
||||
parsed = ok
|
||||
if parsed {
|
||||
for _, attr := range attrs {
|
||||
node.SetAttribute(attr.Name, attr.Value)
|
||||
}
|
||||
node.Lines().Append(text.NewSegment(segment.Start+start+1-segment.Padding, segment.Start+closureOpen-segment.Padding))
|
||||
}
|
||||
}
|
||||
}
|
||||
if !parsed {
|
||||
start = origstart
|
||||
stop := len(line) - util.TrimRightSpaceLength(line)
|
||||
if stop <= start { // empty headings like '##[space]'
|
||||
stop = start
|
||||
} else {
|
||||
i = stop - 1
|
||||
for ; line[i] == '#' && i >= start; i-- {
|
||||
}
|
||||
if i != stop-1 && !util.IsSpace(line[i]) {
|
||||
i = stop - 1
|
||||
}
|
||||
i++
|
||||
stop = i
|
||||
}
|
||||
|
||||
if len(util.TrimRight(line[start:stop], []byte{'#'})) != 0 { // empty heading like '### ###'
|
||||
node.Lines().Append(text.NewSegment(segment.Start+start-segment.Padding, segment.Start+stop-segment.Padding))
|
||||
}
|
||||
}
|
||||
return node, NoChildren
|
||||
}
|
||||
|
||||
func (b *atxHeadingParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
|
||||
return Close
|
||||
}
|
||||
|
||||
func (b *atxHeadingParser) Close(node ast.Node, reader text.Reader, pc Context) {
|
||||
if b.Attribute {
|
||||
_, ok := node.AttributeString("id")
|
||||
if !ok {
|
||||
parseLastLineAttributes(node, reader, pc)
|
||||
}
|
||||
}
|
||||
|
||||
if b.AutoHeadingID {
|
||||
id, ok := node.AttributeString("id")
|
||||
if !ok {
|
||||
generateAutoHeadingID(node.(*ast.Heading), reader, pc)
|
||||
} else {
|
||||
pc.IDs().Put(id.([]byte))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (b *atxHeadingParser) CanInterruptParagraph() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (b *atxHeadingParser) CanAcceptIndentedLine() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// generateAutoHeadingID derives an id attribute for node from its last
// source line (empty headings get an id generated from an empty slice)
// and stores it under "id" via the context's ID registry.
func generateAutoHeadingID(node *ast.Heading, reader text.Reader, pc Context) {
	var line []byte
	lastIndex := node.Lines().Len() - 1
	if lastIndex > -1 {
		lastLine := node.Lines().At(lastIndex)
		line = lastLine.Value(reader.Source())
	}
	headingID := pc.IDs().Generate(line, ast.KindHeading)
	node.SetAttribute(attrNameID, headingID)
}
|
||||
|
||||
// parseLastLineAttributes scans the last source line of node for a
// trailing '{...}' attribute group. If one parses successfully and only
// blank bytes follow it, the attributes are applied to node and the line
// segment is truncated so the group is not rendered as text.
func parseLastLineAttributes(node ast.Node, reader text.Reader, pc Context) {
	lastIndex := node.Lines().Len() - 1
	if lastIndex < 0 { // empty headings
		return
	}
	lastLine := node.Lines().At(lastIndex)
	line := lastLine.Value(reader.Source())
	// Scan a private reader over the line so the main reader is untouched.
	lr := text.NewReader(line)
	var attrs Attributes
	var ok bool
	var start text.Segment
	var sl int
	var end text.Segment
	for {
		c := lr.Peek()
		if c == text.EOF {
			break
		}
		if c == '\\' {
			// Skip an escaped '{' so it cannot open an attribute group.
			lr.Advance(1)
			if lr.Peek() == '{' {
				lr.Advance(1)
			}
			continue
		}
		if c == '{' {
			// Try to parse a group here; on each attempt remember where it
			// started and ended, then rewind so scanning continues.
			// If several '{' groups occur, the last attempt wins.
			sl, start = lr.Position()
			attrs, ok = ParseAttributes(lr)
			_, end = lr.Position()
			lr.SetPosition(sl, start)
		}
		lr.Advance(1)
	}
	// Only apply when the parsed group is the last non-blank content.
	if ok && util.IsBlank(line[end.Stop:]) {
		for _, attr := range attrs {
			node.SetAttribute(attr.Name, attr.Value)
		}
		lastLine.Stop = lastLine.Start + start.Start
		node.Lines().Set(lastIndex, lastLine)
	}
}
|
42
vendor/github.com/yuin/goldmark/parser/auto_link.go
generated
vendored
Normal file
42
vendor/github.com/yuin/goldmark/parser/auto_link.go
generated
vendored
Normal file
|
@ -0,0 +1,42 @@
|
|||
package parser
|
||||
|
||||
import (
|
||||
"github.com/yuin/goldmark/ast"
|
||||
"github.com/yuin/goldmark/text"
|
||||
"github.com/yuin/goldmark/util"
|
||||
)
|
||||
|
||||
type autoLinkParser struct {
|
||||
}
|
||||
|
||||
var defaultAutoLinkParser = &autoLinkParser{}
|
||||
|
||||
// NewAutoLinkParser returns a new InlineParser that parses autolinks
|
||||
// surrounded by '<' and '>' .
|
||||
func NewAutoLinkParser() InlineParser {
|
||||
return defaultAutoLinkParser
|
||||
}
|
||||
|
||||
func (s *autoLinkParser) Trigger() []byte {
|
||||
return []byte{'<'}
|
||||
}
|
||||
|
||||
// Parse recognizes an autolink of the form '<url>' or '<email>' starting
// at the current '<'. It returns an AutoLink node whose value excludes the
// angle brackets, or nil if the content is neither an email nor a URL or
// the closing '>' is missing.
func (s *autoLinkParser) Parse(parent ast.Node, block text.Reader, pc Context) ast.Node {
	line, segment := block.PeekLine()
	// Try email first; fall back to URL. line[1:] skips the opening '<'.
	stop := util.FindEmailIndex(line[1:])
	typ := ast.AutoLinkType(ast.AutoLinkEmail)
	if stop < 0 {
		stop = util.FindURLIndex(line[1:])
		typ = ast.AutoLinkURL
	}
	if stop < 0 {
		return nil
	}
	stop++ // re-add the offset of the skipped '<'
	if stop >= len(line) || line[stop] != '>' {
		return nil
	}
	// Segment covers the text between '<' and '>'.
	value := ast.NewTextSegment(text.NewSegment(segment.Start+1, segment.Start+stop))
	block.Advance(stop + 1)
	return ast.NewAutoLink(typ, value)
}
|
69
vendor/github.com/yuin/goldmark/parser/blockquote.go
generated
vendored
Normal file
69
vendor/github.com/yuin/goldmark/parser/blockquote.go
generated
vendored
Normal file
|
@ -0,0 +1,69 @@
|
|||
package parser
|
||||
|
||||
import (
|
||||
"github.com/yuin/goldmark/ast"
|
||||
"github.com/yuin/goldmark/text"
|
||||
"github.com/yuin/goldmark/util"
|
||||
)
|
||||
|
||||
type blockquoteParser struct {
|
||||
}
|
||||
|
||||
var defaultBlockquoteParser = &blockquoteParser{}
|
||||
|
||||
// NewBlockquoteParser returns a new BlockParser that
|
||||
// parses blockquotes.
|
||||
func NewBlockquoteParser() BlockParser {
|
||||
return defaultBlockquoteParser
|
||||
}
|
||||
|
||||
// process reports whether the current line begins with a block quote
// marker (up to 3 spaces of indent, '>', and an optional following
// space/tab) and, if so, consumes the marker from reader.
func (b *blockquoteParser) process(reader text.Reader) bool {
	line, _ := reader.PeekLine()
	w, pos := util.IndentWidth(line, reader.LineOffset())
	// More than 3 spaces of indent, or no '>', means no block quote.
	if w > 3 || pos >= len(line) || line[pos] != '>' {
		return false
	}
	pos++
	if pos >= len(line) || line[pos] == '\n' {
		reader.Advance(pos)
		return true
	}
	// Consume one optional space or tab after the '>'.
	if line[pos] == ' ' || line[pos] == '\t' {
		pos++
	}
	reader.Advance(pos)
	// A consumed tab contributes extra column width; record it as padding.
	if line[pos-1] == '\t' {
		reader.SetPadding(2)
	}
	return true
}
|
||||
|
||||
func (b *blockquoteParser) Trigger() []byte {
|
||||
return []byte{'>'}
|
||||
}
|
||||
|
||||
func (b *blockquoteParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
|
||||
if b.process(reader) {
|
||||
return ast.NewBlockquote(), HasChildren
|
||||
}
|
||||
return nil, NoChildren
|
||||
}
|
||||
|
||||
func (b *blockquoteParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
|
||||
if b.process(reader) {
|
||||
return Continue | HasChildren
|
||||
}
|
||||
return Close
|
||||
}
|
||||
|
||||
func (b *blockquoteParser) Close(node ast.Node, reader text.Reader, pc Context) {
|
||||
// nothing to do
|
||||
}
|
||||
|
||||
func (b *blockquoteParser) CanInterruptParagraph() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (b *blockquoteParser) CanAcceptIndentedLine() bool {
|
||||
return false
|
||||
}
|
79
vendor/github.com/yuin/goldmark/parser/code_block.go
generated
vendored
Normal file
79
vendor/github.com/yuin/goldmark/parser/code_block.go
generated
vendored
Normal file
|
@ -0,0 +1,79 @@
|
|||
package parser
|
||||
|
||||
import (
|
||||
"github.com/yuin/goldmark/ast"
|
||||
"github.com/yuin/goldmark/text"
|
||||
"github.com/yuin/goldmark/util"
|
||||
)
|
||||
|
||||
type codeBlockParser struct {
|
||||
}
|
||||
|
||||
// CodeBlockParser is a BlockParser implementation that parses indented code blocks.
|
||||
var defaultCodeBlockParser = &codeBlockParser{}
|
||||
|
||||
// NewCodeBlockParser returns a new BlockParser that
|
||||
// parses code blocks.
|
||||
func NewCodeBlockParser() BlockParser {
|
||||
return defaultCodeBlockParser
|
||||
}
|
||||
|
||||
func (b *codeBlockParser) Trigger() []byte {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *codeBlockParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
|
||||
line, segment := reader.PeekLine()
|
||||
pos, padding := util.IndentPosition(line, reader.LineOffset(), 4)
|
||||
if pos < 0 || util.IsBlank(line) {
|
||||
return nil, NoChildren
|
||||
}
|
||||
node := ast.NewCodeBlock()
|
||||
reader.AdvanceAndSetPadding(pos, padding)
|
||||
_, segment = reader.PeekLine()
|
||||
node.Lines().Append(segment)
|
||||
reader.Advance(segment.Len() - 1)
|
||||
return node, NoChildren
|
||||
|
||||
}
|
||||
|
||||
func (b *codeBlockParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
|
||||
line, segment := reader.PeekLine()
|
||||
if util.IsBlank(line) {
|
||||
node.Lines().Append(segment.TrimLeftSpaceWidth(4, reader.Source()))
|
||||
return Continue | NoChildren
|
||||
}
|
||||
pos, padding := util.IndentPosition(line, reader.LineOffset(), 4)
|
||||
if pos < 0 {
|
||||
return Close
|
||||
}
|
||||
reader.AdvanceAndSetPadding(pos, padding)
|
||||
_, segment = reader.PeekLine()
|
||||
node.Lines().Append(segment)
|
||||
reader.Advance(segment.Len() - 1)
|
||||
return Continue | NoChildren
|
||||
}
|
||||
|
||||
func (b *codeBlockParser) Close(node ast.Node, reader text.Reader, pc Context) {
|
||||
// trim trailing blank lines
|
||||
lines := node.Lines()
|
||||
length := lines.Len() - 1
|
||||
source := reader.Source()
|
||||
for length >= 0 {
|
||||
line := lines.At(length)
|
||||
if util.IsBlank(line.Value(source)) {
|
||||
length--
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
lines.SetSliced(0, length+1)
|
||||
}
|
||||
|
||||
func (b *codeBlockParser) CanInterruptParagraph() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (b *codeBlockParser) CanAcceptIndentedLine() bool {
|
||||
return true
|
||||
}
|
83
vendor/github.com/yuin/goldmark/parser/code_span.go
generated
vendored
Normal file
83
vendor/github.com/yuin/goldmark/parser/code_span.go
generated
vendored
Normal file
|
@ -0,0 +1,83 @@
|
|||
package parser
|
||||
|
||||
import (
|
||||
"github.com/yuin/goldmark/ast"
|
||||
"github.com/yuin/goldmark/text"
|
||||
"github.com/yuin/goldmark/util"
|
||||
)
|
||||
|
||||
type codeSpanParser struct {
|
||||
}
|
||||
|
||||
var defaultCodeSpanParser = &codeSpanParser{}
|
||||
|
||||
// NewCodeSpanParser return a new InlineParser that parses inline codes
|
||||
// surrounded by '`' .
|
||||
func NewCodeSpanParser() InlineParser {
|
||||
return defaultCodeSpanParser
|
||||
}
|
||||
|
||||
func (s *codeSpanParser) Trigger() []byte {
|
||||
return []byte{'`'}
|
||||
}
|
||||
|
||||
// Parse recognizes an inline code span opened by a run of backticks and
// closed by a run of exactly the same length (possibly on a later line).
// If no matching closer is found, the opener is returned as plain text.
func (s *codeSpanParser) Parse(parent ast.Node, block text.Reader, pc Context) ast.Node {
	line, startSegment := block.PeekLine()
	// Measure the opening backtick run.
	opener := 0
	for ; opener < len(line) && line[opener] == '`'; opener++ {
	}
	block.Advance(opener)
	// Remember the position just after the opener so we can rewind if no
	// closer is found.
	l, pos := block.Position()
	node := ast.NewCodeSpan()
	for {
		line, segment := block.PeekLine()
		if line == nil {
			// Ran out of input: not a code span; emit the opener as text.
			block.SetPosition(l, pos)
			return ast.NewTextSegment(startSegment.WithStop(startSegment.Start + opener))
		}
		for i := 0; i < len(line); i++ {
			c := line[i]
			if c == '`' {
				oldi := i
				for ; i < len(line) && line[i] == '`'; i++ {
				}
				closure := i - oldi
				// A closer must be a backtick run of exactly the opener's
				// length, not embedded in a longer run.
				if closure == opener && (i >= len(line) || line[i] != '`') {
					segment = segment.WithStop(segment.Start + i - closure)
					if !segment.IsEmpty() {
						node.AppendChild(node, ast.NewRawTextSegment(segment))
					}
					block.Advance(i)
					goto end
				}
			}
		}
		// No closer on this line; keep its text and continue on the next.
		if !util.IsBlank(line) {
			node.AppendChild(node, ast.NewRawTextSegment(segment))
		}
		block.AdvanceLine()
	}
end:
	if !node.IsBlank(block.Source()) {
		// trim first halfspace and last halfspace
		// (per CommonMark, one leading AND one trailing space are stripped
		// only when both are present)
		segment := node.FirstChild().(*ast.Text).Segment
		shouldTrimmed := true
		if !(!segment.IsEmpty() && block.Source()[segment.Start] == ' ') {
			shouldTrimmed = false
		}
		segment = node.LastChild().(*ast.Text).Segment
		if !(!segment.IsEmpty() && block.Source()[segment.Stop-1] == ' ') {
			shouldTrimmed = false
		}
		if shouldTrimmed {
			t := node.FirstChild().(*ast.Text)
			segment := t.Segment
			t.Segment = segment.WithStart(segment.Start + 1)
			t = node.LastChild().(*ast.Text)
			segment = node.LastChild().(*ast.Text).Segment
			t.Segment = segment.WithStop(segment.Stop - 1)
		}

	}
	return node
}
|
242
vendor/github.com/yuin/goldmark/parser/delimiter.go
generated
vendored
Normal file
242
vendor/github.com/yuin/goldmark/parser/delimiter.go
generated
vendored
Normal file
|
@ -0,0 +1,242 @@
|
|||
package parser
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"unicode"
|
||||
|
||||
"github.com/yuin/goldmark/ast"
|
||||
"github.com/yuin/goldmark/text"
|
||||
"github.com/yuin/goldmark/util"
|
||||
)
|
||||
|
||||
// A DelimiterProcessor interface provides a set of functions about
// Delimiter nodes.
type DelimiterProcessor interface {
	// IsDelimiter returns true if given character is a delimiter, otherwise false.
	IsDelimiter(byte) bool

	// CanOpenCloser returns true if given opener can close given closer, otherwise false.
	CanOpenCloser(opener, closer *Delimiter) bool

	// OnMatch will be called when new matched delimiter found.
	// OnMatch should return a new Node correspond to the matched delimiter.
	OnMatch(consumes int) ast.Node
}
|
||||
|
||||
// A Delimiter struct represents a delimiter like '*' of the Markdown text.
type Delimiter struct {
	ast.BaseInline

	Segment text.Segment

	// CanOpen is set true if this delimiter can open a span for a new node.
	// See https://spec.commonmark.org/0.29/#can-open-emphasis for details.
	CanOpen bool

	// CanClose is set true if this delimiter can close a span for a new node.
	// See https://spec.commonmark.org/0.29/#can-open-emphasis for details.
	CanClose bool

	// Length is a remaining length of this delimiter.
	Length int

	// OriginalLength is the original length of this delimiter.
	OriginalLength int

	// Char is a character of this delimiter.
	Char byte

	// PreviousDelimiter is a previous sibling delimiter node of this delimiter.
	PreviousDelimiter *Delimiter

	// NextDelimiter is a next sibling delimiter node of this delimiter.
	NextDelimiter *Delimiter

	// Processor is a DelimiterProcessor associated with this delimiter.
	Processor DelimiterProcessor
}
|
||||
|
||||
// Inline implements Inline.Inline.
|
||||
func (d *Delimiter) Inline() {}
|
||||
|
||||
// Dump implements Node.Dump.
|
||||
func (d *Delimiter) Dump(source []byte, level int) {
|
||||
fmt.Printf("%sDelimiter: \"%s\"\n", strings.Repeat(" ", level), string(d.Text(source)))
|
||||
}
|
||||
|
||||
var kindDelimiter = ast.NewNodeKind("Delimiter")
|
||||
|
||||
// Kind implements Node.Kind
|
||||
func (d *Delimiter) Kind() ast.NodeKind {
|
||||
return kindDelimiter
|
||||
}
|
||||
|
||||
// Text implements Node.Text
|
||||
func (d *Delimiter) Text(source []byte) []byte {
|
||||
return d.Segment.Value(source)
|
||||
}
|
||||
|
||||
// ConsumeCharacters consumes delimiters.
|
||||
func (d *Delimiter) ConsumeCharacters(n int) {
|
||||
d.Length -= n
|
||||
d.Segment = d.Segment.WithStop(d.Segment.Start + d.Length)
|
||||
}
|
||||
|
||||
// CalcComsumption calculates how many characters should be used for opening
// a new span correspond to given closer.
// (The misspelled name is exported and kept for compatibility with callers.)
// NOTE(review): the first branch looks like CommonMark's "multiple of 3"
// emphasis rule — confirm against the spec before relying on that reading.
func (d *Delimiter) CalcComsumption(closer *Delimiter) int {
	if (d.CanClose || closer.CanOpen) && (d.OriginalLength+closer.OriginalLength)%3 == 0 && closer.OriginalLength%3 != 0 {
		return 0
	}
	// Prefer consuming two characters (strong emphasis) when both runs allow it.
	if d.Length >= 2 && closer.Length >= 2 {
		return 2
	}
	return 1
}
|
||||
|
||||
// NewDelimiter returns a new Delimiter node.
|
||||
func NewDelimiter(canOpen, canClose bool, length int, char byte, processor DelimiterProcessor) *Delimiter {
|
||||
c := &Delimiter{
|
||||
BaseInline: ast.BaseInline{},
|
||||
CanOpen: canOpen,
|
||||
CanClose: canClose,
|
||||
Length: length,
|
||||
OriginalLength: length,
|
||||
Char: char,
|
||||
PreviousDelimiter: nil,
|
||||
NextDelimiter: nil,
|
||||
Processor: processor,
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
// ScanDelimiter scans a delimiter by given DelimiterProcessor.
// before is the rune immediately preceding line; min is the minimum run
// length required. It returns a new Delimiter with its CanOpen/CanClose
// flags computed, or nil if no qualifying delimiter run starts at line[0].
// NOTE(review): line[0] is read unconditionally — callers must pass a
// non-empty line.
func ScanDelimiter(line []byte, before rune, min int, processor DelimiterProcessor) *Delimiter {
	i := 0
	c := line[i]
	j := i
	if !processor.IsDelimiter(c) {
		return nil
	}
	// Measure the run of identical delimiter characters.
	for ; j < len(line) && c == line[j]; j++ {
	}
	if (j - i) >= min {
		// Treat end-of-line as a following space.
		after := rune(' ')
		if j != len(line) {
			after = util.ToRune(line, j)
		}

		canOpen, canClose := false, false
		beforeIsPunctuation := unicode.IsPunct(before)
		beforeIsWhitespace := unicode.IsSpace(before)
		afterIsPunctuation := unicode.IsPunct(after)
		afterIsWhitespace := unicode.IsSpace(after)

		// Left-/right-flanking per the CommonMark emphasis rules.
		isLeft := !afterIsWhitespace &&
			(!afterIsPunctuation || beforeIsWhitespace || beforeIsPunctuation)
		isRight := !beforeIsWhitespace &&
			(!beforeIsPunctuation || afterIsWhitespace || afterIsPunctuation)

		if line[i] == '_' {
			// '_' has stricter intraword rules than '*'.
			canOpen = isLeft && (!isRight || beforeIsPunctuation)
			canClose = isRight && (!isLeft || afterIsPunctuation)
		} else {
			canOpen = isLeft
			canClose = isRight
		}
		return NewDelimiter(canOpen, canClose, j-i, c, processor)
	}
	return nil
}
|
||||
|
||||
// ProcessDelimiters processes the delimiter list in the context.
// Processing will be stop when reaching the bottom.
//
// If you implement an inline parser that can have other inline nodes as
// children, you should call this function when nesting span has closed.
func ProcessDelimiters(bottom ast.Node, pc Context) {
	lastDelimiter := pc.LastDelimiter()
	if lastDelimiter == nil {
		return
	}
	// Find the first potential closer at or above bottom.
	var closer *Delimiter
	if bottom != nil {
		if bottom != lastDelimiter {
			// Walk backwards from the last delimiter; the last *Delimiter
			// seen before reaching bottom is the earliest candidate.
			for c := lastDelimiter.PreviousSibling(); c != nil; {
				if d, ok := c.(*Delimiter); ok {
					closer = d
				}
				prev := c.PreviousSibling()
				if prev == bottom {
					break
				}
				c = prev
			}
		}
	} else {
		closer = pc.FirstDelimiter()
	}
	if closer == nil {
		pc.ClearDelimiters(bottom)
		return
	}
	// Standard CommonMark delimiter-stack matching: for each closer, scan
	// back for the nearest compatible opener and emit the matched node.
	for closer != nil {
		if !closer.CanClose {
			closer = closer.NextDelimiter
			continue
		}
		consume := 0
		found := false
		maybeOpener := false
		var opener *Delimiter
		for opener = closer.PreviousDelimiter; opener != nil; opener = opener.PreviousDelimiter {
			if opener.CanOpen && opener.Processor.CanOpenCloser(opener, closer) {
				maybeOpener = true
				consume = opener.CalcComsumption(closer)
				if consume > 0 {
					found = true
					break
				}
			}
		}
		if !found {
			// No usable opener: a closer that can never open anything is
			// dead and can be dropped from the stack.
			if !maybeOpener && !closer.CanOpen {
				pc.RemoveDelimiter(closer)
			}
			closer = closer.NextDelimiter
			continue
		}
		opener.ConsumeCharacters(consume)
		closer.ConsumeCharacters(consume)

		node := opener.Processor.OnMatch(consume)

		// Reparent everything between opener and closer under the new node.
		parent := opener.Parent()
		child := opener.NextSibling()

		for child != nil && child != closer {
			next := child.NextSibling()
			node.AppendChild(node, child)
			child = next
		}
		parent.InsertAfter(parent, opener, node)

		// Delimiters strictly between opener and closer can no longer match.
		for c := opener.NextDelimiter; c != nil && c != closer; {
			next := c.NextDelimiter
			pc.RemoveDelimiter(c)
			c = next
		}

		if opener.Length == 0 {
			pc.RemoveDelimiter(opener)
		}

		// If the closer still has characters left, retry it against
		// earlier openers; otherwise move on to the next delimiter.
		if closer.Length == 0 {
			next := closer.NextDelimiter
			pc.RemoveDelimiter(closer)
			closer = next
		}
	}
	pc.ClearDelimiters(bottom)
}
|
50
vendor/github.com/yuin/goldmark/parser/emphasis.go
generated
vendored
Normal file
50
vendor/github.com/yuin/goldmark/parser/emphasis.go
generated
vendored
Normal file
|
@ -0,0 +1,50 @@
|
|||
package parser
|
||||
|
||||
import (
|
||||
"github.com/yuin/goldmark/ast"
|
||||
"github.com/yuin/goldmark/text"
|
||||
)
|
||||
|
||||
type emphasisDelimiterProcessor struct {
|
||||
}
|
||||
|
||||
func (p *emphasisDelimiterProcessor) IsDelimiter(b byte) bool {
|
||||
return b == '*' || b == '_'
|
||||
}
|
||||
|
||||
func (p *emphasisDelimiterProcessor) CanOpenCloser(opener, closer *Delimiter) bool {
|
||||
return opener.Char == closer.Char
|
||||
}
|
||||
|
||||
func (p *emphasisDelimiterProcessor) OnMatch(consumes int) ast.Node {
|
||||
return ast.NewEmphasis(consumes)
|
||||
}
|
||||
|
||||
var defaultEmphasisDelimiterProcessor = &emphasisDelimiterProcessor{}
|
||||
|
||||
type emphasisParser struct {
|
||||
}
|
||||
|
||||
var defaultEmphasisParser = &emphasisParser{}
|
||||
|
||||
// NewEmphasisParser return a new InlineParser that parses emphasises.
|
||||
func NewEmphasisParser() InlineParser {
|
||||
return defaultEmphasisParser
|
||||
}
|
||||
|
||||
func (s *emphasisParser) Trigger() []byte {
|
||||
return []byte{'*', '_'}
|
||||
}
|
||||
|
||||
func (s *emphasisParser) Parse(parent ast.Node, block text.Reader, pc Context) ast.Node {
|
||||
before := block.PrecendingCharacter()
|
||||
line, segment := block.PeekLine()
|
||||
node := ScanDelimiter(line, before, 1, defaultEmphasisDelimiterProcessor)
|
||||
if node == nil {
|
||||
return nil
|
||||
}
|
||||
node.Segment = segment.WithStop(segment.Start + node.OriginalLength)
|
||||
block.Advance(node.OriginalLength)
|
||||
pc.PushDelimiter(node)
|
||||
return node
|
||||
}
|
110
vendor/github.com/yuin/goldmark/parser/fcode_block.go
generated
vendored
Normal file
110
vendor/github.com/yuin/goldmark/parser/fcode_block.go
generated
vendored
Normal file
|
@ -0,0 +1,110 @@
|
|||
package parser

import (
	"bytes"

	"github.com/yuin/goldmark/ast"
	"github.com/yuin/goldmark/text"
	"github.com/yuin/goldmark/util"
)

// fencedCodeBlockParser is a BlockParser for fenced code blocks delimited
// by runs of '`' or '~'.
type fencedCodeBlockParser struct {
}

var defaultFencedCodeBlockParser = &fencedCodeBlockParser{}

// NewFencedCodeBlockParser returns a new BlockParser that
// parses fenced code blocks.
func NewFencedCodeBlockParser() BlockParser {
	return defaultFencedCodeBlockParser
}

// fenceData records the state of the currently open fence so Continue and
// Close can recognize the matching closing fence.
type fenceData struct {
	char   byte     // fence character: '`' or '~'
	indent int      // column of the opening fence; stripped from content lines
	length int      // length of the opening fence run
	node   ast.Node // block node opened by this fence
}

var fencedCodeBlockInfoKey = NewContextKey()

// Trigger returns the bytes that activate this parser.
func (b *fencedCodeBlockParser) Trigger() []byte {
	return []byte{'~', '`'}
}

// Open recognizes an opening fence of at least three identical characters
// and creates the FencedCodeBlock node, capturing the optional info string.
func (b *fencedCodeBlockParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
	line, segment := reader.PeekLine()
	pos := pc.BlockOffset()
	if pos < 0 || (line[pos] != '`' && line[pos] != '~') {
		return nil, NoChildren
	}
	findent := pos
	fenceChar := line[pos]
	i := pos
	for ; i < len(line) && line[i] == fenceChar; i++ {
	}
	oFenceLength := i - pos
	if oFenceLength < 3 {
		return nil, NoChildren
	}
	// Anything after the fence run (minus surrounding spaces) is the info string.
	var info *ast.Text
	if i < len(line)-1 {
		rest := line[i:]
		left := util.TrimLeftSpaceLength(rest)
		right := util.TrimRightSpaceLength(rest)
		if left < len(rest)-right {
			infoStart, infoStop := segment.Start-segment.Padding+i+left, segment.Stop-right
			value := rest[left : len(rest)-right]
			// A backtick fence may not contain '`' in its info string.
			if fenceChar == '`' && bytes.IndexByte(value, '`') > -1 {
				return nil, NoChildren
			} else if infoStart != infoStop {
				info = ast.NewTextSegment(text.NewSegment(infoStart, infoStop))
			}
		}
	}
	node := ast.NewFencedCodeBlock(info)
	pc.Set(fencedCodeBlockInfoKey, &fenceData{fenceChar, findent, oFenceLength, node})
	return node, NoChildren

}

// Continue closes the block on a matching closing fence (same char, at
// least as long, at most 3 spaces of indent); otherwise it appends the
// line to the block content, dedented by the opening fence's indent.
func (b *fencedCodeBlockParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
	line, segment := reader.PeekLine()
	fdata := pc.Get(fencedCodeBlockInfoKey).(*fenceData)
	w, pos := util.IndentWidth(line, reader.LineOffset())
	if w < 4 {
		i := pos
		for ; i < len(line) && line[i] == fdata.char; i++ {
		}
		length := i - pos
		if length >= fdata.length && util.IsBlank(line[i:]) {
			// Closing fence found; consume it up to (but not past) the newline.
			newline := 1
			if line[len(line)-1] != '\n' {
				newline = 0
			}
			reader.Advance(segment.Stop - segment.Start - newline - segment.Padding)
			return Close
		}
	}
	pos, padding := util.DedentPositionPadding(line, reader.LineOffset(), segment.Padding, fdata.indent)

	seg := text.NewSegmentPadding(segment.Start+pos, segment.Stop, padding)
	node.Lines().Append(seg)
	reader.AdvanceAndSetPadding(segment.Stop-segment.Start-pos-1, padding)
	return Continue | NoChildren
}

// Close drops the per-fence context state if it still belongs to this node.
func (b *fencedCodeBlockParser) Close(node ast.Node, reader text.Reader, pc Context) {
	fdata := pc.Get(fencedCodeBlockInfoKey).(*fenceData)
	if fdata.node == node {
		pc.Set(fencedCodeBlockInfoKey, nil)
	}
}

// CanInterruptParagraph reports that a fence may interrupt a paragraph.
func (b *fencedCodeBlockParser) CanInterruptParagraph() bool {
	return true
}

// CanAcceptIndentedLine reports that an indented line cannot open a fence.
func (b *fencedCodeBlockParser) CanAcceptIndentedLine() bool {
	return false
}
|
228
vendor/github.com/yuin/goldmark/parser/html_block.go
generated
vendored
Normal file
228
vendor/github.com/yuin/goldmark/parser/html_block.go
generated
vendored
Normal file
|
@ -0,0 +1,228 @@
|
|||
package parser

import (
	"bytes"
	"regexp"
	"strings"

	"github.com/yuin/goldmark/ast"
	"github.com/yuin/goldmark/text"
	"github.com/yuin/goldmark/util"
)

// allowedBlockTags is the set of tag names that open a type-6 HTML block.
var allowedBlockTags = map[string]bool{
	"address":    true,
	"article":    true,
	"aside":      true,
	"base":       true,
	"basefont":   true,
	"blockquote": true,
	"body":       true,
	"caption":    true,
	"center":     true,
	"col":        true,
	"colgroup":   true,
	"dd":         true,
	"details":    true,
	"dialog":     true,
	"dir":        true,
	"div":        true,
	"dl":         true,
	"dt":         true,
	"fieldset":   true,
	"figcaption": true,
	"figure":     true,
	"footer":     true,
	"form":       true,
	"frame":      true,
	"frameset":   true,
	"h1":         true,
	"h2":         true,
	"h3":         true,
	"h4":         true,
	"h5":         true,
	"h6":         true,
	"head":       true,
	"header":     true,
	"hr":         true,
	"html":       true,
	"iframe":     true,
	"legend":     true,
	"li":         true,
	"link":       true,
	"main":       true,
	"menu":       true,
	"menuitem":   true,
	"meta":       true,
	"nav":        true,
	"noframes":   true,
	"ol":         true,
	"optgroup":   true,
	"option":     true,
	"p":          true,
	"param":      true,
	"section":    true,
	"source":     true,
	"summary":    true,
	"table":      true,
	"tbody":      true,
	"td":         true,
	"tfoot":      true,
	"th":         true,
	"thead":      true,
	"title":      true,
	"tr":         true,
	"track":      true,
	"ul":         true,
}

// Open/close patterns for HTML block types 1-5; types 2-5 close on a
// literal byte sequence rather than a regexp.
var htmlBlockType1OpenRegexp = regexp.MustCompile(`(?i)^[ ]{0,3}<(script|pre|style)(?:\s.*|>.*|/>.*|)\n?$`)
var htmlBlockType1CloseRegexp = regexp.MustCompile(`(?i)^.*</(?:script|pre|style)>.*`)

var htmlBlockType2OpenRegexp = regexp.MustCompile(`^[ ]{0,3}<!\-\-`)
var htmlBlockType2Close = []byte{'-', '-', '>'}

var htmlBlockType3OpenRegexp = regexp.MustCompile(`^[ ]{0,3}<\?`)
var htmlBlockType3Close = []byte{'?', '>'}

var htmlBlockType4OpenRegexp = regexp.MustCompile(`^[ ]{0,3}<![A-Z]+.*\n?$`)
var htmlBlockType4Close = []byte{'>'}

var htmlBlockType5OpenRegexp = regexp.MustCompile(`^[ ]{0,3}<\!\[CDATA\[`)
var htmlBlockType5Close = []byte{']', ']', '>'}

var htmlBlockType6Regexp = regexp.MustCompile(`^[ ]{0,3}</?([a-zA-Z0-9]+)(?:\s.*|>.*|/>.*|)\n?$`)

// NOTE(review): "(:?>|/>)" looks like it was meant to be the non-capturing
// group "(?:>|/>)"; as written it matches an optional ':' before '>' —
// confirm against upstream goldmark.
var htmlBlockType7Regexp = regexp.MustCompile(`^[ ]{0,3}<(/)?([a-zA-Z0-9]+)(` + attributePattern + `*)(:?>|/>)\s*\n?$`)

// htmlBlockParser is a BlockParser for the seven CommonMark HTML block types.
type htmlBlockParser struct {
}

var defaultHTMLBlockParser = &htmlBlockParser{}

// NewHTMLBlockParser returns a new BlockParser that can parse html
// blocks.
func NewHTMLBlockParser() BlockParser {
	return defaultHTMLBlockParser
}

// Trigger returns the byte that activates this parser.
func (b *htmlBlockParser) Trigger() []byte {
	return []byte{'<'}
}

// Open classifies the line against the block-type patterns (1-5 first,
// then 7, then 6) and opens an HTMLBlock node of the matching type.
func (b *htmlBlockParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
	var node *ast.HTMLBlock
	line, segment := reader.PeekLine()
	last := pc.LastOpenedBlock().Node
	if pos := pc.BlockOffset(); pos < 0 || line[pos] != '<' {
		return nil, NoChildren
	}

	if m := htmlBlockType1OpenRegexp.FindSubmatchIndex(line); m != nil {
		node = ast.NewHTMLBlock(ast.HTMLBlockType1)
	} else if htmlBlockType2OpenRegexp.Match(line) {
		node = ast.NewHTMLBlock(ast.HTMLBlockType2)
	} else if htmlBlockType3OpenRegexp.Match(line) {
		node = ast.NewHTMLBlock(ast.HTMLBlockType3)
	} else if htmlBlockType4OpenRegexp.Match(line) {
		node = ast.NewHTMLBlock(ast.HTMLBlockType4)
	} else if htmlBlockType5OpenRegexp.Match(line) {
		node = ast.NewHTMLBlock(ast.HTMLBlockType5)
	} else if match := htmlBlockType7Regexp.FindSubmatchIndex(line); match != nil {
		isCloseTag := match[2] > -1 && bytes.Equal(line[match[2]:match[3]], []byte("/"))
		hasAttr := match[6] != match[7]
		tagName := strings.ToLower(string(line[match[4]:match[5]]))
		_, ok := allowedBlockTags[tagName]
		if ok {
			node = ast.NewHTMLBlock(ast.HTMLBlockType6)
		} else if tagName != "script" && tagName != "style" && tagName != "pre" && !ast.IsParagraph(last) && !(isCloseTag && hasAttr) { // type 7 can not interrupt paragraph
			node = ast.NewHTMLBlock(ast.HTMLBlockType7)
		}
	}
	if node == nil {
		// Fall back to the looser type-6 pattern for lines the type-7
		// pattern rejected.
		if match := htmlBlockType6Regexp.FindSubmatchIndex(line); match != nil {
			tagName := string(line[match[2]:match[3]])
			_, ok := allowedBlockTags[strings.ToLower(tagName)]
			if ok {
				node = ast.NewHTMLBlock(ast.HTMLBlockType6)
			}
		}
	}
	if node != nil {
		reader.Advance(segment.Len() - 1)
		node.Lines().Append(segment)
		return node, NoChildren
	}
	return nil, NoChildren
}

// Continue appends lines to the open HTML block until its type-specific
// closing condition is met (closing pattern for types 1-5, blank line for
// types 6 and 7).
func (b *htmlBlockParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
	htmlBlock := node.(*ast.HTMLBlock)
	lines := htmlBlock.Lines()
	line, segment := reader.PeekLine()
	var closurePattern []byte

	switch htmlBlock.HTMLBlockType {
	case ast.HTMLBlockType1:
		if lines.Len() == 1 {
			// The opening line may already contain the closing tag.
			firstLine := lines.At(0)
			if htmlBlockType1CloseRegexp.Match(firstLine.Value(reader.Source())) {
				return Close
			}
		}
		if htmlBlockType1CloseRegexp.Match(line) {
			htmlBlock.ClosureLine = segment
			reader.Advance(segment.Len() - 1)
			return Close
		}
	// Types 2-5 share the same logic and differ only in the closing byte
	// sequence; the fallthrough chain fills closurePattern exactly once.
	case ast.HTMLBlockType2:
		closurePattern = htmlBlockType2Close
		fallthrough
	case ast.HTMLBlockType3:
		if closurePattern == nil {
			closurePattern = htmlBlockType3Close
		}
		fallthrough
	case ast.HTMLBlockType4:
		if closurePattern == nil {
			closurePattern = htmlBlockType4Close
		}
		fallthrough
	case ast.HTMLBlockType5:
		if closurePattern == nil {
			closurePattern = htmlBlockType5Close
		}

		if lines.Len() == 1 {
			firstLine := lines.At(0)
			if bytes.Contains(firstLine.Value(reader.Source()), closurePattern) {
				return Close
			}
		}
		if bytes.Contains(line, closurePattern) {
			htmlBlock.ClosureLine = segment
			reader.Advance(segment.Len() - 1)
			return Close
		}

	case ast.HTMLBlockType6, ast.HTMLBlockType7:
		if util.IsBlank(line) {
			return Close
		}
	}
	node.Lines().Append(segment)
	reader.Advance(segment.Len() - 1)
	return Continue | NoChildren
}

// Close has no work to do for HTML blocks.
func (b *htmlBlockParser) Close(node ast.Node, reader text.Reader, pc Context) {
	// nothing to do
}

// CanInterruptParagraph reports that an HTML block may interrupt a paragraph.
func (b *htmlBlockParser) CanInterruptParagraph() bool {
	return true
}

// CanAcceptIndentedLine reports that an indented line cannot open an HTML block.
func (b *htmlBlockParser) CanAcceptIndentedLine() bool {
	return false
}
|
375
vendor/github.com/yuin/goldmark/parser/link.go
generated
vendored
Normal file
375
vendor/github.com/yuin/goldmark/parser/link.go
generated
vendored
Normal file
|
@ -0,0 +1,375 @@
|
|||
package parser

import (
	"fmt"
	"regexp"
	"strings"

	"github.com/yuin/goldmark/ast"
	"github.com/yuin/goldmark/text"
	"github.com/yuin/goldmark/util"
)

var linkLabelStateKey = NewContextKey()

// linkLabelState is a placeholder inline node recording a pending '[' or
// '![' opener; states form a doubly linked list stored in the context.
type linkLabelState struct {
	ast.BaseInline

	// Segment covers the opener text ("[" or "![").
	Segment text.Segment

	// IsImage is true for an image opener ("![").
	IsImage bool

	Prev *linkLabelState

	Next *linkLabelState

	First *linkLabelState

	Last *linkLabelState
}

// newLinkLabelState creates an unlinked state covering segment.
func newLinkLabelState(segment text.Segment, isImage bool) *linkLabelState {
	return &linkLabelState{
		Segment: segment,
		IsImage: isImage,
	}
}

// Text returns the source text covered by this state.
func (s *linkLabelState) Text(source []byte) []byte {
	return s.Segment.Value(source)
}

// Dump prints this node for debugging.
func (s *linkLabelState) Dump(source []byte, level int) {
	fmt.Printf("%slinkLabelState: \"%s\"\n", strings.Repeat(" ", level), s.Text(source))
}

var kindLinkLabelState = ast.NewNodeKind("LinkLabelState")

// Kind implements ast.Node.Kind.
func (s *linkLabelState) Kind() ast.NodeKind {
	return kindLinkLabelState
}

// pushLinkLabelState appends v to the context's list of pending openers.
func pushLinkLabelState(pc Context, v *linkLabelState) {
	tlist := pc.Get(linkLabelStateKey)
	var list *linkLabelState
	if tlist == nil {
		list = v
		v.First = v
		v.Last = v
		pc.Set(linkLabelStateKey, list)
	} else {
		list = tlist.(*linkLabelState)
		l := list.Last
		list.Last = v
		l.Next = v
		v.Prev = l
	}
}

// removeLinkLabelState unlinks d from the context's list of pending openers.
func removeLinkLabelState(pc Context, d *linkLabelState) {
	tlist := pc.Get(linkLabelStateKey)
	var list *linkLabelState
	if tlist == nil {
		return
	}
	list = tlist.(*linkLabelState)

	if d.Prev == nil {
		// d was the head; the next element becomes the stored list head.
		list = d.Next
		if list != nil {
			// NOTE(review): assigning the removed node to the new head's
			// First looks suspicious (presumably the new head itself was
			// intended) — confirm against upstream goldmark.
			list.First = d
			list.Last = d.Last
			list.Prev = nil
			pc.Set(linkLabelStateKey, list)
		} else {
			pc.Set(linkLabelStateKey, nil)
		}
	} else {
		d.Prev.Next = d.Next
		if d.Next != nil {
			d.Next.Prev = d.Prev
		}
	}
	if list != nil && d.Next == nil {
		list.Last = d.Prev
	}
	d.Next = nil
	d.Prev = nil
	d.First = nil
	d.Last = nil
}

// linkParser is an InlineParser for links and images.
type linkParser struct {
}

var defaultLinkParser = &linkParser{}

// NewLinkParser returns a new InlineParser that parses links.
func NewLinkParser() InlineParser {
	return defaultLinkParser
}

// Trigger returns the bytes that activate this parser.
func (s *linkParser) Trigger() []byte {
	return []byte{'!', '[', ']'}
}

var linkDestinationRegexp = regexp.MustCompile(`\s*([^\s].+)`)
var linkTitleRegexp = regexp.MustCompile(`\s+(\)|["'\(].+)`)
var linkBottom = NewContextKey()

// Parse handles '[' / '![' by recording an opener state, and ']' by pairing
// it with the most recent opener and building a Link or Image node (inline,
// reference, or shortcut reference form).
func (s *linkParser) Parse(parent ast.Node, block text.Reader, pc Context) ast.Node {
	line, segment := block.PeekLine()
	if line[0] == '!' {
		if len(line) > 1 && line[1] == '[' {
			block.Advance(1)
			pc.Set(linkBottom, pc.LastDelimiter())
			return processLinkLabelOpen(block, segment.Start+1, true, pc)
		}
		return nil
	}
	if line[0] == '[' {
		pc.Set(linkBottom, pc.LastDelimiter())
		return processLinkLabelOpen(block, segment.Start, false, pc)
	}

	// line[0] == ']'
	tlist := pc.Get(linkLabelStateKey)
	if tlist == nil {
		return nil
	}
	last := tlist.(*linkLabelState).Last
	if last == nil {
		return nil
	}
	block.Advance(1)
	removeLinkLabelState(pc, last)
	if s.containsLink(last) { // a link in a link text is not allowed
		ast.MergeOrReplaceTextSegment(last.Parent(), last, last.Segment)
		return nil
	}
	labelValue := block.Value(text.NewSegment(last.Segment.Start+1, segment.Start))
	if util.IsBlank(labelValue) && !last.IsImage {
		ast.MergeOrReplaceTextSegment(last.Parent(), last, last.Segment)
		return nil
	}

	// Save the reader position so we can rewind for the shortcut form.
	c := block.Peek()
	l, pos := block.Position()
	var link *ast.Link
	var hasValue bool
	if c == '(' { // normal link
		link = s.parseLink(parent, last, block, pc)
	} else if c == '[' { // reference link
		link, hasValue = s.parseReferenceLink(parent, last, block, pc)
		if link == nil && hasValue {
			ast.MergeOrReplaceTextSegment(last.Parent(), last, last.Segment)
			return nil
		}
	}

	if link == nil {
		// maybe shortcut reference link
		block.SetPosition(l, pos)
		ssegment := text.NewSegment(last.Segment.Stop, segment.Start)
		maybeReference := block.Value(ssegment)
		ref, ok := pc.Reference(util.ToLinkReference(maybeReference))
		if !ok {
			ast.MergeOrReplaceTextSegment(last.Parent(), last, last.Segment)
			return nil
		}
		link = ast.NewLink()
		s.processLinkLabel(parent, link, last, pc)
		link.Title = ref.Title()
		link.Destination = ref.Destination()
	}
	if last.IsImage {
		last.Parent().RemoveChild(last.Parent(), last)
		return ast.NewImage(link)
	}
	last.Parent().RemoveChild(last.Parent(), last)
	return link
}

// containsLink reports whether a Link node follows last among its siblings;
// image openers are exempt since images may contain links.
func (s *linkParser) containsLink(last *linkLabelState) bool {
	if last.IsImage {
		return false
	}
	var c ast.Node
	for c = last; c != nil; c = c.NextSibling() {
		if _, ok := c.(*ast.Link); ok {
			return true
		}
	}
	return false
}

// processLinkLabelOpen records a '[' (or '![') opener at pos and returns
// the placeholder node inserted into the tree.
func processLinkLabelOpen(block text.Reader, pos int, isImage bool, pc Context) *linkLabelState {
	start := pos
	if isImage {
		start--
	}
	state := newLinkLabelState(text.NewSegment(start, pos+1), isImage)
	pushLinkLabelState(pc, state)
	block.Advance(1)
	return state
}

// processLinkLabel resolves pending emphasis inside the label and moves the
// label's children under the new link node.
func (s *linkParser) processLinkLabel(parent ast.Node, link *ast.Link, last *linkLabelState, pc Context) {
	var bottom ast.Node
	if v := pc.Get(linkBottom); v != nil {
		bottom = v.(ast.Node)
	}
	pc.Set(linkBottom, nil)
	ProcessDelimiters(bottom, pc)
	for c := last.NextSibling(); c != nil; {
		next := c.NextSibling()
		parent.RemoveChild(parent, c)
		link.AppendChild(link, c)
		c = next
	}
}

// parseReferenceLink parses "[label]" after the link text. The bool result
// reports whether the bracketed form was consumed (even if no reference
// definition matched).
func (s *linkParser) parseReferenceLink(parent ast.Node, last *linkLabelState, block text.Reader, pc Context) (*ast.Link, bool) {
	_, orgpos := block.Position()
	block.Advance(1) // skip '['
	line, segment := block.PeekLine()
	endIndex := util.FindClosure(line, '[', ']', false, true)
	if endIndex < 0 {
		return nil, false
	}

	block.Advance(endIndex + 1)
	ssegment := segment.WithStop(segment.Start + endIndex)
	maybeReference := block.Value(ssegment)
	if util.IsBlank(maybeReference) { // collapsed reference link
		ssegment = text.NewSegment(last.Segment.Stop, orgpos.Start-1)
		maybeReference = block.Value(ssegment)
	}

	ref, ok := pc.Reference(util.ToLinkReference(maybeReference))
	if !ok {
		return nil, true
	}

	link := ast.NewLink()
	s.processLinkLabel(parent, link, last, pc)
	link.Title = ref.Title()
	link.Destination = ref.Destination()
	return link, true
}

// parseLink parses an inline link tail "(destination "title")" and returns
// the Link node, or nil if the syntax is invalid.
func (s *linkParser) parseLink(parent ast.Node, last *linkLabelState, block text.Reader, pc Context) *ast.Link {
	block.Advance(1) // skip '('
	block.SkipSpaces()
	var title []byte
	var destination []byte
	var ok bool
	if block.Peek() == ')' { // empty link like '[link]()'
		block.Advance(1)
	} else {
		destination, ok = parseLinkDestination(block)
		if !ok {
			return nil
		}
		block.SkipSpaces()
		if block.Peek() == ')' {
			block.Advance(1)
		} else {
			title, ok = parseLinkTitle(block)
			if !ok {
				return nil
			}
			block.SkipSpaces()
			if block.Peek() == ')' {
				block.Advance(1)
			} else {
				return nil
			}
		}
	}

	link := ast.NewLink()
	s.processLinkLabel(parent, link, last, pc)
	link.Destination = destination
	link.Title = title
	return link
}

// parseLinkDestination parses a link destination, either "<...>" enclosed
// or a bare run ending at whitespace / an unbalanced ')'.
func parseLinkDestination(block text.Reader) ([]byte, bool) {
	block.SkipSpaces()
	line, _ := block.PeekLine()
	// NOTE(review): buf accumulates a copy while scanning, but the raw
	// line slice is what gets returned.
	buf := []byte{}
	if block.Peek() == '<' {
		i := 1
		for i < len(line) {
			c := line[i]
			if c == '\\' && i < len(line)-1 && util.IsPunct(line[i+1]) {
				buf = append(buf, '\\', line[i+1])
				i += 2
				continue
			} else if c == '>' {
				block.Advance(i + 1)
				return line[1:i], true
			}
			buf = append(buf, c)
			i++
		}
		return nil, false
	}
	opened := 0
	i := 0
	for i < len(line) {
		c := line[i]
		if c == '\\' && i < len(line)-1 && util.IsPunct(line[i+1]) {
			buf = append(buf, '\\', line[i+1])
			i += 2
			continue
		} else if c == '(' {
			opened++
		} else if c == ')' {
			opened--
			if opened < 0 {
				break
			}
		} else if util.IsSpace(c) {
			break
		}
		buf = append(buf, c)
		i++
	}
	block.Advance(i)
	return line[:i], len(line[:i]) != 0
}

// parseLinkTitle parses a link title quoted with '"', '\'' or parentheses.
func parseLinkTitle(block text.Reader) ([]byte, bool) {
	block.SkipSpaces()
	opener := block.Peek()
	if opener != '"' && opener != '\'' && opener != '(' {
		return nil, false
	}
	closer := opener
	if opener == '(' {
		closer = ')'
	}
	line, _ := block.PeekLine()
	pos := util.FindClosure(line[1:], opener, closer, false, true)
	if pos < 0 {
		return nil, false
	}
	pos += 2 // opener + closer
	block.Advance(pos)
	return line[1 : pos-1], true
}

// CloseBlock turns any still-pending openers back into plain text when the
// enclosing block closes.
func (s *linkParser) CloseBlock(parent ast.Node, block text.Reader, pc Context) {
	tlist := pc.Get(linkLabelStateKey)
	if tlist == nil {
		return
	}
	for s := tlist.(*linkLabelState); s != nil; {
		next := s.Next
		removeLinkLabelState(pc, s)
		s.Parent().ReplaceChild(s.Parent(), s, ast.NewTextSegment(s.Segment))
		s = next
	}
}
|
163
vendor/github.com/yuin/goldmark/parser/link_ref.go
generated
vendored
Normal file
163
vendor/github.com/yuin/goldmark/parser/link_ref.go
generated
vendored
Normal file
|
@ -0,0 +1,163 @@
|
|||
package parser

import (
	"github.com/yuin/goldmark/ast"
	"github.com/yuin/goldmark/text"
	"github.com/yuin/goldmark/util"
)

type linkReferenceParagraphTransformer struct {
}

// LinkReferenceParagraphTransformer is a ParagraphTransformer implementation
// that parses and extracts link reference from paragraphs.
var LinkReferenceParagraphTransformer = &linkReferenceParagraphTransformer{}

// Transform extracts consecutive link reference definitions from the start
// of the paragraph, registers them on the context, removes the consumed
// line ranges, and replaces the paragraph with a TextBlock if nothing
// remains.
func (p *linkReferenceParagraphTransformer) Transform(node *ast.Paragraph, reader text.Reader, pc Context) {
	lines := node.Lines()
	block := text.NewBlockReader(reader.Source(), lines)
	removes := [][2]int{}
	for {
		start, end := parseLinkReferenceDefinition(block, pc)
		if start > -1 {
			if start == end {
				end++
			}
			removes = append(removes, [2]int{start, end})
			continue
		}
		break
	}

	// Splice out each consumed [start, end) line range; offset tracks how
	// many lines earlier removals have already shifted the indices.
	offset := 0
	for _, remove := range removes {
		if lines.Len() == 0 {
			break
		}
		s := lines.Sliced(remove[1]-offset, lines.Len())
		lines.SetSliced(0, remove[0]-offset)
		lines.AppendAll(s)
		offset = remove[1]
	}

	if lines.Len() == 0 {
		t := ast.NewTextBlock()
		t.SetBlankPreviousLines(node.HasBlankPreviousLines())
		node.Parent().ReplaceChild(node.Parent(), node, t)
		return
	}

	node.SetLines(lines)
}

// parseLinkReferenceDefinition parses one "[label]: destination "title""
// definition from block. On success it registers the reference and returns
// the (startLine, endLine) range consumed; on failure it returns (-1, -1).
func parseLinkReferenceDefinition(block text.Reader, pc Context) (int, int) {
	block.SkipSpaces()
	line, segment := block.PeekLine()
	if line == nil {
		return -1, -1
	}
	startLine, _ := block.Position()
	width, pos := util.IndentWidth(line, 0)
	if width > 3 {
		return -1, -1
	}
	if width != 0 {
		pos++
	}
	if line[pos] != '[' {
		return -1, -1
	}
	open := segment.Start + pos + 1
	closes := -1
	block.Advance(pos + 1)
	// The label may span multiple lines; scan until "]:" is found.
	for {
		line, segment = block.PeekLine()
		if line == nil {
			return -1, -1
		}
		closure := util.FindClosure(line, '[', ']', false, false)
		if closure > -1 {
			closes = segment.Start + closure
			next := closure + 1
			if next >= len(line) || line[next] != ':' {
				return -1, -1
			}
			block.Advance(next + 1)
			break
		}
		block.AdvanceLine()
	}
	if closes < 0 {
		return -1, -1
	}
	label := block.Value(text.NewSegment(open, closes))
	if util.IsBlank(label) {
		return -1, -1
	}
	block.SkipSpaces()
	destination, ok := parseLinkDestination(block)
	if !ok {
		return -1, -1
	}
	line, segment = block.PeekLine()
	isNewLine := line == nil || util.IsBlank(line)

	endLine, _ := block.Position()
	_, spaces, _ := block.SkipSpaces()
	opener := block.Peek()
	if opener != '"' && opener != '\'' && opener != '(' {
		// No title: the definition is valid only if the destination ended
		// the line.
		if !isNewLine {
			return -1, -1
		}
		ref := NewReference(label, destination, nil)
		pc.AddReference(ref)
		return startLine, endLine + 1
	}
	// A title must be separated from the destination by whitespace.
	if spaces == 0 {
		return -1, -1
	}
	block.Advance(1)
	open = -1
	closes = -1
	closer := opener
	if opener == '(' {
		closer = ')'
	}
	// The title may span multiple lines; scan for the closing quote/paren.
	for {
		line, segment = block.PeekLine()
		if line == nil {
			return -1, -1
		}
		if open < 0 {
			open = segment.Start
		}
		closure := util.FindClosure(line, opener, closer, false, true)
		if closure > -1 {
			closes = segment.Start + closure
			block.Advance(closure + 1)
			break
		}
		block.AdvanceLine()
	}
	if closes < 0 {
		return -1, -1
	}

	line, segment = block.PeekLine()
	if line != nil && !util.IsBlank(line) {
		// Trailing junk after the title: fall back to the title-less form,
		// which requires the destination to have ended its line.
		if !isNewLine {
			return -1, -1
		}
		title := block.Value(text.NewSegment(open, closes))
		ref := NewReference(label, destination, title)
		pc.AddReference(ref)
		return startLine, endLine
	}

	title := block.Value(text.NewSegment(open, closes))

	endLine, _ = block.Position()
	ref := NewReference(label, destination, title)
	pc.AddReference(ref)
	return startLine, endLine + 1
}
|
250
vendor/github.com/yuin/goldmark/parser/list.go
generated
vendored
Normal file
250
vendor/github.com/yuin/goldmark/parser/list.go
generated
vendored
Normal file
|
@ -0,0 +1,250 @@
|
|||
package parser
|
||||
|
||||
import (
|
||||
"github.com/yuin/goldmark/ast"
|
||||
"github.com/yuin/goldmark/text"
|
||||
"github.com/yuin/goldmark/util"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
type listItemType int
|
||||
|
||||
const (
|
||||
notList listItemType = iota
|
||||
bulletList
|
||||
orderedList
|
||||
)
|
||||
|
||||
// Same as
|
||||
// `^(([ ]*)([\-\*\+]))(\s+.*)?\n?$`.FindSubmatchIndex or
|
||||
// `^(([ ]*)(\d{1,9}[\.\)]))(\s+.*)?\n?$`.FindSubmatchIndex
|
||||
func parseListItem(line []byte) ([6]int, listItemType) {
|
||||
i := 0
|
||||
l := len(line)
|
||||
ret := [6]int{}
|
||||
for ; i < l && line[i] == ' '; i++ {
|
||||
c := line[i]
|
||||
if c == '\t' {
|
||||
return ret, notList
|
||||
}
|
||||
}
|
||||
if i > 3 {
|
||||
return ret, notList
|
||||
}
|
||||
ret[0] = 0
|
||||
ret[1] = i
|
||||
ret[2] = i
|
||||
var typ listItemType
|
||||
if i < l && (line[i] == '-' || line[i] == '*' || line[i] == '+') {
|
||||
i++
|
||||
ret[3] = i
|
||||
typ = bulletList
|
||||
} else if i < l {
|
||||
for ; i < l && util.IsNumeric(line[i]); i++ {
|
||||
}
|
||||
ret[3] = i
|
||||
if ret[3] == ret[2] || ret[3]-ret[2] > 9 {
|
||||
return ret, notList
|
||||
}
|
||||
if i < l && (line[i] == '.' || line[i] == ')') {
|
||||
i++
|
||||
ret[3] = i
|
||||
} else {
|
||||
return ret, notList
|
||||
}
|
||||
typ = orderedList
|
||||
} else {
|
||||
return ret, notList
|
||||
}
|
||||
if i < l && line[i] != '\n' {
|
||||
w, _ := util.IndentWidth(line[i:], 0)
|
||||
if w == 0 {
|
||||
return ret, notList
|
||||
}
|
||||
}
|
||||
if i >= l {
|
||||
ret[4] = -1
|
||||
ret[5] = -1
|
||||
return ret, typ
|
||||
}
|
||||
ret[4] = i
|
||||
ret[5] = len(line)
|
||||
if line[ret[5]-1] == '\n' && line[i] != '\n' {
|
||||
ret[5]--
|
||||
}
|
||||
return ret, typ
|
||||
}
|
||||
|
||||
func matchesListItem(source []byte, strict bool) ([6]int, listItemType) {
|
||||
m, typ := parseListItem(source)
|
||||
if typ != notList && (!strict || strict && m[1] < 4) {
|
||||
return m, typ
|
||||
}
|
||||
return m, notList
|
||||
}
|
||||
|
||||
func calcListOffset(source []byte, match [6]int) int {
|
||||
offset := 0
|
||||
if match[4] < 0 || util.IsBlank(source[match[4]:]) { // list item starts with a blank line
|
||||
offset = 1
|
||||
} else {
|
||||
offset, _ = util.IndentWidth(source[match[4]:], match[4])
|
||||
if offset > 4 { // offseted codeblock
|
||||
offset = 1
|
||||
}
|
||||
}
|
||||
return offset
|
||||
}
|
||||
|
||||
func lastOffset(node ast.Node) int {
|
||||
lastChild := node.LastChild()
|
||||
if lastChild != nil {
|
||||
return lastChild.(*ast.ListItem).Offset
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// A listParser parses bullet and ordered lists. It is stateless, so one
// shared instance serves all parse runs.
type listParser struct {
}

var defaultListParser = &listParser{}

// NewListParser returns a new BlockParser that
// parses lists.
// This parser must take precedence over the ListItemParser.
func NewListParser() BlockParser {
	return defaultListParser
}
|
||||
|
||||
func (b *listParser) Trigger() []byte {
|
||||
return []byte{'-', '+', '*', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
|
||||
}
|
||||
|
||||
// Open starts a new List node when the current line begins a list item and
// the enclosing container is not already a list.
func (b *listParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
	last := pc.LastOpenedBlock().Node
	// If a list is already open its items are handled elsewhere; the
	// skipListParser flag (set by listItemParser.Continue) likewise
	// suppresses opening a nested list for this line.
	if _, lok := last.(*ast.List); lok || pc.Get(skipListParser) != nil {
		pc.Set(skipListParser, nil)
		return nil, NoChildren
	}
	line, _ := reader.PeekLine()
	match, typ := matchesListItem(line, true)
	if typ == notList {
		return nil, NoChildren
	}
	start := -1
	if typ == orderedList {
		// match[2]:match[3]-1 is the number without its '.'/')' suffix.
		number := line[match[2] : match[3]-1]
		start, _ = strconv.Atoi(string(number))
	}

	if ast.IsParagraph(last) && last.Parent() == parent {
		// we allow only lists starting with 1 to interrupt paragraphs.
		if typ == orderedList && start != 1 {
			return nil, NoChildren
		}
		// an empty list item cannot interrupt a paragraph.
		if match[5]-match[4] == 1 {
			return nil, NoChildren
		}
	}

	// match[3]-1 is the marker byte: '-', '*', '+' or the '.'/')' of an
	// ordered item.
	marker := line[match[3]-1]
	node := ast.NewList(marker)
	if start > -1 {
		node.Start = start
	}
	return node, HasChildren
}
|
||||
|
||||
// Continue decides whether the open list keeps accepting the current line.
func (b *listParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
	list := node.(*ast.List)
	line, _ := reader.PeekLine()
	if util.IsBlank(line) {
		// A list item can begin with at most one blank line
		if node.ChildCount() == 1 && node.LastChild().ChildCount() == 0 {
			return Close
		}
		return Continue | HasChildren
	}
	// Thematic Breaks take precedence over lists
	if isThematicBreak(line, reader.LineOffset()) {
		isHeading := false
		last := pc.LastOpenedBlock().Node
		if ast.IsParagraph(last) {
			c, ok := matchesSetextHeadingBar(line)
			// "---" directly under a paragraph is a setext heading bar,
			// not a thematic break.
			if ok && c == '-' {
				isHeading = true
			}
		}
		if !isHeading {
			return Close
		}
	}

	// "offset" means a width that bar indicates.
	//    - aaaaaaaa
	// |----|
	//
	// If the indent is less than the last offset like
	// - a
	//  - b <--- current line
	// it maybe a new child of the list.
	offset := lastOffset(node)
	indent, _ := util.IndentWidth(line, reader.LineOffset())

	if indent < offset {
		if indent < 4 {
			match, typ := matchesListItem(line, false) // may have more than 3 leading spaces
			if typ != notList && match[1]-offset < 4 {
				marker := line[match[3]-1]
				// A marker/order mismatch starts a brand-new list instead.
				if !list.CanContinue(marker, typ == orderedList) {
					return Close
				}
				return Continue | HasChildren
			}
		}
		return Close
	}
	return Continue | HasChildren
}
|
||||
|
||||
func (b *listParser) Close(node ast.Node, reader text.Reader, pc Context) {
|
||||
list := node.(*ast.List)
|
||||
|
||||
for c := node.FirstChild(); c != nil && list.IsTight; c = c.NextSibling() {
|
||||
if c.FirstChild() != nil && c.FirstChild() != c.LastChild() {
|
||||
for c1 := c.FirstChild().NextSibling(); c1 != nil; c1 = c1.NextSibling() {
|
||||
if bl, ok := c1.(ast.Node); ok && bl.HasBlankPreviousLines() {
|
||||
list.IsTight = false
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if c != node.FirstChild() {
|
||||
if bl, ok := c.(ast.Node); ok && bl.HasBlankPreviousLines() {
|
||||
list.IsTight = false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if list.IsTight {
|
||||
for child := node.FirstChild(); child != nil; child = child.NextSibling() {
|
||||
for gc := child.FirstChild(); gc != nil; gc = gc.NextSibling() {
|
||||
paragraph, ok := gc.(*ast.Paragraph)
|
||||
if ok {
|
||||
textBlock := ast.NewTextBlock()
|
||||
textBlock.SetLines(paragraph.Lines())
|
||||
child.ReplaceChild(child, paragraph, textBlock)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// CanInterruptParagraph reports that a list may interrupt a paragraph.
func (b *listParser) CanInterruptParagraph() bool {
	return true
}

// CanAcceptIndentedLine reports that a list cannot start from an indented
// line.
func (b *listParser) CanAcceptIndentedLine() bool {
	return false
}
|
85
vendor/github.com/yuin/goldmark/parser/list_item.go
generated
vendored
Normal file
85
vendor/github.com/yuin/goldmark/parser/list_item.go
generated
vendored
Normal file
|
@ -0,0 +1,85 @@
|
|||
package parser
|
||||
|
||||
import (
|
||||
"github.com/yuin/goldmark/ast"
|
||||
"github.com/yuin/goldmark/text"
|
||||
"github.com/yuin/goldmark/util"
|
||||
)
|
||||
|
||||
// A listItemParser parses the individual items inside an open list. It is
// stateless, so one shared instance serves all parse runs.
type listItemParser struct {
}

var defaultListItemParser = &listItemParser{}

// NewListItemParser returns a new BlockParser that
// parses list items.
func NewListItemParser() BlockParser {
	return defaultListItemParser
}
|
||||
|
||||
// skipListParser is a Context key. When set (by listItemParser.Continue on
// seeing a sibling item), listParser.Open skips opening a new list for the
// current line and clears the flag.
var skipListParser = NewContextKey()
var skipListParserValue interface{} = true
||||
|
||||
func (b *listItemParser) Trigger() []byte {
|
||||
return []byte{'-', '+', '*', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
|
||||
}
|
||||
|
||||
// Open starts a list item when the parent block is a list and the current
// line carries an item marker at an acceptable indent.
func (b *listItemParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
	list, lok := parent.(*ast.List)
	if !lok { // list item must be a child of a list
		return nil, NoChildren
	}
	offset := lastOffset(list)
	line, _ := reader.PeekLine()
	match, typ := matchesListItem(line, false)
	if typ == notList {
		return nil, NoChildren
	}
	// Indented more than three columns past the previous item's offset:
	// this line is item content, not a new sibling item.
	if match[1]-offset > 3 {
		return nil, NoChildren
	}
	itemOffset := calcListOffset(line, match)
	node := ast.NewListItem(match[3] + itemOffset)
	// No text after the marker (or only a newline): an empty item.
	if match[4] < 0 || match[5]-match[4] == 1 {
		return node, NoChildren
	}

	// Advance past the marker and the item's indent, recording any tab
	// padding so children see consistent columns.
	pos, padding := util.IndentPosition(line[match[4]:], match[4], itemOffset)
	child := match[3] + pos
	reader.AdvanceAndSetPadding(child, padding)
	return node, HasChildren
}
|
||||
|
||||
// Continue keeps the item open for blank lines and for lines indented to
// the item's content offset; a less-indented line closes the item.
func (b *listItemParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
	line, _ := reader.PeekLine()
	if util.IsBlank(line) {
		return Continue | HasChildren
	}

	indent, _ := util.IndentWidth(line, reader.LineOffset())
	offset := lastOffset(node.Parent())
	if indent < offset && indent < 4 {
		_, typ := matchesListItem(line, true)
		// new list item found
		if typ != notList {
			// Tell listParser.Open not to open a nested list for it.
			pc.Set(skipListParser, skipListParserValue)
		}
		return Close
	}
	// Consume the item's indent, keeping tab padding consistent.
	pos, padding := util.IndentPosition(line, reader.LineOffset(), offset)
	reader.AdvanceAndSetPadding(pos, padding)

	return Continue | HasChildren
}
|
||||
|
||||
// Close is a no-op; list items need no finalization.
func (b *listItemParser) Close(node ast.Node, reader text.Reader, pc Context) {
	// nothing to do
}

// CanInterruptParagraph reports that a list item may interrupt a paragraph.
func (b *listItemParser) CanInterruptParagraph() bool {
	return true
}

// CanAcceptIndentedLine reports that a list item cannot start from an
// indented line.
func (b *listItemParser) CanAcceptIndentedLine() bool {
	return false
}
|
71
vendor/github.com/yuin/goldmark/parser/paragraph.go
generated
vendored
Normal file
71
vendor/github.com/yuin/goldmark/parser/paragraph.go
generated
vendored
Normal file
|
@ -0,0 +1,71 @@
|
|||
package parser
|
||||
|
||||
import (
|
||||
"github.com/yuin/goldmark/ast"
|
||||
"github.com/yuin/goldmark/text"
|
||||
)
|
||||
|
||||
// A paragraphParser parses plain paragraphs — the fallback block when no
// other parser claims a line. It is stateless, so one instance is shared.
type paragraphParser struct {
}

var defaultParagraphParser = &paragraphParser{}

// NewParagraphParser returns a new BlockParser that
// parses paragraphs.
func NewParagraphParser() BlockParser {
	return defaultParagraphParser
}
|
||||
|
||||
// Trigger returns nil: the paragraph parser is tried for every line rather
// than being keyed to specific leading bytes.
func (b *paragraphParser) Trigger() []byte {
	return nil
}
|
||||
|
||||
func (b *paragraphParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
|
||||
_, segment := reader.PeekLine()
|
||||
segment = segment.TrimLeftSpace(reader.Source())
|
||||
if segment.IsEmpty() {
|
||||
return nil, NoChildren
|
||||
}
|
||||
node := ast.NewParagraph()
|
||||
node.Lines().Append(segment)
|
||||
reader.Advance(segment.Len() - 1)
|
||||
return node, NoChildren
|
||||
}
|
||||
|
||||
func (b *paragraphParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
|
||||
_, segment := reader.PeekLine()
|
||||
segment = segment.TrimLeftSpace(reader.Source())
|
||||
if segment.IsEmpty() {
|
||||
return Close
|
||||
}
|
||||
node.Lines().Append(segment)
|
||||
reader.Advance(segment.Len() - 1)
|
||||
return Continue | NoChildren
|
||||
}
|
||||
|
||||
// Close trims trailing whitespace from the paragraph's last line, and
// removes the node entirely when it ended up with no lines at all.
func (b *paragraphParser) Close(node ast.Node, reader text.Reader, pc Context) {
	parent := node.Parent()
	if parent == nil {
		// paragraph has been transformed (e.g. into a heading) and is no
		// longer attached to the tree.
		return
	}
	lines := node.Lines()
	if lines.Len() != 0 {
		// trim trailing spaces from the last line only
		length := lines.Len()
		lastLine := node.Lines().At(length - 1)
		node.Lines().Set(length-1, lastLine.TrimRightSpace(reader.Source()))
	}
	if lines.Len() == 0 {
		node.Parent().RemoveChild(node.Parent(), node)
		return
	}
}
|
||||
|
||||
// CanInterruptParagraph reports false: a new paragraph never interrupts an
// open one — the line is appended instead.
func (b *paragraphParser) CanInterruptParagraph() bool {
	return false
}

// CanAcceptIndentedLine reports that a paragraph cannot start from an
// indented line.
func (b *paragraphParser) CanAcceptIndentedLine() bool {
	return false
}
|
1211
vendor/github.com/yuin/goldmark/parser/parser.go
generated
vendored
Normal file
1211
vendor/github.com/yuin/goldmark/parser/parser.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load diff
108
vendor/github.com/yuin/goldmark/parser/raw_html.go
generated
vendored
Normal file
108
vendor/github.com/yuin/goldmark/parser/raw_html.go
generated
vendored
Normal file
|
@ -0,0 +1,108 @@
|
|||
package parser
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"github.com/yuin/goldmark/ast"
|
||||
"github.com/yuin/goldmark/text"
|
||||
"github.com/yuin/goldmark/util"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
// A rawHTMLParser parses inline raw HTML spans. It is stateless, so one
// shared instance serves all parse runs.
type rawHTMLParser struct {
}

var defaultRawHTMLParser = &rawHTMLParser{}

// NewRawHTMLParser return a new InlineParser that can parse
// inline htmls
func NewRawHTMLParser() InlineParser {
	return defaultRawHTMLParser
}
|
||||
|
||||
func (s *rawHTMLParser) Trigger() []byte {
|
||||
return []byte{'<'}
|
||||
}
|
||||
|
||||
func (s *rawHTMLParser) Parse(parent ast.Node, block text.Reader, pc Context) ast.Node {
|
||||
line, _ := block.PeekLine()
|
||||
if len(line) > 1 && util.IsAlphaNumeric(line[1]) {
|
||||
return s.parseMultiLineRegexp(openTagRegexp, block, pc)
|
||||
}
|
||||
if len(line) > 2 && line[1] == '/' && util.IsAlphaNumeric(line[2]) {
|
||||
return s.parseMultiLineRegexp(closeTagRegexp, block, pc)
|
||||
}
|
||||
if bytes.HasPrefix(line, []byte("<!--")) {
|
||||
return s.parseMultiLineRegexp(commentRegexp, block, pc)
|
||||
}
|
||||
if bytes.HasPrefix(line, []byte("<?")) {
|
||||
return s.parseSingleLineRegexp(processingInstructionRegexp, block, pc)
|
||||
}
|
||||
if len(line) > 2 && line[1] == '!' && line[2] >= 'A' && line[2] <= 'Z' {
|
||||
return s.parseSingleLineRegexp(declRegexp, block, pc)
|
||||
}
|
||||
if bytes.HasPrefix(line, []byte("<![CDATA[")) {
|
||||
return s.parseMultiLineRegexp(cdataRegexp, block, pc)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// tagnamePattern matches an HTML tag name: an ASCII letter followed by
// letters, digits or hyphens.
var tagnamePattern = `([A-Za-z][A-Za-z0-9-]*)`

// attributePattern matches one HTML attribute with an optional unquoted,
// single-quoted or double-quoted value.
var attributePattern = `(?:\s+[a-zA-Z_:][a-zA-Z0-9:._-]*(?:\s*=\s*(?:[^\"'=<>` + "`" + `\x00-\x20]+|'[^']*'|"[^"]*"))?)`
var openTagRegexp = regexp.MustCompile("^<" + tagnamePattern + attributePattern + `*\s*/?>`)
var closeTagRegexp = regexp.MustCompile("^</" + tagnamePattern + `\s*>`)

// NOTE(review): in commentRegexp the '^' anchors only the first
// alternative ("<!---->"); the second alternative may match anywhere in
// the input — confirm this is intended.
var commentRegexp = regexp.MustCompile(`^<!---->|<!--(?:-?[^>-])(?:-?[^-])*-->`)
var processingInstructionRegexp = regexp.MustCompile(`^(?:<\?).*?(?:\?>)`)
var declRegexp = regexp.MustCompile(`^<![A-Z]+\s+[^>]*>`)

// NOTE(review): cdataRegexp carries no '^' anchor, unlike its siblings —
// confirm intended.
var cdataRegexp = regexp.MustCompile(`<!\[CDATA\[[\s\S]*?\]\]>`)
|
||||
|
||||
func (s *rawHTMLParser) parseSingleLineRegexp(reg *regexp.Regexp, block text.Reader, pc Context) ast.Node {
|
||||
line, segment := block.PeekLine()
|
||||
match := reg.FindSubmatchIndex(line)
|
||||
if match == nil {
|
||||
return nil
|
||||
}
|
||||
node := ast.NewRawHTML()
|
||||
node.Segments.Append(segment.WithStop(segment.Start + match[1]))
|
||||
block.Advance(match[1])
|
||||
return node
|
||||
}
|
||||
|
||||
// NOTE(review): dummyMatch appears unused within this file — confirm no
// other file in the package references it before removing.
var dummyMatch = [][]byte{}
|
||||
|
||||
// parseMultiLineRegexp matches reg across line boundaries via the reader's
// Match, then rebuilds the matched span as one text segment per covered
// line on a RawHTML node. Returns nil when reg does not match here.
func (s *rawHTMLParser) parseMultiLineRegexp(reg *regexp.Regexp, block text.Reader, pc Context) ast.Node {
	sline, ssegment := block.Position()
	if block.Match(reg) {
		node := ast.NewRawHTML()
		// Match advanced the reader to the end of the match; record that
		// end position, then rewind to walk the covered lines.
		eline, esegment := block.Position()
		block.SetPosition(sline, ssegment)
		for {
			line, segment := block.PeekLine()
			if line == nil {
				break
			}
			l, _ := block.Position()
			start := segment.Start
			if l == sline {
				// First line: begin at the match start, not the line start.
				start = ssegment.Start
			}
			end := segment.Stop
			if l == eline {
				// Last line: stop where the match ends.
				end = esegment.Start
			}

			node.Segments.Append(text.NewSegment(start, end))
			if l == eline {
				block.Advance(end - start)
				break
			} else {
				block.AdvanceLine()
			}
		}
		return node
	}
	return nil
}
|
||||
|
||||
// CloseBlock is a no-op; the raw HTML parser keeps no per-block state.
func (s *rawHTMLParser) CloseBlock(parent ast.Node, pc Context) {
	// nothing to do
}
|
126
vendor/github.com/yuin/goldmark/parser/setext_headings.go
generated
vendored
Normal file
126
vendor/github.com/yuin/goldmark/parser/setext_headings.go
generated
vendored
Normal file
|
@ -0,0 +1,126 @@
|
|||
package parser
|
||||
|
||||
import (
|
||||
"github.com/yuin/goldmark/ast"
|
||||
"github.com/yuin/goldmark/text"
|
||||
"github.com/yuin/goldmark/util"
|
||||
)
|
||||
|
||||
// temporaryParagraphKey stores, between Open and Close, the paragraph a
// setext heading bar was found under.
var temporaryParagraphKey = NewContextKey()

// A setextHeadingParser parses setext (underlined) headings.
type setextHeadingParser struct {
	HeadingConfig
}
|
||||
|
||||
// matchesSetextHeadingBar reports whether line is a setext heading
// underline, returning the bar character ('=' or '-') on success.
func matchesSetextHeadingBar(line []byte) (byte, bool) {
	start := 0
	end := len(line)
	space := util.TrimLeftLength(line, []byte{' '})
	// Four or more leading spaces would make this an indented code block.
	if space > 3 {
		return 0, false
	}
	start += space
	level1 := util.TrimLeftLength(line[start:end], []byte{'='})
	c := byte('=')
	var level2 int
	// Only look for '-' when no '=' was found.
	if level1 == 0 {
		level2 = util.TrimLeftLength(line[start:end], []byte{'-'})
		c = '-'
	}
	// Ignore trailing whitespace when checking that the bar fills the line.
	if util.IsSpace(line[end-1]) {
		end -= util.TrimRightSpaceLength(line[start:end])
	}
	// The run of bar characters must reach the (trimmed) end of the line.
	if !((level1 > 0 && start+level1 == end) || (level2 > 0 && start+level2 == end)) {
		return 0, false
	}
	return c, true
}
|
||||
|
||||
// NewSetextHeadingParser return a new BlockParser that can parse Setext headings.
|
||||
func NewSetextHeadingParser(opts ...HeadingOption) BlockParser {
|
||||
p := &setextHeadingParser{}
|
||||
for _, o := range opts {
|
||||
o.SetHeadingOption(&p.HeadingConfig)
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
func (b *setextHeadingParser) Trigger() []byte {
|
||||
return []byte{'-', '='}
|
||||
}
|
||||
|
||||
// Open turns the paragraph directly above a setext bar into a heading
// candidate: '=' bars produce level-1 headings, '-' bars level-2.
func (b *setextHeadingParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
	last := pc.LastOpenedBlock().Node
	if last == nil {
		return nil, NoChildren
	}
	// The bar only forms a heading when it directly follows a paragraph in
	// the same container.
	paragraph, ok := last.(*ast.Paragraph)
	if !ok || paragraph.Parent() != parent {
		return nil, NoChildren
	}
	line, segment := reader.PeekLine()
	c, ok := matchesSetextHeadingBar(line)
	if !ok {
		return nil, NoChildren
	}
	level := 1
	if c == '-' {
		level = 2
	}
	node := ast.NewHeading(level)
	// Temporarily record the bar line; Close swaps in the paragraph's
	// lines as the heading text.
	node.Lines().Append(segment)
	pc.Set(temporaryParagraphKey, last)
	return node, NoChildren | RequireParagraph
}
|
||||
|
||||
// Continue always closes: the heading bar occupies a single line.
func (b *setextHeadingParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
	return Close
}
|
||||
|
||||
// Close finalizes the heading: the pending paragraph's lines become the
// heading text and the paragraph is removed. If the paragraph turned out
// empty, the bar line itself is re-emitted as paragraph content instead.
// Optional attribute parsing and auto-ID generation run afterwards.
func (b *setextHeadingParser) Close(node ast.Node, reader text.Reader, pc Context) {
	heading := node.(*ast.Heading)
	// Lines currently holds only the bar line appended in Open.
	segment := node.Lines().At(0)
	heading.Lines().Clear()
	tmp := pc.Get(temporaryParagraphKey).(*ast.Paragraph)
	pc.Set(temporaryParagraphKey, nil)
	if tmp.Lines().Len() == 0 {
		// The paragraph above had no text: the bar is ordinary paragraph
		// text. Merge it into a following paragraph if one exists.
		next := heading.NextSibling()
		segment = segment.TrimLeftSpace(reader.Source())
		if next == nil || !ast.IsParagraph(next) {
			para := ast.NewParagraph()
			para.Lines().Append(segment)
			heading.Parent().InsertAfter(heading.Parent(), heading, para)
		} else {
			next.(ast.Node).Lines().Unshift(segment)
		}
		heading.Parent().RemoveChild(heading.Parent(), heading)
	} else {
		// Move the paragraph's lines onto the heading and detach the
		// paragraph from the tree.
		heading.SetLines(tmp.Lines())
		heading.SetBlankPreviousLines(tmp.HasBlankPreviousLines())
		tp := tmp.Parent()
		if tp != nil {
			tp.RemoveChild(tp, tmp)
		}
	}

	if b.Attribute {
		parseLastLineAttributes(node, reader, pc)
	}

	if b.AutoHeadingID {
		// Respect an explicit id attribute; otherwise generate one.
		id, ok := node.AttributeString("id")
		if !ok {
			generateAutoHeadingID(heading, reader, pc)
		} else {
			pc.IDs().Put(id.([]byte))
		}
	}
}
|
||||
|
||||
// CanInterruptParagraph reports true: the bar consumes the paragraph above
// it, so this parser must run while the paragraph is still open.
func (b *setextHeadingParser) CanInterruptParagraph() bool {
	return true
}

// CanAcceptIndentedLine reports that a setext bar cannot start from an
// indented line.
func (b *setextHeadingParser) CanAcceptIndentedLine() bool {
	return false
}
|
75
vendor/github.com/yuin/goldmark/parser/thematic_break.go
generated
vendored
Normal file
75
vendor/github.com/yuin/goldmark/parser/thematic_break.go
generated
vendored
Normal file
|
@ -0,0 +1,75 @@
|
|||
package parser
|
||||
|
||||
import (
|
||||
"github.com/yuin/goldmark/ast"
|
||||
"github.com/yuin/goldmark/text"
|
||||
"github.com/yuin/goldmark/util"
|
||||
)
|
||||
|
||||
// A thematicBreakPraser parses thematic breaks ("***", "---", "___").
// NOTE(review): "Praser" is an upstream typo for "Parser"; renaming would
// touch every method of the type, so the name is preserved here.
type thematicBreakPraser struct {
}

var defaultThematicBreakPraser = &thematicBreakPraser{}

// NewThematicBreakParser returns a new BlockParser that
// parses thematic breaks.
func NewThematicBreakParser() BlockParser {
	return defaultThematicBreakPraser
}
|
||||
|
||||
func isThematicBreak(line []byte, offset int) bool {
|
||||
w, pos := util.IndentWidth(line, offset)
|
||||
if w > 3 {
|
||||
return false
|
||||
}
|
||||
mark := byte(0)
|
||||
count := 0
|
||||
for i := pos; i < len(line); i++ {
|
||||
c := line[i]
|
||||
if util.IsSpace(c) {
|
||||
continue
|
||||
}
|
||||
if mark == 0 {
|
||||
mark = c
|
||||
count = 1
|
||||
if mark == '*' || mark == '-' || mark == '_' {
|
||||
continue
|
||||
}
|
||||
return false
|
||||
}
|
||||
if c != mark {
|
||||
return false
|
||||
}
|
||||
count++
|
||||
}
|
||||
return count > 2
|
||||
}
|
||||
|
||||
func (b *thematicBreakPraser) Trigger() []byte {
|
||||
return []byte{'-', '*', '_'}
|
||||
}
|
||||
|
||||
func (b *thematicBreakPraser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
|
||||
line, segment := reader.PeekLine()
|
||||
if isThematicBreak(line, reader.LineOffset()) {
|
||||
reader.Advance(segment.Len() - 1)
|
||||
return ast.NewThematicBreak(), NoChildren
|
||||
}
|
||||
return nil, NoChildren
|
||||
}
|
||||
|
||||
// Continue always closes: a thematic break is a single line.
func (b *thematicBreakPraser) Continue(node ast.Node, reader text.Reader, pc Context) State {
	return Close
}

// Close is a no-op; a thematic break has no content to finalize.
func (b *thematicBreakPraser) Close(node ast.Node, reader text.Reader, pc Context) {
	// nothing to do
}

// CanInterruptParagraph reports that a thematic break may interrupt a
// paragraph.
func (b *thematicBreakPraser) CanInterruptParagraph() bool {
	return true
}

// CanAcceptIndentedLine reports that a thematic break cannot start from an
// indented line.
func (b *thematicBreakPraser) CanAcceptIndentedLine() bool {
	return false
}
|
Loading…
Add table
Add a link
Reference in a new issue