github.com/vugu/vugu@v0.3.5/internal/htmlx/token.go

     1  // Copyright 2010 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package htmlx
     6  
     7  import (
     8  	"bytes"
     9  	"errors"
    10  	"io"
    11  	"strconv"
    12  	"strings"
    13  
    14  	"github.com/vugu/vugu/internal/htmlx/atom"
    15  )
    16  
    17  // A TokenType is the type of a Token.
    18  type TokenType uint32
    19  
    20  const (
    21  	// ErrorToken means that an error occurred during tokenization.
    22  	ErrorToken TokenType = iota
    23  	// TextToken means a text node.
    24  	TextToken
    25  	// A StartTagToken looks like <a>.
    26  	StartTagToken
    27  	// An EndTagToken looks like </a>.
    28  	EndTagToken
    29  	// A SelfClosingTagToken tag looks like <br/>.
    30  	SelfClosingTagToken
    31  	// A CommentToken looks like <!--x-->.
    32  	CommentToken
    33  	// A DoctypeToken looks like <!DOCTYPE x>.
    34  	DoctypeToken
    35  )
    36  
    37  // ErrBufferExceeded means that the buffering limit was exceeded.
    38  var ErrBufferExceeded = errors.New("max buffer exceeded")
    39  
    40  // String returns a string representation of the TokenType.
    41  func (t TokenType) String() string {
    42  	switch t {
    43  	case ErrorToken:
    44  		return "Error"
    45  	case TextToken:
    46  		return "Text"
    47  	case StartTagToken:
    48  		return "StartTag"
    49  	case EndTagToken:
    50  		return "EndTag"
    51  	case SelfClosingTagToken:
    52  		return "SelfClosingTag"
    53  	case CommentToken:
    54  		return "Comment"
    55  	case DoctypeToken:
    56  		return "Doctype"
    57  	}
    58  	return "Invalid(" + strconv.Itoa(int(t)) + ")"
    59  }
    60  
    61  // An Attribute is an attribute namespace-key-value triple. Namespace is
    62  // non-empty for foreign attributes like xlink, Key is alphabetic (and hence
    63  // does not contain escapable characters like '&', '<' or '>'), and Val is
    64  // unescaped (it looks like "a<b" rather than "a&lt;b").
    65  //
    66  // Namespace is only used by the parser, not the tokenizer.
    67  type Attribute struct {
    68  	Namespace, Key, Val string
    69  }
    70  
    71  // A Token consists of a TokenType and some Data (tag name for start and end
    72  // tags, content for text, comments and doctypes). A tag Token may also contain
    73  // a slice of Attributes. Data is unescaped for all Tokens (it looks like "a<b"
    74  // rather than "a&lt;b"). For tag Tokens, DataAtom is the atom for Data, or
    75  // zero if Data is not a known tag name.
    76  type Token struct {
    77  	Type     TokenType
    78  	DataAtom atom.Atom
    79  	Data     string
    80  	Attr     []Attribute
    81  	Column   int // column in the input at which the token starts (as tracked by the tokenizer)
    82  	Line     int // line in the input at which the token starts
    83  }
    84  
    85  // tagString returns a string representation of a tag Token's Data and Attr.
    86  func (t Token) tagString() string {
    87  	if len(t.Attr) == 0 {
    88  		return t.Data
    89  	}
    90  	buf := bytes.NewBufferString(t.Data)
    91  	for _, a := range t.Attr {
    92  		buf.WriteByte(' ')
    93  		buf.WriteString(a.Key)
    94  		buf.WriteString(`="`)
    95  		escape(buf, a.Val)
    96  		buf.WriteByte('"')
    97  	}
    98  	return buf.String()
    99  }
   100  
   101  // String returns a string representation of the Token.
   102  func (t Token) String() string {
   103  	switch t.Type {
   104  	case ErrorToken:
   105  		return ""
   106  	case TextToken:
   107  		return EscapeString(t.Data)
   108  	case StartTagToken:
   109  		return "<" + t.tagString() + ">"
   110  	case EndTagToken:
   111  		return "</" + t.tagString() + ">"
   112  	case SelfClosingTagToken:
   113  		return "<" + t.tagString() + "/>"
   114  	case CommentToken:
   115  		return "<!--" + t.Data + "-->"
   116  	case DoctypeToken:
   117  		return "<!DOCTYPE " + t.Data + ">"
   118  	}
   119  	return "Invalid(" + strconv.Itoa(int(t.Type)) + ")"
   120  }
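
        // A minimal sketch of Token stringification (assumes package fmt is
        // imported by the caller); attribute values are escaped on output:
        //
        //	t := Token{Type: StartTagToken, Data: "a", Attr: []Attribute{{Key: "href", Val: "a<b"}}}
        //	fmt.Println(t) // prints <a href="a&lt;b">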
   121  
   122  // span is a range of bytes in a Tokenizer's buffer. The start is inclusive,
   123  // the end is exclusive.
   124  type span struct {
   125  	start, end int
   126  }
   127  
   128  // A Tokenizer returns a stream of HTML Tokens.
   129  type Tokenizer struct {
   130  	// r is the source of the HTML text.
   131  	r io.Reader
   132  	// tt is the TokenType of the current token.
   133  	tt TokenType
   134  	// err is the first error encountered during tokenization. It is possible
   135  	// for tt != Error && err != nil to hold: this means that Next returned a
   136  	// valid token but the subsequent Next call will return an error token.
   137  	// For example, if the HTML text input was just "plain", then the first
   138  	// Next call would set z.err to io.EOF but return a TextToken, and all
   139  	// subsequent Next calls would return an ErrorToken.
   140  	// err is never reset. Once it becomes non-nil, it stays non-nil.
   141  	err error
   142  	// readErr is the error returned by the io.Reader r. It is separate from
   143  	// err because it is valid for an io.Reader to return (n int, err1 error)
   144  	// such that n > 0 && err1 != nil, and callers should always process the
   145  	// n > 0 bytes before considering the error err1.
   146  	readErr error
   147  	// buf[raw.start:raw.end] holds the raw bytes of the current token.
   148  	// buf[raw.end:] is buffered input that will yield future tokens.
   149  	raw span
   150  	buf []byte
   151  	// maxBuf limits the data buffered in buf. A value of 0 means unlimited.
   152  	maxBuf int
   153  	// buf[data.start:data.end] holds the raw bytes of the current token's data:
   154  	// a text token's text, a tag token's tag name, etc.
   155  	data span
   156  	// pendingAttr is the attribute key and value currently being tokenized.
   157  	// When complete, pendingAttr is pushed onto attr. nAttrReturned is
   158  	// incremented on each call to TagAttr.
   159  	pendingAttr   [2]span
   160  	attr          [][2]span
   161  	nAttrReturned int
   162  	// rawTag is the "script" in "</script>" that closes the next token. If
   163  	// non-empty, the subsequent call to Next will return a raw or RCDATA text
   164  	// token: one that treats "<p>" as text instead of an element.
   165  	// rawTag's contents are lower-cased.
   166  	rawTag string
   167  	// textIsRaw is whether the current text token's data is not escaped.
   168  	textIsRaw bool
   169  	// convertNUL is whether NUL bytes in the current token's data should
   170  	// be converted into \ufffd replacement characters.
   171  	convertNUL bool
   172  	// allowCDATA is whether CDATA sections are allowed in the current context.
   173  	allowCDATA bool
   174  	// tokenLine is the line on which the current token starts.
   175  	tokenLine int
   176  	// tokenColumn is the column at which the current token starts.
   177  	tokenColumn int
   178  	// currentLine tracks the line of the most recently read byte; Next copies it into tokenLine.
   179  	currentLine int
   180  	// currentColumn tracks the column of the most recently read byte; Next copies it into tokenColumn.
   181  	currentColumn int
   182  }
   183  
   184  // AllowCDATA sets whether or not the tokenizer recognizes <![CDATA[foo]]> as
   185  // the text "foo". The default value is false, which means to recognize it as
   186  // a bogus comment "<!-- [CDATA[foo]] -->" instead.
   187  //
   188  // Strictly speaking, an HTML5 compliant tokenizer should allow CDATA if and
   189  // only if tokenizing foreign content, such as MathML and SVG. However,
   190  // tracking foreign-contentness is difficult to do purely in the tokenizer,
   191  // as opposed to the parser, due to HTML integration points: an <svg> element
   192  // can contain a <foreignObject> that is foreign-to-SVG but not foreign-to-
   193  // HTML. For strict compliance with the HTML5 tokenization algorithm, it is the
   194  // responsibility of the user of a tokenizer to call AllowCDATA as appropriate.
   195  // In practice, if using the tokenizer without caring whether MathML or SVG
   196  // CDATA is text or comments, such as tokenizing HTML to find all the anchor
   197  // text, it is acceptable to ignore this responsibility.
   198  func (z *Tokenizer) AllowCDATA(allowCDATA bool) {
   199  	z.allowCDATA = allowCDATA
   200  }
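
        // A minimal sketch of the difference (assuming a strings.NewReader input):
        //
        //	z := NewTokenizer(strings.NewReader("<![CDATA[foo]]>"))
        //	z.AllowCDATA(true)
        //	z.Next() // TextToken; z.Text() == "foo"
        //
        // Without the AllowCDATA(true) call, the same input is tokenized as a
        // bogus CommentToken instead.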
   201  
   202  // NextIsNotRawText instructs the tokenizer that the next token should not be
   203  // considered as 'raw text'. Some elements, such as script and title elements,
   204  // normally require the next token after the opening tag to be 'raw text' that
   205  // has no child elements. For example, tokenizing "<title>a<b>c</b>d</title>"
   206  // yields a start tag token for "<title>", a text token for "a<b>c</b>d", and
   207  // an end tag token for "</title>". There are no distinct start tag or end tag
   208  // tokens for the "<b>" and "</b>".
   209  //
   210  // This tokenizer implementation will generally look for raw text at the right
   211  // times. Strictly speaking, an HTML5 compliant tokenizer should not look for
   212  // raw text if in foreign content: <title> generally needs raw text, but a
   213  // <title> inside an <svg> does not. Another example is that a <textarea>
   214  // generally needs raw text, but a <textarea> is not allowed as an immediate
   215  // child of a <select>; in normal parsing, a <textarea> implies </select>, but
   216  // one cannot close the implicit element when parsing a <select>'s InnerHTML.
   217  // Similarly to AllowCDATA, tracking the correct moment to override raw-text-
   218  // ness is difficult to do purely in the tokenizer, as opposed to the parser.
   219  // For strict compliance with the HTML5 tokenization algorithm, it is the
   220  // responsibility of the user of a tokenizer to call NextIsNotRawText as
   221  // appropriate. In practice, like AllowCDATA, it is acceptable to ignore this
   222  // responsibility for basic usage.
   223  //
   224  // Note that this 'raw text' concept is different from the one offered by the
   225  // Tokenizer.Raw method.
   226  func (z *Tokenizer) NextIsNotRawText() {
   227  	z.rawTag = ""
   228  }
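
        // A sketch of the raw-text behavior described above:
        //
        //	z := NewTokenizer(strings.NewReader("<title>a<b>c</b>d</title>"))
        //	z.Next() // StartTagToken for "title"
        //	z.Next() // TextToken for "a<b>c</b>d"; no tokens for "<b>" or "</b>"
        //	z.Next() // EndTagToken for "title"
        //
        // Calling z.NextIsNotRawText() immediately after the start tag would
        // instead tokenize "<b>" and "</b>" as ordinary tag tokens.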
   229  
   230  // Err returns the error associated with the most recent ErrorToken.
   231  // This is typically io.EOF, meaning the end of tokenization.
   232  func (z *Tokenizer) Err() error {
   233  	if z.tt != ErrorToken {
   234  		return nil
   235  	}
   236  	return z.err
   237  }
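
        // The typical driving loop, as a sketch (assumes packages fmt, io and
        // strings are imported by the caller):
        //
        //	z := NewTokenizer(strings.NewReader("<p>Hello</p>"))
        //	for z.Next() != ErrorToken {
        //		t := z.Token()
        //		fmt.Println(t.Type, t.Data)
        //	}
        //	if z.Err() != io.EOF {
        //		// Tokenization stopped on a real error, not end of input.
        //	}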
   238  
   239  // readByte returns the next byte from the input stream, doing a buffered read
   240  // from z.r into z.buf if necessary. z.buf[z.raw.start:z.raw.end] remains a contiguous byte
   241  // slice that holds all the bytes read so far for the current token.
   242  // It sets z.err if the underlying reader returns an error.
   243  // Pre-condition: z.err == nil.
   244  func (z *Tokenizer) readByte() byte {
   245  	if z.raw.end >= len(z.buf) {
   246  		// Our buffer is exhausted and we have to read from z.r. Check if the
   247  		// previous read resulted in an error.
   248  		if z.readErr != nil {
   249  			z.err = z.readErr
   250  			return 0
   251  		}
   252  		// We copy z.buf[z.raw.start:z.raw.end] to the beginning of z.buf. If the length
   253  		// z.raw.end - z.raw.start is more than half the capacity of z.buf, then we
   254  		// allocate a new buffer before the copy.
   255  		c := cap(z.buf)
   256  		d := z.raw.end - z.raw.start
   257  		var buf1 []byte
   258  		if 2*d > c {
   259  			buf1 = make([]byte, d, 2*c)
   260  		} else {
   261  			buf1 = z.buf[:d]
   262  		}
   263  		copy(buf1, z.buf[z.raw.start:z.raw.end])
   264  		if x := z.raw.start; x != 0 {
   265  			// Adjust the data/attr spans to refer to the same contents after the copy.
   266  			z.data.start -= x
   267  			z.data.end -= x
   268  			z.pendingAttr[0].start -= x
   269  			z.pendingAttr[0].end -= x
   270  			z.pendingAttr[1].start -= x
   271  			z.pendingAttr[1].end -= x
   272  			for i := range z.attr {
   273  				z.attr[i][0].start -= x
   274  				z.attr[i][0].end -= x
   275  				z.attr[i][1].start -= x
   276  				z.attr[i][1].end -= x
   277  			}
   278  		}
   279  		z.raw.start, z.raw.end, z.buf = 0, d, buf1[:d]
   280  		// Now that we have copied the live bytes to the start of the buffer,
   281  		// we read from z.r into the remainder.
   282  		var n int
   283  		n, z.readErr = readAtLeastOneByte(z.r, buf1[d:cap(buf1)])
   284  		if n == 0 {
   285  			z.err = z.readErr
   286  			return 0
   287  		}
   288  		z.buf = buf1[:d+n]
   289  	}
   290  	x := z.buf[z.raw.end]
   291  	z.raw.end++
   292  	if z.maxBuf > 0 && z.raw.end-z.raw.start >= z.maxBuf {
   293  		z.err = ErrBufferExceeded
   294  		return 0
   295  	}
   296  
   297  	// Update the line and column trackers.
   298  	if x == '\n' {
   299  		z.currentLine++
   300  		z.currentColumn = 0
   301  	} else {
   302  		z.currentColumn++
   303  	}
   304  
   305  	return x
   306  }
   307  
   308  // Buffered returns a slice containing data buffered but not yet tokenized.
   309  func (z *Tokenizer) Buffered() []byte {
   310  	return z.buf[z.raw.end:]
   311  }
   312  
   313  // readAtLeastOneByte wraps an io.Reader so that reading cannot return (0, nil).
   314  // It returns io.ErrNoProgress if the underlying r.Read method returns (0, nil)
   315  // too many times in succession.
   316  func readAtLeastOneByte(r io.Reader, b []byte) (int, error) {
   317  	for i := 0; i < 100; i++ {
   318  		n, err := r.Read(b)
   319  		if n != 0 || err != nil {
   320  			return n, err
   321  		}
   322  	}
   323  	return 0, io.ErrNoProgress
   324  }
   325  
   326  // skipWhiteSpace skips past any white space.
   327  func (z *Tokenizer) skipWhiteSpace() {
   328  	if z.err != nil {
   329  		return
   330  	}
   331  	for {
   332  		c := z.readByte()
   333  		if z.err != nil {
   334  			return
   335  		}
   336  		switch c {
   337  		case ' ', '\n', '\r', '\t', '\f':
   338  			// No-op.
   339  		default:
   340  			z.raw.end--
   341  			return
   342  		}
   343  	}
   344  }
   345  
   346  // readRawOrRCDATA reads until the next "</foo>", where "foo" is z.rawTag and
   347  // is typically something like "script" or "textarea".
   348  func (z *Tokenizer) readRawOrRCDATA() {
   349  	if z.rawTag == "script" {
   350  		z.readScript()
   351  		z.textIsRaw = true
   352  		z.rawTag = ""
   353  		return
   354  	}
   355  loop:
   356  	for {
   357  		c := z.readByte()
   358  		if z.err != nil {
   359  			break loop
   360  		}
   361  		if c != '<' {
   362  			continue loop
   363  		}
   364  		c = z.readByte()
   365  		if z.err != nil {
   366  			break loop
   367  		}
   368  		if c != '/' {
   369  			continue loop
   370  		}
   371  		if z.readRawEndTag() || z.err != nil {
   372  			break loop
   373  		}
   374  	}
   375  	z.data.end = z.raw.end
   376  	// A textarea's or title's RCDATA can contain escaped entities.
   377  	z.textIsRaw = z.rawTag != "textarea" && z.rawTag != "title"
   378  	z.rawTag = ""
   379  }
   380  
   381  // readRawEndTag attempts to read a tag like "</foo>", where "foo" is z.rawTag.
   382  // If it succeeds, it backs up the input position to reconsume the tag and
   383  // returns true. Otherwise it returns false. The opening "</" has already been
   384  // consumed.
   385  func (z *Tokenizer) readRawEndTag() bool {
   386  	for i := 0; i < len(z.rawTag); i++ {
   387  		c := z.readByte()
   388  		if z.err != nil {
   389  			return false
   390  		}
   391  		if c != z.rawTag[i] && c != z.rawTag[i]-('a'-'A') {
   392  			z.raw.end--
   393  			return false
   394  		}
   395  	}
   396  	c := z.readByte()
   397  	if z.err != nil {
   398  		return false
   399  	}
   400  	switch c {
   401  	case ' ', '\n', '\r', '\t', '\f', '/', '>':
   402  		// The 3 is 2 for the leading "</" plus 1 for the trailing character c.
   403  		z.raw.end -= 3 + len(z.rawTag)
   404  		return true
   405  	}
   406  	z.raw.end--
   407  	return false
   408  }
   409  
   410  // readScript reads until the next </script> tag, following the byzantine
   411  // rules for escaping/hiding the closing tag.
   412  func (z *Tokenizer) readScript() {
   413  	defer func() {
   414  		z.data.end = z.raw.end
   415  	}()
   416  	var c byte
   417  
   418  scriptData:
   419  	c = z.readByte()
   420  	if z.err != nil {
   421  		return
   422  	}
   423  	if c == '<' {
   424  		goto scriptDataLessThanSign
   425  	}
   426  	goto scriptData
   427  
   428  scriptDataLessThanSign:
   429  	c = z.readByte()
   430  	if z.err != nil {
   431  		return
   432  	}
   433  	switch c {
   434  	case '/':
   435  		goto scriptDataEndTagOpen
   436  	case '!':
   437  		goto scriptDataEscapeStart
   438  	}
   439  	z.raw.end--
   440  	goto scriptData
   441  
   442  scriptDataEndTagOpen:
   443  	if z.readRawEndTag() || z.err != nil {
   444  		return
   445  	}
   446  	goto scriptData
   447  
   448  scriptDataEscapeStart:
   449  	c = z.readByte()
   450  	if z.err != nil {
   451  		return
   452  	}
   453  	if c == '-' {
   454  		goto scriptDataEscapeStartDash
   455  	}
   456  	z.raw.end--
   457  	goto scriptData
   458  
   459  scriptDataEscapeStartDash:
   460  	c = z.readByte()
   461  	if z.err != nil {
   462  		return
   463  	}
   464  	if c == '-' {
   465  		goto scriptDataEscapedDashDash
   466  	}
   467  	z.raw.end--
   468  	goto scriptData
   469  
   470  scriptDataEscaped:
   471  	c = z.readByte()
   472  	if z.err != nil {
   473  		return
   474  	}
   475  	switch c {
   476  	case '-':
   477  		goto scriptDataEscapedDash
   478  	case '<':
   479  		goto scriptDataEscapedLessThanSign
   480  	}
   481  	goto scriptDataEscaped
   482  
   483  scriptDataEscapedDash:
   484  	c = z.readByte()
   485  	if z.err != nil {
   486  		return
   487  	}
   488  	switch c {
   489  	case '-':
   490  		goto scriptDataEscapedDashDash
   491  	case '<':
   492  		goto scriptDataEscapedLessThanSign
   493  	}
   494  	goto scriptDataEscaped
   495  
   496  scriptDataEscapedDashDash:
   497  	c = z.readByte()
   498  	if z.err != nil {
   499  		return
   500  	}
   501  	switch c {
   502  	case '-':
   503  		goto scriptDataEscapedDashDash
   504  	case '<':
   505  		goto scriptDataEscapedLessThanSign
   506  	case '>':
   507  		goto scriptData
   508  	}
   509  	goto scriptDataEscaped
   510  
   511  scriptDataEscapedLessThanSign:
   512  	c = z.readByte()
   513  	if z.err != nil {
   514  		return
   515  	}
   516  	if c == '/' {
   517  		goto scriptDataEscapedEndTagOpen
   518  	}
   519  	if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' {
   520  		goto scriptDataDoubleEscapeStart
   521  	}
   522  	z.raw.end--
   523  	goto scriptData
   524  
   525  scriptDataEscapedEndTagOpen:
   526  	if z.readRawEndTag() || z.err != nil {
   527  		return
   528  	}
   529  	goto scriptDataEscaped
   530  
   531  scriptDataDoubleEscapeStart:
   532  	z.raw.end--
   533  	for i := 0; i < len("script"); i++ {
   534  		c = z.readByte()
   535  		if z.err != nil {
   536  			return
   537  		}
   538  		if c != "script"[i] && c != "SCRIPT"[i] {
   539  			z.raw.end--
   540  			goto scriptDataEscaped
   541  		}
   542  	}
   543  	c = z.readByte()
   544  	if z.err != nil {
   545  		return
   546  	}
   547  	switch c {
   548  	case ' ', '\n', '\r', '\t', '\f', '/', '>':
   549  		goto scriptDataDoubleEscaped
   550  	}
   551  	z.raw.end--
   552  	goto scriptDataEscaped
   553  
   554  scriptDataDoubleEscaped:
   555  	c = z.readByte()
   556  	if z.err != nil {
   557  		return
   558  	}
   559  	switch c {
   560  	case '-':
   561  		goto scriptDataDoubleEscapedDash
   562  	case '<':
   563  		goto scriptDataDoubleEscapedLessThanSign
   564  	}
   565  	goto scriptDataDoubleEscaped
   566  
   567  scriptDataDoubleEscapedDash:
   568  	c = z.readByte()
   569  	if z.err != nil {
   570  		return
   571  	}
   572  	switch c {
   573  	case '-':
   574  		goto scriptDataDoubleEscapedDashDash
   575  	case '<':
   576  		goto scriptDataDoubleEscapedLessThanSign
   577  	}
   578  	goto scriptDataDoubleEscaped
   579  
   580  scriptDataDoubleEscapedDashDash:
   581  	c = z.readByte()
   582  	if z.err != nil {
   583  		return
   584  	}
   585  	switch c {
   586  	case '-':
   587  		goto scriptDataDoubleEscapedDashDash
   588  	case '<':
   589  		goto scriptDataDoubleEscapedLessThanSign
   590  	case '>':
   591  		goto scriptData
   592  	}
   593  	goto scriptDataDoubleEscaped
   594  
   595  scriptDataDoubleEscapedLessThanSign:
   596  	c = z.readByte()
   597  	if z.err != nil {
   598  		return
   599  	}
   600  	if c == '/' {
   601  		goto scriptDataDoubleEscapeEnd
   602  	}
   603  	z.raw.end--
   604  	goto scriptDataDoubleEscaped
   605  
   606  scriptDataDoubleEscapeEnd:
   607  	if z.readRawEndTag() {
   608  		z.raw.end += len("</script>")
   609  		goto scriptDataEscaped
   610  	}
   611  	if z.err != nil {
   612  		return
   613  	}
   614  	goto scriptDataDoubleEscaped
   615  }
   616  
   617  // readComment reads the next comment token starting with "<!--". The opening
   618  // "<!--" has already been consumed.
   619  func (z *Tokenizer) readComment() {
   620  	z.data.start = z.raw.end
   621  	defer func() {
   622  		if z.data.end < z.data.start {
   623  			// It's a comment with no data, like <!-->.
   624  			z.data.end = z.data.start
   625  		}
   626  	}()
   627  	for dashCount := 2; ; {
   628  		c := z.readByte()
   629  		if z.err != nil {
   630  			// Ignore up to two dashes at EOF.
   631  			if dashCount > 2 {
   632  				dashCount = 2
   633  			}
   634  			z.data.end = z.raw.end - dashCount
   635  			return
   636  		}
   637  		switch c {
   638  		case '-':
   639  			dashCount++
   640  			continue
   641  		case '>':
   642  			if dashCount >= 2 {
   643  				z.data.end = z.raw.end - len("-->")
   644  				return
   645  			}
   646  		case '!':
   647  			if dashCount >= 2 {
   648  				c = z.readByte()
   649  				if z.err != nil {
   650  					z.data.end = z.raw.end
   651  					return
   652  				}
   653  				if c == '>' {
   654  					z.data.end = z.raw.end - len("--!>")
   655  					return
   656  				}
   657  			}
   658  		}
   659  		dashCount = 0
   660  	}
   661  }
   662  
   663  // readUntilCloseAngle reads until the next ">".
   664  func (z *Tokenizer) readUntilCloseAngle() {
   665  	z.data.start = z.raw.end
   666  	for {
   667  		c := z.readByte()
   668  		if z.err != nil {
   669  			z.data.end = z.raw.end
   670  			return
   671  		}
   672  		if c == '>' {
   673  			z.data.end = z.raw.end - len(">")
   674  			return
   675  		}
   676  	}
   677  }
   678  
   679  // readMarkupDeclaration reads the next token starting with "<!". It might be
   680  // a "<!--comment-->", a "<!DOCTYPE foo>", a "<![CDATA[section]]>" or
   681  // "<!a bogus comment". The opening "<!" has already been consumed.
   682  func (z *Tokenizer) readMarkupDeclaration() TokenType {
   683  	z.data.start = z.raw.end
   684  	var c [2]byte
   685  	for i := 0; i < 2; i++ {
   686  		c[i] = z.readByte()
   687  		if z.err != nil {
   688  			z.data.end = z.raw.end
   689  			return CommentToken
   690  		}
   691  	}
   692  	if c[0] == '-' && c[1] == '-' {
   693  		z.readComment()
   694  		return CommentToken
   695  	}
   696  	z.raw.end -= 2
   697  	if z.readDoctype() {
   698  		return DoctypeToken
   699  	}
   700  	if z.allowCDATA && z.readCDATA() {
   701  		z.convertNUL = true
   702  		return TextToken
   703  	}
   704  	// It's a bogus comment.
   705  	z.readUntilCloseAngle()
   706  	return CommentToken
   707  }
   708  
   709  // readDoctype attempts to read a doctype declaration and returns true if
   710  // successful. The opening "<!" has already been consumed.
   711  func (z *Tokenizer) readDoctype() bool {
   712  	const s = "DOCTYPE"
   713  	for i := 0; i < len(s); i++ {
   714  		c := z.readByte()
   715  		if z.err != nil {
   716  			z.data.end = z.raw.end
   717  			return false
   718  		}
   719  		if c != s[i] && c != s[i]+('a'-'A') {
   720  			// Back up to read the fragment of "DOCTYPE" again.
   721  			z.raw.end = z.data.start
   722  			return false
   723  		}
   724  	}
   725  	if z.skipWhiteSpace(); z.err != nil {
   726  		z.data.start = z.raw.end
   727  		z.data.end = z.raw.end
   728  		return true
   729  	}
   730  	z.readUntilCloseAngle()
   731  	return true
   732  }
   733  
   734  // readCDATA attempts to read a CDATA section and returns true if
   735  // successful. The opening "<!" has already been consumed.
   736  func (z *Tokenizer) readCDATA() bool {
   737  	const s = "[CDATA["
   738  	for i := 0; i < len(s); i++ {
   739  		c := z.readByte()
   740  		if z.err != nil {
   741  			z.data.end = z.raw.end
   742  			return false
   743  		}
   744  		if c != s[i] {
   745  			// Back up to read the fragment of "[CDATA[" again.
   746  			z.raw.end = z.data.start
   747  			return false
   748  		}
   749  	}
   750  	z.data.start = z.raw.end
   751  	brackets := 0
   752  	for {
   753  		c := z.readByte()
   754  		if z.err != nil {
   755  			z.data.end = z.raw.end
   756  			return true
   757  		}
   758  		switch c {
   759  		case ']':
   760  			brackets++
   761  		case '>':
   762  			if brackets >= 2 {
   763  				z.data.end = z.raw.end - len("]]>")
   764  				return true
   765  			}
   766  			brackets = 0
   767  		default:
   768  			brackets = 0
   769  		}
   770  	}
   771  }
   772  
   773  // startTagIn returns whether the start tag in z.buf[z.data.start:z.data.end]
   774  // case-insensitively matches any element of ss.
   775  func (z *Tokenizer) startTagIn(ss ...string) bool {
   776  loop:
   777  	for _, s := range ss {
   778  		if z.data.end-z.data.start != len(s) {
   779  			continue loop
   780  		}
   781  		for i := 0; i < len(s); i++ {
   782  			c := z.buf[z.data.start+i]
   783  			if 'A' <= c && c <= 'Z' {
   784  				c += 'a' - 'A'
   785  			}
   786  			if c != s[i] {
   787  				continue loop
   788  			}
   789  		}
   790  		return true
   791  	}
   792  	return false
   793  }
   794  
   795  // readStartTag reads the next start tag token. The opening "<a" has already
   796  // been consumed, where 'a' means anything in [A-Za-z].
   797  func (z *Tokenizer) readStartTag() TokenType {
   798  	z.readTag(true)
   799  	if z.err != nil {
   800  		return ErrorToken
   801  	}
   802  	// Several tags flag the tokenizer's next token as raw.
   803  	c, raw := z.buf[z.data.start], false
   804  	if 'A' <= c && c <= 'Z' {
   805  		c += 'a' - 'A'
   806  	}
   807  	switch c {
   808  	case 'i':
   809  		raw = z.startTagIn("iframe")
   810  	case 'n':
   811  		raw = z.startTagIn("noembed", "noframes", "noscript")
   812  	case 'p':
   813  		raw = z.startTagIn("plaintext")
   814  	case 's':
   815  		raw = z.startTagIn("script", "style")
   816  	case 't':
   817  		raw = z.startTagIn("textarea", "title")
   818  	case 'x':
   819  		raw = z.startTagIn("xmp")
   820  	}
   821  	if raw {
   822  		z.rawTag = strings.ToLower(string(z.buf[z.data.start:z.data.end]))
   823  	}
   824  	// Look for a self-closing token like "<br/>".
   825  	if z.err == nil && z.buf[z.raw.end-2] == '/' {
   826  		return SelfClosingTagToken
   827  	}
   828  	return StartTagToken
   829  }
   830  
   831  // readTag reads the next tag token and its attributes. If saveAttr, those
   832  // attributes are saved in z.attr, otherwise z.attr is set to an empty slice.
   833  // The opening "<a" or "</a" has already been consumed, where 'a' means anything
   834  // in [A-Za-z].
   835  func (z *Tokenizer) readTag(saveAttr bool) {
   836  	z.attr = z.attr[:0]
   837  	z.nAttrReturned = 0
   838  	// Read the tag name and attribute key/value pairs.
   839  	z.readTagName()
   840  	if z.skipWhiteSpace(); z.err != nil {
   841  		return
   842  	}
   843  	for {
   844  		c := z.readByte()
   845  		if z.err != nil || c == '>' {
   846  			break
   847  		}
   848  		z.raw.end--
   849  		z.readTagAttrKey()
   850  		z.readTagAttrVal()
   851  		// Save pendingAttr if saveAttr and that attribute has a non-empty key.
   852  		if saveAttr && z.pendingAttr[0].start != z.pendingAttr[0].end {
   853  			z.attr = append(z.attr, z.pendingAttr)
   854  		}
   855  		if z.skipWhiteSpace(); z.err != nil {
   856  			break
   857  		}
   858  	}
   859  }
   860  
   861  // readTagName sets z.data to the "div" in "<div k=v>". The reader (z.raw.end)
   862  // is positioned such that the first byte of the tag name (the "d" in "<div")
   863  // has already been consumed.
   864  func (z *Tokenizer) readTagName() {
   865  	z.data.start = z.raw.end - 1
   866  	for {
   867  		c := z.readByte()
   868  		if z.err != nil {
   869  			z.data.end = z.raw.end
   870  			return
   871  		}
   872  		switch c {
   873  		case ' ', '\n', '\r', '\t', '\f':
   874  			z.data.end = z.raw.end - 1
   875  			return
   876  		case '/', '>':
   877  			z.raw.end--
   878  			z.data.end = z.raw.end
   879  			return
   880  		}
   881  	}
   882  }
   883  
   884  // readTagAttrKey sets z.pendingAttr[0] to the "k" in "<div k=v>".
   885  // Precondition: z.err == nil.
   886  func (z *Tokenizer) readTagAttrKey() {
   887  	z.pendingAttr[0].start = z.raw.end
   888  	for {
   889  		c := z.readByte()
   890  		if z.err != nil {
   891  			z.pendingAttr[0].end = z.raw.end
   892  			return
   893  		}
   894  		switch c {
   895  		case ' ', '\n', '\r', '\t', '\f', '/':
   896  			z.pendingAttr[0].end = z.raw.end - 1
   897  			return
   898  		case '=', '>':
   899  			z.raw.end--
   900  			z.pendingAttr[0].end = z.raw.end
   901  			return
   902  		}
   903  	}
   904  }
   905  
   906  // readTagAttrVal sets z.pendingAttr[1] to the "v" in "<div k=v>".
   907  func (z *Tokenizer) readTagAttrVal() {
   908  	z.pendingAttr[1].start = z.raw.end
   909  	z.pendingAttr[1].end = z.raw.end
   910  	if z.skipWhiteSpace(); z.err != nil {
   911  		return
   912  	}
   913  	c := z.readByte()
   914  	if z.err != nil {
   915  		return
   916  	}
   917  	if c != '=' {
   918  		z.raw.end--
   919  		return
   920  	}
   921  	if z.skipWhiteSpace(); z.err != nil {
   922  		return
   923  	}
   924  	quote := z.readByte()
   925  	if z.err != nil {
   926  		return
   927  	}
   928  	switch quote {
   929  	case '>':
   930  		z.raw.end--
   931  		return
   932  
   933  	case '\'', '"':
   934  		z.pendingAttr[1].start = z.raw.end
   935  		for {
   936  			c := z.readByte()
   937  			if z.err != nil {
   938  				z.pendingAttr[1].end = z.raw.end
   939  				return
   940  			}
   941  			if c == quote {
   942  				z.pendingAttr[1].end = z.raw.end - 1
   943  				return
   944  			}
   945  		}
   946  
   947  	default:
   948  		z.pendingAttr[1].start = z.raw.end - 1
   949  		for {
   950  			c := z.readByte()
   951  			if z.err != nil {
   952  				z.pendingAttr[1].end = z.raw.end
   953  				return
   954  			}
   955  			switch c {
   956  			case ' ', '\n', '\r', '\t', '\f':
   957  				z.pendingAttr[1].end = z.raw.end - 1
   958  				return
   959  			case '>':
   960  				z.raw.end--
   961  				z.pendingAttr[1].end = z.raw.end
   962  				return
   963  			}
   964  		}
   965  	}
   966  }
   967  
   968  // Next scans the next token and returns its type.
   969  func (z *Tokenizer) Next() TokenType {
   970  	z.tokenLine = z.currentLine
   971  	z.tokenColumn = z.currentColumn
   972  
   973  	z.raw.start = z.raw.end
   974  	z.data.start = z.raw.end
   975  	z.data.end = z.raw.end
   976  	if z.err != nil {
   977  		z.tt = ErrorToken
   978  		return z.tt
   980  	}
   981  	if z.rawTag != "" {
   982  		if z.rawTag == "plaintext" {
   983  			// Read everything up to EOF.
   984  			for z.err == nil {
   985  				z.readByte()
   986  			}
   987  			z.data.end = z.raw.end
   988  			z.textIsRaw = true
   989  		} else {
   990  			z.readRawOrRCDATA()
   991  		}
   992  		if z.data.end > z.data.start {
   993  			z.tt = TextToken
   994  			z.convertNUL = true
   995  			return z.tt
   996  		}
   997  	}
   998  	z.textIsRaw = false
   999  	z.convertNUL = false
  1000  
  1001  loop:
  1002  	for {
  1003  		c := z.readByte()
  1004  		if z.err != nil {
  1005  			break loop
  1006  		}
  1007  		if c != '<' {
  1008  			continue loop
  1009  		}
  1010  
  1011  		// Check if the '<' we have just read is part of a tag, comment
  1012  		// or doctype. If not, it's part of the accumulated text token.
  1013  		c = z.readByte()
  1014  		if z.err != nil {
  1015  			break loop
  1016  		}
  1017  		var tokenType TokenType
  1018  		switch {
  1019  		case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z':
  1020  			tokenType = StartTagToken
  1021  		case c == '/':
  1022  			tokenType = EndTagToken
  1023  		case c == '!' || c == '?':
  1024  			// We use CommentToken to mean any of "<!--actual comments-->",
  1025  			// "<!DOCTYPE declarations>" and "<?xml processing instructions?>".
  1026  			tokenType = CommentToken
  1027  		default:
  1028  			// Reconsume the current character.
  1029  			z.raw.end--
  1030  			continue
  1031  		}
  1032  
  1033  		// We have a non-text token, but we might have accumulated some text
  1034  		// before that. If so, we return the text first, and return the non-
  1035  		// text token on the subsequent call to Next.
  1036  		if x := z.raw.end - len("<a"); z.raw.start < x {
  1037  			z.raw.end = x
  1038  			z.data.end = x
  1039  			z.tt = TextToken
  1040  			return z.tt
  1041  		}
  1042  		switch tokenType {
  1043  		case StartTagToken:
  1044  			z.tt = z.readStartTag()
  1045  			return z.tt
  1046  		case EndTagToken:
  1047  			c = z.readByte()
  1048  			if z.err != nil {
  1049  				break loop
  1050  			}
  1051  			if c == '>' {
  1052  				// "</>" does not generate a token at all. Generate an empty comment
  1053  				// to allow passthrough clients to pick up the data using Raw.
  1054  				// Reset the tokenizer state and start again.
  1055  				z.tt = CommentToken
  1056  				return z.tt
  1057  			}
  1058  			if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' {
  1059  				z.readTag(false)
  1060  				if z.err != nil {
  1061  					z.tt = ErrorToken
  1062  				} else {
  1063  					z.tt = EndTagToken
  1064  				}
  1065  				return z.tt
  1066  			}
  1067  			z.raw.end--
  1068  			z.readUntilCloseAngle()
  1069  			z.tt = CommentToken
  1070  			return z.tt
  1071  		case CommentToken:
  1072  			if c == '!' {
  1073  				z.tt = z.readMarkupDeclaration()
  1074  				return z.tt
  1075  			}
  1076  			z.raw.end--
  1077  			z.readUntilCloseAngle()
  1078  			z.tt = CommentToken
  1079  			return z.tt
  1080  		}
  1081  	}
  1082  	if z.raw.start < z.raw.end {
  1083  		z.data.end = z.raw.end
  1084  		z.tt = TextToken
  1085  		return z.tt
  1086  	}
  1087  	z.tt = ErrorToken
  1088  	return z.tt
  1089  }
  1090  
  1091  // Raw returns the unmodified text of the current token. Calling Next, Token,
  1092  // Text, TagName or TagAttr may change the contents of the returned slice.
  1093  func (z *Tokenizer) Raw() []byte {
  1094  	return z.buf[z.raw.start:z.raw.end]
  1095  }
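
        // Loosely, concatenating Raw across the stream reproduces the consumed
        // input, so a passthrough client can be sketched as:
        //
        //	var out bytes.Buffer
        //	for z.Next() != ErrorToken {
        //		out.Write(z.Raw()) // copy before the next call to Next
        //	}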
  1096  
  1097  // convertNewlines converts "\r" and "\r\n" in s to "\n".
  1098  // The conversion happens in place, but the resulting slice may be shorter.
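        // For example, convertNewlines([]byte("a\r\nb\rc")) returns []byte("a\nb\nc").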
  1099  func convertNewlines(s []byte) []byte {
  1100  	for i, c := range s {
  1101  		if c != '\r' {
  1102  			continue
  1103  		}
  1104  
  1105  		src := i + 1
  1106  		if src >= len(s) || s[src] != '\n' {
  1107  			s[i] = '\n'
  1108  			continue
  1109  		}
  1110  
  1111  		dst := i
  1112  		for src < len(s) {
  1113  			if s[src] == '\r' {
  1114  				if src+1 < len(s) && s[src+1] == '\n' {
  1115  					src++
  1116  				}
  1117  				s[dst] = '\n'
  1118  			} else {
  1119  				s[dst] = s[src]
  1120  			}
  1121  			src++
  1122  			dst++
  1123  		}
  1124  		return s[:dst]
  1125  	}
  1126  	return s
  1127  }
  1128  
  1129  var (
  1130  	nul         = []byte("\x00")
  1131  	replacement = []byte("\ufffd")
  1132  )
  1133  
  1134  // Text returns the unescaped text of a text, comment or doctype token. The
  1135  // contents of the returned slice may change on the next call to Next.
  1136  func (z *Tokenizer) Text() []byte {
  1137  	switch z.tt {
  1138  	case TextToken, CommentToken, DoctypeToken:
  1139  		s := z.buf[z.data.start:z.data.end]
  1140  		z.data.start = z.raw.end
  1141  		z.data.end = z.raw.end
  1142  		s = convertNewlines(s)
  1143  		if (z.convertNUL || z.tt == CommentToken) && bytes.Contains(s, nul) {
  1144  			s = bytes.Replace(s, nul, replacement, -1)
  1145  		}
  1146  		if !z.textIsRaw {
  1147  			s = unescape(s, false)
  1148  		}
  1149  		return s
  1150  	}
  1151  	return nil
  1152  }
  1153  
  1154  // TagName returns the name of a tag token (the `img` out of `<IMG SRC="foo">`),
  1155  // lower-cased unless the name contains a colon, and whether the tag has
  1156  // attributes. The contents of the returned slice may change on the next call to Next.
  1157  func (z *Tokenizer) TagName() (name []byte, hasAttr bool) {
  1158  	if z.data.start < z.data.end {
  1159  		switch z.tt {
  1160  		case StartTagToken, EndTagToken, SelfClosingTagToken:
  1161  			s := z.buf[z.data.start:z.data.end]
  1162  			z.data.start = z.raw.end
  1163  			z.data.end = z.raw.end
  1164  			if !strings.Contains(string(s), ":") {
  1165  				s = lower(s)
  1166  			}
  1167  			return s, z.nAttrReturned < len(z.attr)
  1168  		}
  1169  	}
  1170  	return nil, false
  1171  }
  1172  
  1173  // TagAttr returns the key and unescaped value of the next unparsed attribute
  1174  // for the current tag token and whether there are more attributes. The key is
  1175  // lower-cased unless it begins with ':'. The returned slices may change on the next call to Next.
  1176  func (z *Tokenizer) TagAttr() (key, val []byte, moreAttr bool) {
  1177  	if z.nAttrReturned < len(z.attr) {
  1178  		switch z.tt {
  1179  		case StartTagToken, SelfClosingTagToken:
  1180  			x := z.attr[z.nAttrReturned]
  1181  			z.nAttrReturned++
  1182  			key = z.buf[x[0].start:x[0].end]
  1183  			val = z.buf[x[1].start:x[1].end]
  1184  			if !strings.HasPrefix(string(key), ":") {
  1185  				key = lower(key)
  1186  			}
  1187  			return key, unescape(convertNewlines(val), true), z.nAttrReturned < len(z.attr)
  1188  		}
  1189  	}
  1190  	return nil, nil, false
  1191  }
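
        // TagName and TagAttr form the low-level, allocation-light API; a sketch
        // of draining one tag's attributes (assumes package fmt):
        //
        //	if tt := z.Next(); tt == StartTagToken || tt == SelfClosingTagToken {
        //		name, moreAttr := z.TagName()
        //		fmt.Printf("<%s>\n", name)
        //		for moreAttr {
        //			var key, val []byte
        //			key, val, moreAttr = z.TagAttr()
        //			fmt.Printf("  %s=%q\n", key, val)
        //		}
        //	}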
  1192  
  1193  // Token returns the current Token. The result's Data and Attr values remain
  1194  // valid after subsequent Next calls.
  1195  func (z *Tokenizer) Token() Token {
  1196  	t := Token{Type: z.tt, Line: z.tokenLine, Column: z.tokenColumn}
  1197  	switch z.tt {
  1198  	case TextToken, CommentToken, DoctypeToken:
  1199  		t.Data = string(z.Text())
  1200  	case StartTagToken, SelfClosingTagToken, EndTagToken:
  1201  		name, moreAttr := z.TagName()
  1202  		for moreAttr {
  1203  			var key, val []byte
  1204  			key, val, moreAttr = z.TagAttr()
  1205  			t.Attr = append(t.Attr, Attribute{"", atom.String(key), string(val)})
  1206  		}
  1207  		// Intern known tag names via the atom table; otherwise keep the raw name.
  1208  		if a := atom.Lookup(name); a != 0 {
  1209  			t.DataAtom, t.Data = a, a.String()
  1210  		} else {
  1211  			t.DataAtom, t.Data = 0, string(name)
  1212  		}
  1213  	}
  1214  	return t
  1215  }
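
        // Unlike Text, TagName and TagAttr, Token copies the data out of the
        // tokenizer's buffer, so the results may be retained, e.g.:
        //
        //	var tokens []Token
        //	for z.Next() != ErrorToken {
        //		tokens = append(tokens, z.Token()) // safe to keep across Next calls
        //	}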
  1216  
  1217  // SetMaxBuf sets a limit on the amount of data buffered during tokenization.
  1218  // A value of 0 means unlimited.
  1219  func (z *Tokenizer) SetMaxBuf(n int) {
  1220  	z.maxBuf = n
  1221  }
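
        // A sketch of bounding memory use for untrusted input (r is any io.Reader):
        //
        //	z := NewTokenizer(r)
        //	z.SetMaxBuf(1 << 20) // oversized tokens yield an ErrorToken with Err() == ErrBufferExceeded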
  1222  
  1223  // NewTokenizer returns a new HTML Tokenizer for the given Reader.
  1224  // The input is assumed to be UTF-8 encoded.
  1225  func NewTokenizer(r io.Reader) *Tokenizer {
  1226  	return NewTokenizerFragment(r, "")
  1227  }
  1228  
  1229  // NewTokenizerFragment returns a new HTML Tokenizer for the given Reader, for
  1230  // tokenizing an existing element's InnerHTML fragment. contextTag is that
  1231  // element's tag, such as "div" or "iframe".
  1232  //
  1233  // For example, how the InnerHTML "a<b" is tokenized depends on whether it is
  1234  // for a <p> tag or a <script> tag.
  1235  //
  1236  // The input is assumed to be UTF-8 encoded.
  1237  func NewTokenizerFragment(r io.Reader, contextTag string) *Tokenizer {
  1238  	z := &Tokenizer{
  1239  		r:   r,
  1240  		buf: make([]byte, 0, 4096),
  1241  	}
  1242  	if contextTag != "" {
  1243  		switch s := strings.ToLower(contextTag); s {
  1244  		case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "title", "textarea", "xmp":
  1245  			z.rawTag = s
  1246  		}
  1247  	}
  1248  	return z
  1249  }
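
        // A sketch of the context-dependent behavior described above:
        //
        //	z := NewTokenizerFragment(strings.NewReader("a<b>c"), "script")
        //	z.Next() // a single TextToken: "a<b>c" is raw script text
        //
        //	z = NewTokenizerFragment(strings.NewReader("a<b>c"), "p")
        //	z.Next() // TextToken "a"
        //	z.Next() // StartTagToken "b"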