...

Source file src/go/parser/parser.go

Documentation: go/parser

		 1  // Copyright 2009 The Go Authors. All rights reserved.
		 2  // Use of this source code is governed by a BSD-style
		 3  // license that can be found in the LICENSE file.
		 4  
		 5  // Package parser implements a parser for Go source files. Input may be
		 6  // provided in a variety of forms (see the various Parse* functions); the
		 7  // output is an abstract syntax tree (AST) representing the Go source. The
		 8  // parser is invoked through one of the Parse* functions.
		 9  //
		10  // The parser accepts a larger language than is syntactically permitted by
		11  // the Go spec, for simplicity, and for improved robustness in the presence
		12  // of syntax errors. For instance, in method declarations, the receiver is
		13  // treated like an ordinary parameter list and thus may contain multiple
		14  // entries where the spec permits exactly one. Consequently, the corresponding
		15  // field in the AST (ast.FuncDecl.Recv) field is not restricted to one entry.
		16  //
		17  package parser
		18  
		19  import (
		20  	"fmt"
		21  	"go/ast"
		22  	"go/internal/typeparams"
		23  	"go/scanner"
		24  	"go/token"
		25  	"strconv"
		26  	"strings"
		27  	"unicode"
		28  )
		29  
// The parser structure holds the parser's internal state.
type parser struct {
	file    *token.File       // file being parsed (provides position information)
	errors  scanner.ErrorList // parse errors accumulated so far
	scanner scanner.Scanner   // tokenizer producing the token stream

	// Tracing/debugging
	mode   Mode // parsing mode
	trace  bool // == (mode&Trace != 0)
	indent int  // indentation used for tracing output

	// Comments
	comments    []*ast.CommentGroup // all comment groups, in source order
	leadComment *ast.CommentGroup   // last lead comment
	lineComment *ast.CommentGroup   // last line comment

	// Next token
	pos token.Pos   // token position
	tok token.Token // one token look-ahead
	lit string      // token literal

	// Error recovery
	// (used to limit the number of calls to parser.advance
	// w/o making scanning progress - avoids potential endless
	// loops across multiple parser functions during error recovery)
	syncPos token.Pos // last synchronization position
	syncCnt int       // number of parser.advance calls without progress

	// Non-syntactic parser control
	exprLev int  // < 0: in control clause, >= 0: in expression
	inRhs   bool // if set, the parser is parsing a rhs expression

	imports []*ast.ImportSpec // list of imports

	// nestLev is used to track and limit the recursion depth
	// during parsing.
	nestLev int
}
		68  
		69  func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode Mode) {
		70  	p.file = fset.AddFile(filename, -1, len(src))
		71  	var m scanner.Mode
		72  	if mode&ParseComments != 0 {
		73  		m = scanner.ScanComments
		74  	}
		75  	eh := func(pos token.Position, msg string) { p.errors.Add(pos, msg) }
		76  	p.scanner.Init(p.file, src, eh, m)
		77  
		78  	p.mode = mode
		79  	p.trace = mode&Trace != 0 // for convenience (p.trace is used frequently)
		80  	p.next()
		81  }
		82  
		83  func (p *parser) parseTypeParams() bool {
		84  	return typeparams.Enabled && p.mode&typeparams.DisallowParsing == 0
		85  }
		86  
		87  // ----------------------------------------------------------------------------
		88  // Parsing support
		89  
		90  func (p *parser) printTrace(a ...interface{}) {
		91  	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
		92  	const n = len(dots)
		93  	pos := p.file.Position(p.pos)
		94  	fmt.Printf("%5d:%3d: ", pos.Line, pos.Column)
		95  	i := 2 * p.indent
		96  	for i > n {
		97  		fmt.Print(dots)
		98  		i -= n
		99  	}
	 100  	// i <= n
	 101  	fmt.Print(dots[0:i])
	 102  	fmt.Println(a...)
	 103  }
	 104  
// trace prints an opening trace line for msg and increases the trace
// indentation. It returns p so it composes with un in a single defer:
// defer un(trace(p, "...")).
func trace(p *parser, msg string) *parser {
	p.printTrace(msg, "(")
	p.indent++
	return p
}
	 110  
// un closes a trace opened by trace, decrementing the indentation first
// so the closing ")" aligns with the matching opening "(".
// Usage pattern: defer un(trace(p, "..."))
func un(p *parser) {
	p.indent--
	p.printTrace(")")
}
	 116  
	 117  // maxNestLev is the deepest we're willing to recurse during parsing
	 118  const maxNestLev int = 1e5
	 119  
	 120  func incNestLev(p *parser) *parser {
	 121  	p.nestLev++
	 122  	if p.nestLev > maxNestLev {
	 123  		p.error(p.pos, "exceeded max nesting depth")
	 124  		panic(bailout{})
	 125  	}
	 126  	return p
	 127  }
	 128  
// decNestLev is used to track nesting depth during parsing to prevent stack exhaustion.
// It is used along with incNestLev in a similar fashion to how un and trace are used.
func decNestLev(p *parser) {
	p.nestLev--
}
	 134  
	 135  // Advance to the next token.
	 136  func (p *parser) next0() {
	 137  	// Because of one-token look-ahead, print the previous token
	 138  	// when tracing as it provides a more readable output. The
	 139  	// very first token (!p.pos.IsValid()) is not initialized
	 140  	// (it is token.ILLEGAL), so don't print it.
	 141  	if p.trace && p.pos.IsValid() {
	 142  		s := p.tok.String()
	 143  		switch {
	 144  		case p.tok.IsLiteral():
	 145  			p.printTrace(s, p.lit)
	 146  		case p.tok.IsOperator(), p.tok.IsKeyword():
	 147  			p.printTrace("\"" + s + "\"")
	 148  		default:
	 149  			p.printTrace(s)
	 150  		}
	 151  	}
	 152  
	 153  	p.pos, p.tok, p.lit = p.scanner.Scan()
	 154  }
	 155  
	 156  // Consume a comment and return it and the line on which it ends.
	 157  func (p *parser) consumeComment() (comment *ast.Comment, endline int) {
	 158  	// /*-style comments may end on a different line than where they start.
	 159  	// Scan the comment for '\n' chars and adjust endline accordingly.
	 160  	endline = p.file.Line(p.pos)
	 161  	if p.lit[1] == '*' {
	 162  		// don't use range here - no need to decode Unicode code points
	 163  		for i := 0; i < len(p.lit); i++ {
	 164  			if p.lit[i] == '\n' {
	 165  				endline++
	 166  			}
	 167  		}
	 168  	}
	 169  
	 170  	comment = &ast.Comment{Slash: p.pos, Text: p.lit}
	 171  	p.next0()
	 172  
	 173  	return
	 174  }
	 175  
	 176  // Consume a group of adjacent comments, add it to the parser's
	 177  // comments list, and return it together with the line at which
	 178  // the last comment in the group ends. A non-comment token or n
	 179  // empty lines terminate a comment group.
	 180  //
	 181  func (p *parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
	 182  	var list []*ast.Comment
	 183  	endline = p.file.Line(p.pos)
	 184  	for p.tok == token.COMMENT && p.file.Line(p.pos) <= endline+n {
	 185  		var comment *ast.Comment
	 186  		comment, endline = p.consumeComment()
	 187  		list = append(list, comment)
	 188  	}
	 189  
	 190  	// add comment group to the comments list
	 191  	comments = &ast.CommentGroup{List: list}
	 192  	p.comments = append(p.comments, comments)
	 193  
	 194  	return
	 195  }
	 196  
// Advance to the next non-comment token. In the process, collect
// any comment groups encountered, and remember the last lead and
// line comments.
//
// A lead comment is a comment group that starts and ends in a
// line without any other tokens and that is followed by a non-comment
// token on the line immediately after the comment group.
//
// A line comment is a comment group that follows a non-comment
// token on the same line, and that has no tokens after it on the line
// where it ends.
//
// Lead and line comments may be considered documentation that is
// stored in the AST.
//
func (p *parser) next() {
	p.leadComment = nil
	p.lineComment = nil
	prev := p.pos // position of the token being left behind
	p.next0()

	if p.tok == token.COMMENT {
		var comment *ast.CommentGroup
		var endline int

		if p.file.Line(p.pos) == p.file.Line(prev) {
			// The comment is on same line as the previous token; it
			// cannot be a lead comment but may be a line comment.
			comment, endline = p.consumeCommentGroup(0)
			if p.file.Line(p.pos) != endline || p.tok == token.EOF {
				// The next token is on a different line, thus
				// the last comment group is a line comment.
				p.lineComment = comment
			}
		}

		// consume successor comments, if any
		endline = -1
		for p.tok == token.COMMENT {
			comment, endline = p.consumeCommentGroup(1)
		}

		if endline+1 == p.file.Line(p.pos) {
			// The next token is following on the line immediately after the
			// comment group, thus the last comment group is a lead comment.
			p.leadComment = comment
		}
	}
}
	 246  
// A bailout panic is raised to indicate early termination. pos and msg are
// only populated when bailing out of object resolution; for other bailouts
// (error limit, nesting limit) the zero value is used.
type bailout struct {
	pos token.Pos // position at which resolution was abandoned
	msg string    // reason for abandoning resolution
}
	 253  
	 254  func (p *parser) error(pos token.Pos, msg string) {
	 255  	if p.trace {
	 256  		defer un(trace(p, "error: "+msg))
	 257  	}
	 258  
	 259  	epos := p.file.Position(pos)
	 260  
	 261  	// If AllErrors is not set, discard errors reported on the same line
	 262  	// as the last recorded error and stop parsing if there are more than
	 263  	// 10 errors.
	 264  	if p.mode&AllErrors == 0 {
	 265  		n := len(p.errors)
	 266  		if n > 0 && p.errors[n-1].Pos.Line == epos.Line {
	 267  			return // discard - likely a spurious error
	 268  		}
	 269  		if n > 10 {
	 270  			panic(bailout{})
	 271  		}
	 272  	}
	 273  
	 274  	p.errors.Add(epos, msg)
	 275  }
	 276  
	 277  func (p *parser) errorExpected(pos token.Pos, msg string) {
	 278  	msg = "expected " + msg
	 279  	if pos == p.pos {
	 280  		// the error happened at the current position;
	 281  		// make the error message more specific
	 282  		switch {
	 283  		case p.tok == token.SEMICOLON && p.lit == "\n":
	 284  			msg += ", found newline"
	 285  		case p.tok.IsLiteral():
	 286  			// print 123 rather than 'INT', etc.
	 287  			msg += ", found " + p.lit
	 288  		default:
	 289  			msg += ", found '" + p.tok.String() + "'"
	 290  		}
	 291  	}
	 292  	p.error(pos, msg)
	 293  }
	 294  
	 295  func (p *parser) expect(tok token.Token) token.Pos {
	 296  	pos := p.pos
	 297  	if p.tok != tok {
	 298  		p.errorExpected(pos, "'"+tok.String()+"'")
	 299  	}
	 300  	p.next() // make progress
	 301  	return pos
	 302  }
	 303  
	 304  // expect2 is like expect, but it returns an invalid position
	 305  // if the expected token is not found.
	 306  func (p *parser) expect2(tok token.Token) (pos token.Pos) {
	 307  	if p.tok == tok {
	 308  		pos = p.pos
	 309  	} else {
	 310  		p.errorExpected(p.pos, "'"+tok.String()+"'")
	 311  	}
	 312  	p.next() // make progress
	 313  	return
	 314  }
	 315  
	 316  // expectClosing is like expect but provides a better error message
	 317  // for the common case of a missing comma before a newline.
	 318  //
	 319  func (p *parser) expectClosing(tok token.Token, context string) token.Pos {
	 320  	if p.tok != tok && p.tok == token.SEMICOLON && p.lit == "\n" {
	 321  		p.error(p.pos, "missing ',' before newline in "+context)
	 322  		p.next()
	 323  	}
	 324  	return p.expect(tok)
	 325  }
	 326  
	 327  func (p *parser) expectSemi() {
	 328  	// semicolon is optional before a closing ')' or '}'
	 329  	if p.tok != token.RPAREN && p.tok != token.RBRACE {
	 330  		switch p.tok {
	 331  		case token.COMMA:
	 332  			// permit a ',' instead of a ';' but complain
	 333  			p.errorExpected(p.pos, "';'")
	 334  			fallthrough
	 335  		case token.SEMICOLON:
	 336  			p.next()
	 337  		default:
	 338  			p.errorExpected(p.pos, "';'")
	 339  			p.advance(stmtStart)
	 340  		}
	 341  	}
	 342  }
	 343  
	 344  func (p *parser) atComma(context string, follow token.Token) bool {
	 345  	if p.tok == token.COMMA {
	 346  		return true
	 347  	}
	 348  	if p.tok != follow {
	 349  		msg := "missing ','"
	 350  		if p.tok == token.SEMICOLON && p.lit == "\n" {
	 351  			msg += " before newline"
	 352  		}
	 353  		p.error(p.pos, msg+" in "+context)
	 354  		return true // "insert" comma and continue
	 355  	}
	 356  	return false
	 357  }
	 358  
	 359  func assert(cond bool, msg string) {
	 360  	if !cond {
	 361  		panic("go/parser internal error: " + msg)
	 362  	}
	 363  }
	 364  
// advance consumes tokens until the current token p.tok
// is in the 'to' set, or token.EOF. For error recovery.
// The syncPos/syncCnt bookkeeping below caps how often advance may
// return at the same position, guaranteeing forward progress overall.
func (p *parser) advance(to map[token.Token]bool) {
	for ; p.tok != token.EOF; p.next() {
		if to[p.tok] {
			// Return only if parser made some progress since last
			// sync or if it has not reached 10 advance calls without
			// progress. Otherwise consume at least one token to
			// avoid an endless parser loop (it is possible that
			// both parseOperand and parseStmt call advance and
			// correctly do not advance, thus the need for the
			// invocation limit p.syncCnt).
			if p.pos == p.syncPos && p.syncCnt < 10 {
				p.syncCnt++
				return
			}
			if p.pos > p.syncPos {
				p.syncPos = p.pos
				p.syncCnt = 0
				return
			}
			// Reaching here indicates a parser bug, likely an
			// incorrect token list in this function, but it only
			// leads to skipping of possibly correct code if a
			// previous error is present, and thus is preferred
			// over a non-terminating parse.
		}
	}
}
	 394  
// stmtStart is the set of tokens that may begin a statement;
// used as a recovery set by advance.
var stmtStart = map[token.Token]bool{
	token.BREAK:       true,
	token.CONST:       true,
	token.CONTINUE:    true,
	token.DEFER:       true,
	token.FALLTHROUGH: true,
	token.FOR:         true,
	token.GO:          true,
	token.GOTO:        true,
	token.IF:          true,
	token.RETURN:      true,
	token.SELECT:      true,
	token.SWITCH:      true,
	token.TYPE:        true,
	token.VAR:         true,
}

// declStart is the set of tokens that may begin a declaration;
// used as a recovery set by advance.
var declStart = map[token.Token]bool{
	token.CONST: true,
	token.TYPE:  true,
	token.VAR:   true,
}

// exprEnd is the set of tokens that typically follow an expression;
// used as a recovery set by advance.
var exprEnd = map[token.Token]bool{
	token.COMMA:     true,
	token.COLON:     true,
	token.SEMICOLON: true,
	token.RPAREN:    true,
	token.RBRACK:    true,
	token.RBRACE:    true,
}
	 426  
// safePos returns a valid file position for a given position: If pos
// is valid to begin with, safePos returns pos. If pos is out-of-range,
// safePos returns the EOF position.
//
// This is hack to work around "artificial" end positions in the AST which
// are computed by adding 1 to (presumably valid) token positions. If the
// token positions are invalid due to parse errors, the resulting end position
// may be past the file's EOF position, which would lead to panics if used
// later on.
//
func (p *parser) safePos(pos token.Pos) (res token.Pos) {
	// The named result lets the deferred recover substitute the EOF
	// position when Offset panics below.
	defer func() {
		if recover() != nil {
			res = token.Pos(p.file.Base() + p.file.Size()) // EOF position
		}
	}()
	_ = p.file.Offset(pos) // trigger a panic if position is out-of-range
	return pos
}
	 446  
	 447  // ----------------------------------------------------------------------------
	 448  // Identifiers
	 449  
	 450  func (p *parser) parseIdent() *ast.Ident {
	 451  	pos := p.pos
	 452  	name := "_"
	 453  	if p.tok == token.IDENT {
	 454  		name = p.lit
	 455  		p.next()
	 456  	} else {
	 457  		p.expect(token.IDENT) // use expect() error handling
	 458  	}
	 459  	return &ast.Ident{NamePos: pos, Name: name}
	 460  }
	 461  
	 462  func (p *parser) parseIdentList() (list []*ast.Ident) {
	 463  	if p.trace {
	 464  		defer un(trace(p, "IdentList"))
	 465  	}
	 466  
	 467  	list = append(list, p.parseIdent())
	 468  	for p.tok == token.COMMA {
	 469  		p.next()
	 470  		list = append(list, p.parseIdent())
	 471  	}
	 472  
	 473  	return
	 474  }
	 475  
	 476  // ----------------------------------------------------------------------------
	 477  // Common productions
	 478  
	 479  // If lhs is set, result list elements which are identifiers are not resolved.
	 480  func (p *parser) parseExprList() (list []ast.Expr) {
	 481  	if p.trace {
	 482  		defer un(trace(p, "ExpressionList"))
	 483  	}
	 484  
	 485  	list = append(list, p.checkExpr(p.parseExpr()))
	 486  	for p.tok == token.COMMA {
	 487  		p.next()
	 488  		list = append(list, p.checkExpr(p.parseExpr()))
	 489  	}
	 490  
	 491  	return
	 492  }
	 493  
	 494  func (p *parser) parseList(inRhs bool) []ast.Expr {
	 495  	old := p.inRhs
	 496  	p.inRhs = inRhs
	 497  	list := p.parseExprList()
	 498  	p.inRhs = old
	 499  	return list
	 500  }
	 501  
	 502  // ----------------------------------------------------------------------------
	 503  // Types
	 504  
	 505  func (p *parser) parseType() ast.Expr {
	 506  	if p.trace {
	 507  		defer un(trace(p, "Type"))
	 508  	}
	 509  
	 510  	typ := p.tryIdentOrType()
	 511  
	 512  	if typ == nil {
	 513  		pos := p.pos
	 514  		p.errorExpected(pos, "type")
	 515  		p.advance(exprEnd)
	 516  		return &ast.BadExpr{From: pos, To: p.pos}
	 517  	}
	 518  
	 519  	return typ
	 520  }
	 521  
	 522  func (p *parser) parseQualifiedIdent(ident *ast.Ident) ast.Expr {
	 523  	if p.trace {
	 524  		defer un(trace(p, "QualifiedIdent"))
	 525  	}
	 526  
	 527  	typ := p.parseTypeName(ident)
	 528  	if p.tok == token.LBRACK && p.parseTypeParams() {
	 529  		typ = p.parseTypeInstance(typ)
	 530  	}
	 531  
	 532  	return typ
	 533  }
	 534  
	 535  // If the result is an identifier, it is not resolved.
	 536  func (p *parser) parseTypeName(ident *ast.Ident) ast.Expr {
	 537  	if p.trace {
	 538  		defer un(trace(p, "TypeName"))
	 539  	}
	 540  
	 541  	if ident == nil {
	 542  		ident = p.parseIdent()
	 543  	}
	 544  
	 545  	if p.tok == token.PERIOD {
	 546  		// ident is a package name
	 547  		p.next()
	 548  		sel := p.parseIdent()
	 549  		return &ast.SelectorExpr{X: ident, Sel: sel}
	 550  	}
	 551  
	 552  	return ident
	 553  }
	 554  
	 555  func (p *parser) parseArrayLen() ast.Expr {
	 556  	if p.trace {
	 557  		defer un(trace(p, "ArrayLen"))
	 558  	}
	 559  
	 560  	p.exprLev++
	 561  	var len ast.Expr
	 562  	// always permit ellipsis for more fault-tolerant parsing
	 563  	if p.tok == token.ELLIPSIS {
	 564  		len = &ast.Ellipsis{Ellipsis: p.pos}
	 565  		p.next()
	 566  	} else if p.tok != token.RBRACK {
	 567  		len = p.parseRhs()
	 568  	}
	 569  	p.exprLev--
	 570  
	 571  	return len
	 572  }
	 573  
// parseArrayFieldOrTypeInstance parses the '[' ... ']' suffix following
// the identifier x, disambiguating between an array/slice field type
// ("x []E", "x [P]E") and a generic type instantiation ("x[P1, P2]").
// It returns the (possibly nil) field name and the parsed type; a nil
// name means x was absorbed into the type (an instantiation).
func (p *parser) parseArrayFieldOrTypeInstance(x *ast.Ident) (*ast.Ident, ast.Expr) {
	if p.trace {
		defer un(trace(p, "ArrayFieldOrTypeInstance"))
	}

	// TODO(gri) Should we allow a trailing comma in a type argument
	//           list such as T[P,]? (We do in parseTypeInstance).
	lbrack := p.expect(token.LBRACK)
	var args []ast.Expr
	var firstComma token.Pos // position of first comma, for the error below
	// TODO(rfindley): consider changing parseRhsOrType so that this function variable
	// is not needed.
	argparser := p.parseRhsOrType
	if !p.parseTypeParams() {
		argparser = p.parseRhs
	}
	if p.tok != token.RBRACK {
		p.exprLev++
		args = append(args, argparser())
		for p.tok == token.COMMA {
			if !firstComma.IsValid() {
				firstComma = p.pos
			}
			p.next()
			args = append(args, argparser())
		}
		p.exprLev--
	}
	rbrack := p.expect(token.RBRACK)

	if len(args) == 0 {
		// x []E
		elt := p.parseType()
		return x, &ast.ArrayType{Lbrack: lbrack, Elt: elt}
	}

	// x [P]E or x[P]
	if len(args) == 1 {
		elt := p.tryIdentOrType()
		if elt != nil {
			// x [P]E
			return x, &ast.ArrayType{Lbrack: lbrack, Len: args[0], Elt: elt}
		}
		if !p.parseTypeParams() {
			p.error(rbrack, "missing element type in array type expression")
			return nil, &ast.BadExpr{From: args[0].Pos(), To: args[0].End()}
		}
	}

	if !p.parseTypeParams() {
		// multiple "lengths" without generics: the commas were bogus
		p.error(firstComma, "expected ']', found ','")
		return x, &ast.BadExpr{From: args[0].Pos(), To: args[len(args)-1].End()}
	}

	// x[P], x[P1, P2], ...
	return nil, &ast.IndexExpr{X: x, Lbrack: lbrack, Index: typeparams.PackExpr(args), Rbrack: rbrack}
}
	 631  
// parseFieldDecl parses one field declaration in a struct type: either
// a list of named fields ("name1, name2 T"), or an embedded (possibly
// qualified or instantiated) type, with an optional tag. The trailing
// semicolon is consumed before the line comment is attached.
func (p *parser) parseFieldDecl() *ast.Field {
	if p.trace {
		defer un(trace(p, "FieldDecl"))
	}

	doc := p.leadComment

	var names []*ast.Ident
	var typ ast.Expr
	if p.tok == token.IDENT {
		name := p.parseIdent()
		if p.tok == token.PERIOD || p.tok == token.STRING || p.tok == token.SEMICOLON || p.tok == token.RBRACE {
			// embedded type
			typ = name
			if p.tok == token.PERIOD {
				typ = p.parseQualifiedIdent(name)
			}
		} else {
			// name1, name2, ... T
			names = []*ast.Ident{name}
			for p.tok == token.COMMA {
				p.next()
				names = append(names, p.parseIdent())
			}
			// Careful dance: We don't know if we have an embedded instantiated
			// type T[P1, P2, ...] or a field T of array type []E or [P]E.
			if len(names) == 1 && p.tok == token.LBRACK {
				name, typ = p.parseArrayFieldOrTypeInstance(name)
				if name == nil {
					names = nil
				}
			} else {
				// T P
				typ = p.parseType()
			}
		}
	} else {
		// embedded, possibly generic type
		// (using the enclosing parentheses to distinguish it from a named field declaration)
		// TODO(rFindley) confirm that this doesn't allow parenthesized embedded type
		typ = p.parseType()
	}

	var tag *ast.BasicLit
	if p.tok == token.STRING {
		tag = &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
		p.next()
	}

	p.expectSemi() // call before accessing p.lineComment

	field := &ast.Field{Doc: doc, Names: names, Type: typ, Tag: tag, Comment: p.lineComment}
	return field
}
	 686  
	 687  func (p *parser) parseStructType() *ast.StructType {
	 688  	if p.trace {
	 689  		defer un(trace(p, "StructType"))
	 690  	}
	 691  
	 692  	pos := p.expect(token.STRUCT)
	 693  	lbrace := p.expect(token.LBRACE)
	 694  	var list []*ast.Field
	 695  	for p.tok == token.IDENT || p.tok == token.MUL || p.tok == token.LPAREN {
	 696  		// a field declaration cannot start with a '(' but we accept
	 697  		// it here for more robust parsing and better error messages
	 698  		// (parseFieldDecl will check and complain if necessary)
	 699  		list = append(list, p.parseFieldDecl())
	 700  	}
	 701  	rbrace := p.expect(token.RBRACE)
	 702  
	 703  	return &ast.StructType{
	 704  		Struct: pos,
	 705  		Fields: &ast.FieldList{
	 706  			Opening: lbrace,
	 707  			List:		list,
	 708  			Closing: rbrace,
	 709  		},
	 710  	}
	 711  }
	 712  
	 713  func (p *parser) parsePointerType() *ast.StarExpr {
	 714  	if p.trace {
	 715  		defer un(trace(p, "PointerType"))
	 716  	}
	 717  
	 718  	star := p.expect(token.MUL)
	 719  	base := p.parseType()
	 720  
	 721  	return &ast.StarExpr{Star: star, X: base}
	 722  }
	 723  
	 724  func (p *parser) parseDotsType() *ast.Ellipsis {
	 725  	if p.trace {
	 726  		defer un(trace(p, "DotsType"))
	 727  	}
	 728  
	 729  	pos := p.expect(token.ELLIPSIS)
	 730  	elt := p.parseType()
	 731  
	 732  	return &ast.Ellipsis{Ellipsis: pos, Elt: elt}
	 733  }
	 734  
// A field is one (name, type) entry of a parameter list under
// construction; either member may be nil until parseParameterList
// distributes names and types.
type field struct {
	name *ast.Ident
	typ  ast.Expr
}
	 739  
// parseParamDecl parses a single parameter declaration: a bare type, a
// "name type" pair, "name ...type", or a qualified type name. If name
// is non-nil it is the already-consumed first identifier of the
// declaration. Either member of the result may be nil; the caller
// (parseParameterList) distributes names and types afterwards.
func (p *parser) parseParamDecl(name *ast.Ident) (f field) {
	// TODO(rFindley) compare with parser.paramDeclOrNil in the syntax package
	if p.trace {
		defer un(trace(p, "ParamDeclOrNil"))
	}

	ptok := p.tok
	if name != nil {
		p.tok = token.IDENT // force token.IDENT case in switch below
	}

	switch p.tok {
	case token.IDENT:
		if name != nil {
			f.name = name
			p.tok = ptok // undo the forced token; restore the real look-ahead
		} else {
			f.name = p.parseIdent()
		}
		switch p.tok {
		case token.IDENT, token.MUL, token.ARROW, token.FUNC, token.CHAN, token.MAP, token.STRUCT, token.INTERFACE, token.LPAREN:
			// name type
			f.typ = p.parseType()

		case token.LBRACK:
			// name[type1, type2, ...] or name []type or name [len]type
			f.name, f.typ = p.parseArrayFieldOrTypeInstance(f.name)

		case token.ELLIPSIS:
			// name ...type
			f.typ = p.parseDotsType()

		case token.PERIOD:
			// qualified.typename
			f.typ = p.parseQualifiedIdent(f.name)
			f.name = nil
		}

	case token.MUL, token.ARROW, token.FUNC, token.LBRACK, token.CHAN, token.MAP, token.STRUCT, token.INTERFACE, token.LPAREN:
		// type
		f.typ = p.parseType()

	case token.ELLIPSIS:
		// ...type
		// (always accepted)
		f.typ = p.parseDotsType()

	default:
		// neither a name nor anything that can start a type
		p.errorExpected(p.pos, ")")
		p.advance(exprEnd)
	}

	return
}
	 794  
// parseParameterList parses a comma-separated list of parameter (or,
// with tparams set, type parameter) declarations up to — but not
// consuming — the closing token. If name0 is non-nil it is the
// already-consumed first name of the first declaration. After parsing,
// types are distributed over preceding names ("a, b int") and the
// result is converted to []*ast.Field.
func (p *parser) parseParameterList(name0 *ast.Ident, closing token.Token, parseParamDecl func(*ast.Ident) field, tparams bool) (params []*ast.Field) {
	if p.trace {
		defer un(trace(p, "ParameterList"))
	}

	pos := p.pos
	if name0 != nil {
		pos = name0.Pos()
	}

	var list []field
	var named int // number of parameters that have an explicit name and type

	for name0 != nil || p.tok != closing && p.tok != token.EOF {
		par := parseParamDecl(name0)
		name0 = nil // 1st name was consumed if present
		if par.name != nil || par.typ != nil {
			list = append(list, par)
			if par.name != nil && par.typ != nil {
				named++
			}
		}
		if !p.atComma("parameter list", closing) {
			break
		}
		p.next()
	}

	if len(list) == 0 {
		return // not uncommon
	}

	// TODO(gri) parameter distribution and conversion to []*ast.Field
	//           can be combined and made more efficient

	// distribute parameter types
	if named == 0 {
		// all unnamed => found names are type names
		for i := 0; i < len(list); i++ {
			par := &list[i]
			if typ := par.name; typ != nil {
				par.typ = typ
				par.name = nil
			}
		}
		if tparams {
			p.error(pos, "all type parameters must be named")
		}
	} else if named != len(list) {
		// some named => all must be named
		// Walk backwards so each type propagates to the unnamed
		// entries that precede it.
		ok := true
		var typ ast.Expr
		for i := len(list) - 1; i >= 0; i-- {
			if par := &list[i]; par.typ != nil {
				typ = par.typ
				if par.name == nil {
					ok = false
					n := ast.NewIdent("_")
					n.NamePos = typ.Pos() // correct position
					par.name = n
				}
			} else if typ != nil {
				par.typ = typ
			} else {
				// par.typ == nil && typ == nil => we only have a par.name
				ok = false
				par.typ = &ast.BadExpr{From: par.name.Pos(), To: p.pos}
			}
		}
		if !ok {
			if tparams {
				p.error(pos, "all type parameters must be named")
			} else {
				p.error(pos, "mixed named and unnamed parameters")
			}
		}
	}

	// convert list []*ast.Field
	if named == 0 {
		// parameter list consists of types only
		for _, par := range list {
			assert(par.typ != nil, "nil type in unnamed parameter list")
			params = append(params, &ast.Field{Type: par.typ})
		}
		return
	}

	// parameter list consists of named parameters with types;
	// group consecutive names sharing the same type into one field
	var names []*ast.Ident
	var typ ast.Expr
	addParams := func() {
		assert(typ != nil, "nil type in named parameter list")
		field := &ast.Field{Names: names, Type: typ}
		params = append(params, field)
		names = nil
	}
	for _, par := range list {
		if par.typ != typ {
			if len(names) > 0 {
				addParams()
			}
			typ = par.typ
		}
		names = append(names, par.name)
	}
	if len(names) > 0 {
		addParams()
	}
	return
}
	 906  
	 907  func (p *parser) parseParameters(acceptTParams bool) (tparams, params *ast.FieldList) {
	 908  	if p.trace {
	 909  		defer un(trace(p, "Parameters"))
	 910  	}
	 911  
	 912  	if p.parseTypeParams() && acceptTParams && p.tok == token.LBRACK {
	 913  		opening := p.pos
	 914  		p.next()
	 915  		// [T any](params) syntax
	 916  		list := p.parseParameterList(nil, token.RBRACK, p.parseParamDecl, true)
	 917  		rbrack := p.expect(token.RBRACK)
	 918  		tparams = &ast.FieldList{Opening: opening, List: list, Closing: rbrack}
	 919  		// Type parameter lists must not be empty.
	 920  		if tparams.NumFields() == 0 {
	 921  			p.error(tparams.Closing, "empty type parameter list")
	 922  			tparams = nil // avoid follow-on errors
	 923  		}
	 924  	}
	 925  
	 926  	opening := p.expect(token.LPAREN)
	 927  
	 928  	var fields []*ast.Field
	 929  	if p.tok != token.RPAREN {
	 930  		fields = p.parseParameterList(nil, token.RPAREN, p.parseParamDecl, false)
	 931  	}
	 932  
	 933  	rparen := p.expect(token.RPAREN)
	 934  	params = &ast.FieldList{Opening: opening, List: fields, Closing: rparen}
	 935  
	 936  	return
	 937  }
	 938  
	 939  func (p *parser) parseResult() *ast.FieldList {
	 940  	if p.trace {
	 941  		defer un(trace(p, "Result"))
	 942  	}
	 943  
	 944  	if p.tok == token.LPAREN {
	 945  		_, results := p.parseParameters(false)
	 946  		return results
	 947  	}
	 948  
	 949  	typ := p.tryIdentOrType()
	 950  	if typ != nil {
	 951  		list := make([]*ast.Field, 1)
	 952  		list[0] = &ast.Field{Type: typ}
	 953  		return &ast.FieldList{List: list}
	 954  	}
	 955  
	 956  	return nil
	 957  }
	 958  
	 959  func (p *parser) parseFuncType() *ast.FuncType {
	 960  	if p.trace {
	 961  		defer un(trace(p, "FuncType"))
	 962  	}
	 963  
	 964  	pos := p.expect(token.FUNC)
	 965  	tparams, params := p.parseParameters(true)
	 966  	if tparams != nil {
	 967  		p.error(tparams.Pos(), "function type cannot have type parameters")
	 968  	}
	 969  	results := p.parseResult()
	 970  
	 971  	return &ast.FuncType{Func: pos, Params: params, Results: results}
	 972  }
	 973  
// parseMethodSpec parses one entry of an interface body: a method
// signature, an embedded type name, or (when type parameters are
// enabled) a generic method or an embedded instantiated type.
func (p *parser) parseMethodSpec() *ast.Field {
	if p.trace {
		defer un(trace(p, "MethodSpec"))
	}

	doc := p.leadComment // capture the lead comment before parsing advances
	var idents []*ast.Ident
	var typ ast.Expr
	x := p.parseTypeName(nil)
	if ident, _ := x.(*ast.Ident); ident != nil {
		// name starts with a plain identifier: may be a method or an
		// embedded (possibly instantiated) type
		switch {
		case p.tok == token.LBRACK && p.parseTypeParams():
			// generic method or embedded instantiated type
			lbrack := p.pos
			p.next()
			p.exprLev++
			x := p.parseExpr()
			p.exprLev--
			// A single identifier followed by something other than ',' or ']'
			// must be the first type parameter name of a generic method;
			// otherwise x is the first type argument of an instantiation.
			if name0, _ := x.(*ast.Ident); name0 != nil && p.tok != token.COMMA && p.tok != token.RBRACK {
				// generic method m[T any]
				list := p.parseParameterList(name0, token.RBRACK, p.parseParamDecl, true)
				rbrack := p.expect(token.RBRACK)
				tparams := &ast.FieldList{Opening: lbrack, List: list, Closing: rbrack}
				// TODO(rfindley) refactor to share code with parseFuncType.
				_, params := p.parseParameters(false)
				results := p.parseResult()
				idents = []*ast.Ident{ident}
				typ = &ast.FuncType{Func: token.NoPos, Params: params, Results: results}
				typeparams.Set(typ, tparams)
			} else {
				// embedded instantiated type
				// TODO(rfindley) should resolve all identifiers in x.
				list := []ast.Expr{x}
				if p.atComma("type argument list", token.RBRACK) {
					p.exprLev++
					for p.tok != token.RBRACK && p.tok != token.EOF {
						list = append(list, p.parseType())
						if !p.atComma("type argument list", token.RBRACK) {
							break
						}
						p.next()
					}
					p.exprLev--
				}
				rbrack := p.expectClosing(token.RBRACK, "type argument list")
				typ = &ast.IndexExpr{X: ident, Lbrack: lbrack, Index: typeparams.PackExpr(list), Rbrack: rbrack}
			}
		case p.tok == token.LPAREN:
			// ordinary method
			// TODO(rfindley) refactor to share code with parseFuncType.
			_, params := p.parseParameters(false)
			results := p.parseResult()
			idents = []*ast.Ident{ident}
			typ = &ast.FuncType{Func: token.NoPos, Params: params, Results: results}
		default:
			// embedded type
			typ = x
		}
	} else {
		// embedded, possibly instantiated type
		typ = x
		if p.tok == token.LBRACK && p.parseTypeParams() {
			// embedded instantiated interface
			typ = p.parseTypeInstance(typ)
		}
	}
	p.expectSemi() // call before accessing p.linecomment

	spec := &ast.Field{Doc: doc, Names: idents, Type: typ, Comment: p.lineComment}

	return spec
}
	1046  
// parseInterfaceType parses an interface type. The body may contain
// method specifications and, when type parameters are enabled, "type"
// lists; each listed type is stored as a field named "type" (see below).
func (p *parser) parseInterfaceType() *ast.InterfaceType {
	if p.trace {
		defer un(trace(p, "InterfaceType"))
	}

	pos := p.expect(token.INTERFACE)
	lbrace := p.expect(token.LBRACE)
	var list []*ast.Field
	// note: p.parseTypeParams() gates acceptance of "type" lists
	for p.tok == token.IDENT || p.parseTypeParams() && p.tok == token.TYPE {
		if p.tok == token.IDENT {
			list = append(list, p.parseMethodSpec())
		} else {
			// all types in a type list share the same field name "type"
			// (since type is a keyword, a Go program cannot have that field name)
			name := []*ast.Ident{{NamePos: p.pos, Name: "type"}}
			p.next()
			// add each type as a field named "type"
			for _, typ := range p.parseTypeList() {
				list = append(list, &ast.Field{Names: name, Type: typ})
			}
			p.expectSemi()
		}
	}
	// TODO(rfindley): the error produced here could be improved, since we could
	// accept a identifier, 'type', or a '}' at this point.
	rbrace := p.expect(token.RBRACE)

	return &ast.InterfaceType{
		Interface: pos,
		Methods: &ast.FieldList{
			Opening: lbrace,
			List:    list,
			Closing: rbrace,
		},
	}
}
	1083  
	1084  func (p *parser) parseMapType() *ast.MapType {
	1085  	if p.trace {
	1086  		defer un(trace(p, "MapType"))
	1087  	}
	1088  
	1089  	pos := p.expect(token.MAP)
	1090  	p.expect(token.LBRACK)
	1091  	key := p.parseType()
	1092  	p.expect(token.RBRACK)
	1093  	value := p.parseType()
	1094  
	1095  	return &ast.MapType{Map: pos, Key: key, Value: value}
	1096  }
	1097  
	1098  func (p *parser) parseChanType() *ast.ChanType {
	1099  	if p.trace {
	1100  		defer un(trace(p, "ChanType"))
	1101  	}
	1102  
	1103  	pos := p.pos
	1104  	dir := ast.SEND | ast.RECV
	1105  	var arrow token.Pos
	1106  	if p.tok == token.CHAN {
	1107  		p.next()
	1108  		if p.tok == token.ARROW {
	1109  			arrow = p.pos
	1110  			p.next()
	1111  			dir = ast.SEND
	1112  		}
	1113  	} else {
	1114  		arrow = p.expect(token.ARROW)
	1115  		p.expect(token.CHAN)
	1116  		dir = ast.RECV
	1117  	}
	1118  	value := p.parseType()
	1119  
	1120  	return &ast.ChanType{Begin: pos, Arrow: arrow, Dir: dir, Value: value}
	1121  }
	1122  
	1123  func (p *parser) parseTypeInstance(typ ast.Expr) ast.Expr {
	1124  	assert(p.parseTypeParams(), "parseTypeInstance while not parsing type params")
	1125  	if p.trace {
	1126  		defer un(trace(p, "TypeInstance"))
	1127  	}
	1128  
	1129  	opening := p.expect(token.LBRACK)
	1130  
	1131  	p.exprLev++
	1132  	var list []ast.Expr
	1133  	for p.tok != token.RBRACK && p.tok != token.EOF {
	1134  		list = append(list, p.parseType())
	1135  		if !p.atComma("type argument list", token.RBRACK) {
	1136  			break
	1137  		}
	1138  		p.next()
	1139  	}
	1140  	p.exprLev--
	1141  
	1142  	closing := p.expectClosing(token.RBRACK, "type argument list")
	1143  
	1144  	return &ast.IndexExpr{X: typ, Lbrack: opening, Index: typeparams.PackExpr(list), Rbrack: closing}
	1145  }
	1146  
// tryIdentOrType parses a type if one starts at the current token and
// returns it; it returns nil (without consuming input) when no type is
// found.
func (p *parser) tryIdentOrType() ast.Expr {
	defer decNestLev(incNestLev(p)) // bound parser recursion depth

	switch p.tok {
	case token.IDENT:
		// (possibly qualified) type name, possibly instantiated
		typ := p.parseTypeName(nil)
		if p.tok == token.LBRACK && p.parseTypeParams() {
			typ = p.parseTypeInstance(typ)
		}
		return typ
	case token.LBRACK:
		// array or slice type
		lbrack := p.expect(token.LBRACK)
		alen := p.parseArrayLen()
		p.expect(token.RBRACK)
		elt := p.parseType()
		return &ast.ArrayType{Lbrack: lbrack, Len: alen, Elt: elt}
	case token.STRUCT:
		return p.parseStructType()
	case token.MUL:
		return p.parsePointerType()
	case token.FUNC:
		typ := p.parseFuncType()
		return typ
	case token.INTERFACE:
		return p.parseInterfaceType()
	case token.MAP:
		return p.parseMapType()
	case token.CHAN, token.ARROW:
		return p.parseChanType()
	case token.LPAREN:
		// parenthesized type
		lparen := p.pos
		p.next()
		typ := p.parseType()
		rparen := p.expect(token.RPAREN)
		return &ast.ParenExpr{Lparen: lparen, X: typ, Rparen: rparen}
	}

	// no type found
	return nil
}
	1187  
	1188  // ----------------------------------------------------------------------------
	1189  // Blocks
	1190  
	1191  func (p *parser) parseStmtList() (list []ast.Stmt) {
	1192  	if p.trace {
	1193  		defer un(trace(p, "StatementList"))
	1194  	}
	1195  
	1196  	for p.tok != token.CASE && p.tok != token.DEFAULT && p.tok != token.RBRACE && p.tok != token.EOF {
	1197  		list = append(list, p.parseStmt())
	1198  	}
	1199  
	1200  	return
	1201  }
	1202  
	1203  func (p *parser) parseBody() *ast.BlockStmt {
	1204  	if p.trace {
	1205  		defer un(trace(p, "Body"))
	1206  	}
	1207  
	1208  	lbrace := p.expect(token.LBRACE)
	1209  	list := p.parseStmtList()
	1210  	rbrace := p.expect2(token.RBRACE)
	1211  
	1212  	return &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
	1213  }
	1214  
	1215  func (p *parser) parseBlockStmt() *ast.BlockStmt {
	1216  	if p.trace {
	1217  		defer un(trace(p, "BlockStmt"))
	1218  	}
	1219  
	1220  	lbrace := p.expect(token.LBRACE)
	1221  	list := p.parseStmtList()
	1222  	rbrace := p.expect2(token.RBRACE)
	1223  
	1224  	return &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
	1225  }
	1226  
	1227  // ----------------------------------------------------------------------------
	1228  // Expressions
	1229  
	1230  func (p *parser) parseFuncTypeOrLit() ast.Expr {
	1231  	if p.trace {
	1232  		defer un(trace(p, "FuncTypeOrLit"))
	1233  	}
	1234  
	1235  	typ := p.parseFuncType()
	1236  	if p.tok != token.LBRACE {
	1237  		// function type only
	1238  		return typ
	1239  	}
	1240  
	1241  	p.exprLev++
	1242  	body := p.parseBody()
	1243  	p.exprLev--
	1244  
	1245  	return &ast.FuncLit{Type: typ, Body: body}
	1246  }
	1247  
// parseOperand may return an expression or a raw type (incl. array
// types of the form [...]T). Callers must verify the result.
//
func (p *parser) parseOperand() ast.Expr {
	if p.trace {
		defer un(trace(p, "Operand"))
	}

	switch p.tok {
	case token.IDENT:
		x := p.parseIdent()
		return x

	case token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING:
		// basic literal
		x := &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
		p.next()
		return x

	case token.LPAREN:
		// parenthesized expression or type
		lparen := p.pos
		p.next()
		p.exprLev++
		x := p.parseRhsOrType() // types may be parenthesized: (some type)
		p.exprLev--
		rparen := p.expect(token.RPAREN)
		return &ast.ParenExpr{Lparen: lparen, X: x, Rparen: rparen}

	case token.FUNC:
		return p.parseFuncTypeOrLit()
	}

	if typ := p.tryIdentOrType(); typ != nil { // do not consume trailing type parameters
		// could be type for composite literal or conversion
		_, isIdent := typ.(*ast.Ident)
		assert(!isIdent, "type cannot be identifier") // identifiers are handled by the IDENT case above
		return typ
	}

	// we have an error
	pos := p.pos
	p.errorExpected(pos, "operand")
	p.advance(stmtStart)
	return &ast.BadExpr{From: pos, To: p.pos}
}
	1292  
	1293  func (p *parser) parseSelector(x ast.Expr) ast.Expr {
	1294  	if p.trace {
	1295  		defer un(trace(p, "Selector"))
	1296  	}
	1297  
	1298  	sel := p.parseIdent()
	1299  
	1300  	return &ast.SelectorExpr{X: x, Sel: sel}
	1301  }
	1302  
	1303  func (p *parser) parseTypeAssertion(x ast.Expr) ast.Expr {
	1304  	if p.trace {
	1305  		defer un(trace(p, "TypeAssertion"))
	1306  	}
	1307  
	1308  	lparen := p.expect(token.LPAREN)
	1309  	var typ ast.Expr
	1310  	if p.tok == token.TYPE {
	1311  		// type switch: typ == nil
	1312  		p.next()
	1313  	} else {
	1314  		typ = p.parseType()
	1315  	}
	1316  	rparen := p.expect(token.RPAREN)
	1317  
	1318  	return &ast.TypeAssertExpr{X: x, Type: typ, Lparen: lparen, Rparen: rparen}
	1319  }
	1320  
// parseIndexOrSliceOrInstance parses the bracketed suffix of x, which
// may be an index expression (a[i]), a slice expression (a[lo:hi] or
// a[lo:hi:max]), or — when type parameters are enabled — a generic
// instantiation (a[T1, T2, ...]). The forms are distinguished by the
// separators seen after the first operand (':' vs ',').
func (p *parser) parseIndexOrSliceOrInstance(x ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "parseIndexOrSliceOrInstance"))
	}

	lbrack := p.expect(token.LBRACK)
	if p.tok == token.RBRACK {
		// empty index, slice or index expressions are not permitted;
		// accept them for parsing tolerance, but complain
		p.errorExpected(p.pos, "operand")
		rbrack := p.pos
		p.next()
		return &ast.IndexExpr{
			X:      x,
			Lbrack: lbrack,
			Index:  &ast.BadExpr{From: rbrack, To: rbrack},
			Rbrack: rbrack,
		}
	}
	p.exprLev++

	const N = 3 // change the 3 to 2 to disable 3-index slices
	var args []ast.Expr
	var index [N]ast.Expr
	var colons [N - 1]token.Pos
	var firstComma token.Pos
	if p.tok != token.COLON {
		// We can't know if we have an index expression or a type instantiation;
		// so even if we see a (named) type we are not going to be in type context.
		index[0] = p.parseRhsOrType()
	}
	ncolons := 0
	switch p.tok {
	case token.COLON:
		// slice expression
		for p.tok == token.COLON && ncolons < len(colons) {
			colons[ncolons] = p.pos
			ncolons++
			p.next()
			if p.tok != token.COLON && p.tok != token.RBRACK && p.tok != token.EOF {
				index[ncolons] = p.parseRhs()
			}
		}
	case token.COMMA:
		firstComma = p.pos
		// instance expression
		args = append(args, index[0])
		for p.tok == token.COMMA {
			p.next()
			if p.tok != token.RBRACK && p.tok != token.EOF {
				args = append(args, p.parseType())
			}
		}
	}

	p.exprLev--
	rbrack := p.expect(token.RBRACK)

	if ncolons > 0 {
		// slice expression
		slice3 := false
		if ncolons == 2 {
			slice3 = true
			// Check presence of 2nd and 3rd index here rather than during type-checking
			// to prevent erroneous programs from passing through gofmt (was issue 7305).
			if index[1] == nil {
				p.error(colons[0], "2nd index required in 3-index slice")
				index[1] = &ast.BadExpr{From: colons[0] + 1, To: colons[1]}
			}
			if index[2] == nil {
				p.error(colons[1], "3rd index required in 3-index slice")
				index[2] = &ast.BadExpr{From: colons[1] + 1, To: rbrack}
			}
		}
		return &ast.SliceExpr{X: x, Lbrack: lbrack, Low: index[0], High: index[1], Max: index[2], Slice3: slice3, Rbrack: rbrack}
	}

	if len(args) == 0 {
		// index expression
		return &ast.IndexExpr{X: x, Lbrack: lbrack, Index: index[0], Rbrack: rbrack}
	}

	// multiple comma-separated operands are only valid as type arguments
	if !p.parseTypeParams() {
		p.error(firstComma, "expected ']' or ':', found ','")
		return &ast.BadExpr{From: args[0].Pos(), To: args[len(args)-1].End()}
	}

	// instance expression
	return &ast.IndexExpr{X: x, Lbrack: lbrack, Index: typeparams.PackExpr(args), Rbrack: rbrack}
}
	1411  
// parseCallOrConversion parses the parenthesized argument list of a
// call or conversion whose function (or type) expression is fun. A
// trailing "..." after the final argument is recorded in Ellipsis.
func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr {
	if p.trace {
		defer un(trace(p, "CallOrConversion"))
	}

	lparen := p.expect(token.LPAREN)
	p.exprLev++
	var list []ast.Expr
	var ellipsis token.Pos
	// "..." may only follow the last argument, hence the loop stops
	// once ellipsis has been set
	for p.tok != token.RPAREN && p.tok != token.EOF && !ellipsis.IsValid() {
		list = append(list, p.parseRhsOrType()) // builtins may expect a type: make(some type, ...)
		if p.tok == token.ELLIPSIS {
			ellipsis = p.pos
			p.next()
		}
		if !p.atComma("argument list", token.RPAREN) {
			break
		}
		p.next()
	}
	p.exprLev--
	rparen := p.expectClosing(token.RPAREN, "argument list")

	return &ast.CallExpr{Fun: fun, Lparen: lparen, Args: list, Ellipsis: ellipsis, Rparen: rparen}
}
	1437  
	1438  func (p *parser) parseValue() ast.Expr {
	1439  	if p.trace {
	1440  		defer un(trace(p, "Element"))
	1441  	}
	1442  
	1443  	if p.tok == token.LBRACE {
	1444  		return p.parseLiteralValue(nil)
	1445  	}
	1446  
	1447  	x := p.checkExpr(p.parseExpr())
	1448  
	1449  	return x
	1450  }
	1451  
	1452  func (p *parser) parseElement() ast.Expr {
	1453  	if p.trace {
	1454  		defer un(trace(p, "Element"))
	1455  	}
	1456  
	1457  	x := p.parseValue()
	1458  	if p.tok == token.COLON {
	1459  		colon := p.pos
	1460  		p.next()
	1461  		x = &ast.KeyValueExpr{Key: x, Colon: colon, Value: p.parseValue()}
	1462  	}
	1463  
	1464  	return x
	1465  }
	1466  
	1467  func (p *parser) parseElementList() (list []ast.Expr) {
	1468  	if p.trace {
	1469  		defer un(trace(p, "ElementList"))
	1470  	}
	1471  
	1472  	for p.tok != token.RBRACE && p.tok != token.EOF {
	1473  		list = append(list, p.parseElement())
	1474  		if !p.atComma("composite literal", token.RBRACE) {
	1475  			break
	1476  		}
	1477  		p.next()
	1478  	}
	1479  
	1480  	return
	1481  }
	1482  
	1483  func (p *parser) parseLiteralValue(typ ast.Expr) ast.Expr {
	1484  	if p.trace {
	1485  		defer un(trace(p, "LiteralValue"))
	1486  	}
	1487  
	1488  	lbrace := p.expect(token.LBRACE)
	1489  	var elts []ast.Expr
	1490  	p.exprLev++
	1491  	if p.tok != token.RBRACE {
	1492  		elts = p.parseElementList()
	1493  	}
	1494  	p.exprLev--
	1495  	rbrace := p.expectClosing(token.RBRACE, "composite literal")
	1496  	return &ast.CompositeLit{Type: typ, Lbrace: lbrace, Elts: elts, Rbrace: rbrace}
	1497  }
	1498  
	1499  // checkExpr checks that x is an expression (and not a type).
	1500  func (p *parser) checkExpr(x ast.Expr) ast.Expr {
	1501  	switch unparen(x).(type) {
	1502  	case *ast.BadExpr:
	1503  	case *ast.Ident:
	1504  	case *ast.BasicLit:
	1505  	case *ast.FuncLit:
	1506  	case *ast.CompositeLit:
	1507  	case *ast.ParenExpr:
	1508  		panic("unreachable")
	1509  	case *ast.SelectorExpr:
	1510  	case *ast.IndexExpr:
	1511  	case *ast.SliceExpr:
	1512  	case *ast.TypeAssertExpr:
	1513  		// If t.Type == nil we have a type assertion of the form
	1514  		// y.(type), which is only allowed in type switch expressions.
	1515  		// It's hard to exclude those but for the case where we are in
	1516  		// a type switch. Instead be lenient and test this in the type
	1517  		// checker.
	1518  	case *ast.CallExpr:
	1519  	case *ast.StarExpr:
	1520  	case *ast.UnaryExpr:
	1521  	case *ast.BinaryExpr:
	1522  	default:
	1523  		// all other nodes are not proper expressions
	1524  		p.errorExpected(x.Pos(), "expression")
	1525  		x = &ast.BadExpr{From: x.Pos(), To: p.safePos(x.End())}
	1526  	}
	1527  	return x
	1528  }
	1529  
	1530  // If x is of the form (T), unparen returns unparen(T), otherwise it returns x.
	1531  func unparen(x ast.Expr) ast.Expr {
	1532  	if p, isParen := x.(*ast.ParenExpr); isParen {
	1533  		x = unparen(p.X)
	1534  	}
	1535  	return x
	1536  }
	1537  
	1538  // checkExprOrType checks that x is an expression or a type
	1539  // (and not a raw type such as [...]T).
	1540  //
	1541  func (p *parser) checkExprOrType(x ast.Expr) ast.Expr {
	1542  	switch t := unparen(x).(type) {
	1543  	case *ast.ParenExpr:
	1544  		panic("unreachable")
	1545  	case *ast.ArrayType:
	1546  		if len, isEllipsis := t.Len.(*ast.Ellipsis); isEllipsis {
	1547  			p.error(len.Pos(), "expected array length, found '...'")
	1548  			x = &ast.BadExpr{From: x.Pos(), To: p.safePos(x.End())}
	1549  		}
	1550  	}
	1551  
	1552  	// all other nodes are expressions or types
	1553  	return x
	1554  }
	1555  
// parsePrimaryExpr parses an operand followed by any number of suffix
// constructs: selectors, type assertions, index/slice/instantiation
// expressions, call argument lists, and composite literal values.
func (p *parser) parsePrimaryExpr() (x ast.Expr) {
	if p.trace {
		defer un(trace(p, "PrimaryExpr"))
	}

	x = p.parseOperand()
	// We track the nesting here rather than at the entry for the function,
	// since it can iteratively produce a nested output, and we want to
	// limit how deep a structure we generate.
	var n int
	defer func() { p.nestLev -= n }()
	for n = 1; ; n++ {
		incNestLev(p)
		switch p.tok {
		case token.PERIOD:
			// selector (x.f) or type assertion (x.(T))
			p.next()
			switch p.tok {
			case token.IDENT:
				x = p.parseSelector(p.checkExprOrType(x))
			case token.LPAREN:
				x = p.parseTypeAssertion(p.checkExpr(x))
			default:
				pos := p.pos
				p.errorExpected(pos, "selector or type assertion")
				// TODO(rFindley) The check for token.RBRACE below is a targeted fix
				//								to error recovery sufficient to make the x/tools tests to
				//								pass with the new parsing logic introduced for type
				//								parameters. Remove this once error recovery has been
				//								more generally reconsidered.
				if p.tok != token.RBRACE {
					p.next() // make progress
				}
				// recover with a placeholder selector so the AST stays valid
				sel := &ast.Ident{NamePos: pos, Name: "_"}
				x = &ast.SelectorExpr{X: x, Sel: sel}
			}
		case token.LBRACK:
			x = p.parseIndexOrSliceOrInstance(p.checkExpr(x))
		case token.LPAREN:
			x = p.parseCallOrConversion(p.checkExprOrType(x))
		case token.LBRACE:
			// operand may have returned a parenthesized complit
			// type; accept it but complain if we have a complit
			t := unparen(x)
			// determine if '{' belongs to a composite literal or a block statement
			switch t.(type) {
			case *ast.BadExpr, *ast.Ident, *ast.SelectorExpr:
				if p.exprLev < 0 {
					return
				}
				// x is possibly a composite literal type
			case *ast.IndexExpr:
				if p.exprLev < 0 {
					return
				}
				// x is possibly a composite literal type
			case *ast.ArrayType, *ast.StructType, *ast.MapType:
				// x is a composite literal type
			default:
				return
			}
			if t != x {
				p.error(t.Pos(), "cannot parenthesize type in composite literal")
				// already progressed, no need to advance
			}
			x = p.parseLiteralValue(x)
		default:
			return
		}
	}
}
	1626  
// parseUnaryExpr parses a unary expression. A leading "<-" may start a
// receive expression or a channel type; in the latter case the arrow
// must be re-associated with the channel type already parsed (see the
// case comments below).
func (p *parser) parseUnaryExpr() ast.Expr {
	defer decNestLev(incNestLev(p))

	if p.trace {
		defer un(trace(p, "UnaryExpr"))
	}

	switch p.tok {
	case token.ADD, token.SUB, token.NOT, token.XOR, token.AND:
		pos, op := p.pos, p.tok
		p.next()
		x := p.parseUnaryExpr()
		return &ast.UnaryExpr{OpPos: pos, Op: op, X: p.checkExpr(x)}

	case token.ARROW:
		// channel type or receive expression
		arrow := p.pos
		p.next()

		// If the next token is token.CHAN we still don't know if it
		// is a channel type or a receive operation - we only know
		// once we have found the end of the unary expression. There
		// are two cases:
		//
		//	 <- type	=> (<-type) must be channel type
		//	 <- expr	=> <-(expr) is a receive from an expression
		//
		// In the first case, the arrow must be re-associated with
		// the channel type parsed already:
		//
		//	 <- (chan type)		=>	(<-chan type)
		//	 <- (chan<- type)	=>	(<-chan (<-type))

		x := p.parseUnaryExpr()

		// determine which case we have
		if typ, ok := x.(*ast.ChanType); ok {
			// (<-type)

			// re-associate position info and <-
			dir := ast.SEND
			for ok && dir == ast.SEND {
				if typ.Dir == ast.RECV {
					// error: (<-type) is (<-(<-chan T))
					p.errorExpected(typ.Arrow, "'chan'")
				}
				arrow, typ.Begin, typ.Arrow = typ.Arrow, arrow, arrow
				dir, typ.Dir = typ.Dir, ast.RECV
				typ, ok = typ.Value.(*ast.ChanType)
			}
			if dir == ast.SEND {
				p.errorExpected(arrow, "channel type")
			}

			return x
		}

		// <-(expr)
		return &ast.UnaryExpr{OpPos: arrow, Op: token.ARROW, X: p.checkExpr(x)}

	case token.MUL:
		// pointer type or unary "*" expression
		pos := p.pos
		p.next()
		x := p.parseUnaryExpr()
		return &ast.StarExpr{Star: pos, X: p.checkExprOrType(x)}
	}

	return p.parsePrimaryExpr()
}
	1697  
	1698  func (p *parser) tokPrec() (token.Token, int) {
	1699  	tok := p.tok
	1700  	if p.inRhs && tok == token.ASSIGN {
	1701  		tok = token.EQL
	1702  	}
	1703  	return tok, tok.Precedence()
	1704  }
	1705  
// parseBinaryExpr parses a binary expression involving operators with
// precedence >= prec1 using precedence climbing: each operator's right
// operand is parsed at one precedence level higher.
func (p *parser) parseBinaryExpr(prec1 int) ast.Expr {
	if p.trace {
		defer un(trace(p, "BinaryExpr"))
	}

	x := p.parseUnaryExpr()
	// We track the nesting here rather than at the entry for the function,
	// since it can iteratively produce a nested output, and we want to
	// limit how deep a structure we generate.
	var n int
	defer func() { p.nestLev -= n }()
	for n = 1; ; n++ {
		incNestLev(p)
		op, oprec := p.tokPrec()
		if oprec < prec1 {
			// operator binds less tightly: let the caller consume it
			return x
		}
		pos := p.expect(op)
		y := p.parseBinaryExpr(oprec + 1)
		x = &ast.BinaryExpr{X: p.checkExpr(x), OpPos: pos, Op: op, Y: p.checkExpr(y)}
	}
}
	1728  
	1729  // The result may be a type or even a raw type ([...]int). Callers must
	1730  // check the result (using checkExpr or checkExprOrType), depending on
	1731  // context.
	1732  func (p *parser) parseExpr() ast.Expr {
	1733  	if p.trace {
	1734  		defer un(trace(p, "Expression"))
	1735  	}
	1736  
	1737  	return p.parseBinaryExpr(token.LowestPrec + 1)
	1738  }
	1739  
	1740  func (p *parser) parseRhs() ast.Expr {
	1741  	old := p.inRhs
	1742  	p.inRhs = true
	1743  	x := p.checkExpr(p.parseExpr())
	1744  	p.inRhs = old
	1745  	return x
	1746  }
	1747  
	1748  func (p *parser) parseRhsOrType() ast.Expr {
	1749  	old := p.inRhs
	1750  	p.inRhs = true
	1751  	x := p.checkExprOrType(p.parseExpr())
	1752  	p.inRhs = old
	1753  	return x
	1754  }
	1755  
	1756  // ----------------------------------------------------------------------------
	1757  // Statements
	1758  
// Parsing modes for parseSimpleStmt.
const (
	basic = iota // no special mode
	labelOk      // a labeled statement is permitted
	rangeOk      // a range clause is permitted
)
	1765  
// parseSimpleStmt returns true as 2nd result if it parsed the assignment
// of a range clause (with mode == rangeOk). The returned statement is an
// assignment with a right-hand side that is a single unary expression of
// the form "range x". No guarantees are given for the left-hand side.
func (p *parser) parseSimpleStmt(mode int) (ast.Stmt, bool) {
	if p.trace {
		defer un(trace(p, "SimpleStmt"))
	}

	x := p.parseList(false) // left-hand side (or sole) expression list

	switch p.tok {
	case
		token.DEFINE, token.ASSIGN, token.ADD_ASSIGN,
		token.SUB_ASSIGN, token.MUL_ASSIGN, token.QUO_ASSIGN,
		token.REM_ASSIGN, token.AND_ASSIGN, token.OR_ASSIGN,
		token.XOR_ASSIGN, token.SHL_ASSIGN, token.SHR_ASSIGN, token.AND_NOT_ASSIGN:
		// assignment statement, possibly part of a range clause
		pos, tok := p.pos, p.tok
		p.next()
		var y []ast.Expr
		isRange := false
		// "range" is only valid directly after '=' or ':=' when the
		// caller allows it (mode == rangeOk, i.e. for statements)
		if mode == rangeOk && p.tok == token.RANGE && (tok == token.DEFINE || tok == token.ASSIGN) {
			pos := p.pos
			p.next()
			y = []ast.Expr{&ast.UnaryExpr{OpPos: pos, Op: token.RANGE, X: p.parseRhs()}}
			isRange = true
		} else {
			y = p.parseList(true)
		}
		as := &ast.AssignStmt{Lhs: x, TokPos: pos, Tok: tok, Rhs: y}
		if tok == token.DEFINE {
			p.checkAssignStmt(as)
		}
		return as, isRange
	}

	if len(x) > 1 {
		// only an assignment may have a multi-expression left-hand side
		p.errorExpected(x[0].Pos(), "1 expression")
		// continue with first expression
	}

	switch p.tok {
	case token.COLON:
		// labeled statement
		colon := p.pos
		p.next()
		if label, isIdent := x[0].(*ast.Ident); mode == labelOk && isIdent {
			// Go spec: The scope of a label is the body of the function
			// in which it is declared and excludes the body of any nested
			// function.
			stmt := &ast.LabeledStmt{Label: label, Colon: colon, Stmt: p.parseStmt()}
			return stmt, false
		}
		// The label declaration typically starts at x[0].Pos(), but the label
		// declaration may be erroneous due to a token after that position (and
		// before the ':'). If SpuriousErrors is not set, the (only) error
		// reported for the line is the illegal label error instead of the token
		// before the ':' that caused the problem. Thus, use the (latest) colon
		// position for error reporting.
		p.error(colon, "illegal label declaration")
		return &ast.BadStmt{From: x[0].Pos(), To: colon + 1}, false

	case token.ARROW:
		// send statement
		arrow := p.pos
		p.next()
		y := p.parseRhs()
		return &ast.SendStmt{Chan: x[0], Arrow: arrow, Value: y}, false

	case token.INC, token.DEC:
		// increment or decrement
		s := &ast.IncDecStmt{X: x[0], TokPos: p.pos, Tok: p.tok}
		p.next()
		return s, false
	}

	// expression
	return &ast.ExprStmt{X: x[0]}, false
}
	1846  
	1847  func (p *parser) checkAssignStmt(as *ast.AssignStmt) {
	1848  	for _, x := range as.Lhs {
	1849  		if _, isIdent := x.(*ast.Ident); !isIdent {
	1850  			p.errorExpected(x.Pos(), "identifier on left side of :=")
	1851  		}
	1852  	}
	1853  }
	1854  
	1855  func (p *parser) parseCallExpr(callType string) *ast.CallExpr {
	1856  	x := p.parseRhsOrType() // could be a conversion: (some type)(x)
	1857  	if call, isCall := x.(*ast.CallExpr); isCall {
	1858  		return call
	1859  	}
	1860  	if _, isBad := x.(*ast.BadExpr); !isBad {
	1861  		// only report error if it's a new one
	1862  		p.error(p.safePos(x.End()), fmt.Sprintf("function must be invoked in %s statement", callType))
	1863  	}
	1864  	return nil
	1865  }
	1866  
	1867  func (p *parser) parseGoStmt() ast.Stmt {
	1868  	if p.trace {
	1869  		defer un(trace(p, "GoStmt"))
	1870  	}
	1871  
	1872  	pos := p.expect(token.GO)
	1873  	call := p.parseCallExpr("go")
	1874  	p.expectSemi()
	1875  	if call == nil {
	1876  		return &ast.BadStmt{From: pos, To: pos + 2} // len("go")
	1877  	}
	1878  
	1879  	return &ast.GoStmt{Go: pos, Call: call}
	1880  }
	1881  
	1882  func (p *parser) parseDeferStmt() ast.Stmt {
	1883  	if p.trace {
	1884  		defer un(trace(p, "DeferStmt"))
	1885  	}
	1886  
	1887  	pos := p.expect(token.DEFER)
	1888  	call := p.parseCallExpr("defer")
	1889  	p.expectSemi()
	1890  	if call == nil {
	1891  		return &ast.BadStmt{From: pos, To: pos + 5} // len("defer")
	1892  	}
	1893  
	1894  	return &ast.DeferStmt{Defer: pos, Call: call}
	1895  }
	1896  
	1897  func (p *parser) parseReturnStmt() *ast.ReturnStmt {
	1898  	if p.trace {
	1899  		defer un(trace(p, "ReturnStmt"))
	1900  	}
	1901  
	1902  	pos := p.pos
	1903  	p.expect(token.RETURN)
	1904  	var x []ast.Expr
	1905  	if p.tok != token.SEMICOLON && p.tok != token.RBRACE {
	1906  		x = p.parseList(true)
	1907  	}
	1908  	p.expectSemi()
	1909  
	1910  	return &ast.ReturnStmt{Return: pos, Results: x}
	1911  }
	1912  
	1913  func (p *parser) parseBranchStmt(tok token.Token) *ast.BranchStmt {
	1914  	if p.trace {
	1915  		defer un(trace(p, "BranchStmt"))
	1916  	}
	1917  
	1918  	pos := p.expect(tok)
	1919  	var label *ast.Ident
	1920  	if tok != token.FALLTHROUGH && p.tok == token.IDENT {
	1921  		label = p.parseIdent()
	1922  	}
	1923  	p.expectSemi()
	1924  
	1925  	return &ast.BranchStmt{TokPos: pos, Tok: tok, Label: label}
	1926  }
	1927  
	1928  func (p *parser) makeExpr(s ast.Stmt, want string) ast.Expr {
	1929  	if s == nil {
	1930  		return nil
	1931  	}
	1932  	if es, isExpr := s.(*ast.ExprStmt); isExpr {
	1933  		return p.checkExpr(es.X)
	1934  	}
	1935  	found := "simple statement"
	1936  	if _, isAss := s.(*ast.AssignStmt); isAss {
	1937  		found = "assignment"
	1938  	}
	1939  	p.error(s.Pos(), fmt.Sprintf("expected %s, found %s (missing parentheses around composite literal?)", want, found))
	1940  	return &ast.BadExpr{From: s.Pos(), To: p.safePos(s.End())}
	1941  }
	1942  
// parseIfHeader is an adjusted version of parser.header
// in cmd/compile/internal/syntax/parser.go, which has
// been tuned for better error handling.
func (p *parser) parseIfHeader() (init ast.Stmt, cond ast.Expr) {
	if p.tok == token.LBRACE {
		p.error(p.pos, "missing condition in if statement")
		cond = &ast.BadExpr{From: p.pos, To: p.pos}
		return
	}
	// p.tok != token.LBRACE

	// disable composite literals in the header (see parsePrimaryExpr)
	prevLev := p.exprLev
	p.exprLev = -1

	if p.tok != token.SEMICOLON {
		// accept potential variable declaration but complain
		if p.tok == token.VAR {
			p.next()
			p.error(p.pos, "var declaration not allowed in 'IF' initializer")
		}
		init, _ = p.parseSimpleStmt(basic)
	}

	var condStmt ast.Stmt
	var semi struct {
		pos token.Pos
		lit string // ";" or "\n"; valid if pos.IsValid()
	}
	if p.tok != token.LBRACE {
		// a ';' (explicit or inserted for a newline) separates the
		// init statement from the condition
		if p.tok == token.SEMICOLON {
			semi.pos = p.pos
			semi.lit = p.lit
			p.next()
		} else {
			p.expect(token.SEMICOLON)
		}
		if p.tok != token.LBRACE {
			condStmt, _ = p.parseSimpleStmt(basic)
		}
	} else {
		// no ';': what was parsed as init is really the condition
		condStmt = init
		init = nil
	}

	if condStmt != nil {
		cond = p.makeExpr(condStmt, "boolean expression")
	} else if semi.pos.IsValid() {
		if semi.lit == "\n" {
			p.error(semi.pos, "unexpected newline, expecting { after if clause")
		} else {
			p.error(semi.pos, "missing condition in if statement")
		}
	}

	// make sure we have a valid AST
	if cond == nil {
		cond = &ast.BadExpr{From: p.pos, To: p.pos}
	}

	p.exprLev = prevLev
	return
}
	2005  
	2006  func (p *parser) parseIfStmt() *ast.IfStmt {
	2007  	defer decNestLev(incNestLev(p))
	2008  
	2009  	if p.trace {
	2010  		defer un(trace(p, "IfStmt"))
	2011  	}
	2012  
	2013  	pos := p.expect(token.IF)
	2014  
	2015  	init, cond := p.parseIfHeader()
	2016  	body := p.parseBlockStmt()
	2017  
	2018  	var else_ ast.Stmt
	2019  	if p.tok == token.ELSE {
	2020  		p.next()
	2021  		switch p.tok {
	2022  		case token.IF:
	2023  			else_ = p.parseIfStmt()
	2024  		case token.LBRACE:
	2025  			else_ = p.parseBlockStmt()
	2026  			p.expectSemi()
	2027  		default:
	2028  			p.errorExpected(p.pos, "if statement or block")
	2029  			else_ = &ast.BadStmt{From: p.pos, To: p.pos}
	2030  		}
	2031  	} else {
	2032  		p.expectSemi()
	2033  	}
	2034  
	2035  	return &ast.IfStmt{If: pos, Init: init, Cond: cond, Body: body, Else: else_}
	2036  }
	2037  
	2038  func (p *parser) parseTypeList() (list []ast.Expr) {
	2039  	if p.trace {
	2040  		defer un(trace(p, "TypeList"))
	2041  	}
	2042  
	2043  	list = append(list, p.parseType())
	2044  	for p.tok == token.COMMA {
	2045  		p.next()
	2046  		list = append(list, p.parseType())
	2047  	}
	2048  
	2049  	return
	2050  }
	2051  
	2052  func (p *parser) parseCaseClause(typeSwitch bool) *ast.CaseClause {
	2053  	if p.trace {
	2054  		defer un(trace(p, "CaseClause"))
	2055  	}
	2056  
	2057  	pos := p.pos
	2058  	var list []ast.Expr
	2059  	if p.tok == token.CASE {
	2060  		p.next()
	2061  		if typeSwitch {
	2062  			list = p.parseTypeList()
	2063  		} else {
	2064  			list = p.parseList(true)
	2065  		}
	2066  	} else {
	2067  		p.expect(token.DEFAULT)
	2068  	}
	2069  
	2070  	colon := p.expect(token.COLON)
	2071  	body := p.parseStmtList()
	2072  
	2073  	return &ast.CaseClause{Case: pos, List: list, Colon: colon, Body: body}
	2074  }
	2075  
	2076  func isTypeSwitchAssert(x ast.Expr) bool {
	2077  	a, ok := x.(*ast.TypeAssertExpr)
	2078  	return ok && a.Type == nil
	2079  }
	2080  
// isTypeSwitchGuard reports whether s is a type switch guard: an
// expression statement x.(type) or an assignment v := x.(type) with
// exactly one operand on each side. As a concession to erroneous input,
// v = x.(type) is also accepted as a guard, but an error is reported.
func (p *parser) isTypeSwitchGuard(s ast.Stmt) bool {
	switch t := s.(type) {
	case *ast.ExprStmt:
		// x.(type)
		return isTypeSwitchAssert(t.X)
	case *ast.AssignStmt:
		// v := x.(type)
		if len(t.Lhs) == 1 && len(t.Rhs) == 1 && isTypeSwitchAssert(t.Rhs[0]) {
			switch t.Tok {
			case token.ASSIGN:
				// permit v = x.(type) but complain
				p.error(t.TokPos, "expected ':=', found '='")
				fallthrough
			case token.DEFINE:
				return true
			}
		}
	}
	return false
}
	2101  
// parseSwitchStmt parses an expression switch or a type switch statement
// and returns the corresponding *ast.SwitchStmt or *ast.TypeSwitchStmt.
// Which one is produced depends on whether the header's final simple
// statement is a type switch guard (see isTypeSwitchGuard).
func (p *parser) parseSwitchStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "SwitchStmt"))
	}

	pos := p.expect(token.SWITCH)

	// s1: optional init statement; s2: optional tag expression or guard.
	var s1, s2 ast.Stmt
	if p.tok != token.LBRACE {
		// Header expressions are parsed at exprLev -1, as in
		// parseIfHeader and parseForStmt.
		prevLev := p.exprLev
		p.exprLev = -1
		if p.tok != token.SEMICOLON {
			s2, _ = p.parseSimpleStmt(basic)
		}
		if p.tok == token.SEMICOLON {
			// A semicolon means the statement parsed above was the init
			// statement; the tag or guard (if any) follows it.
			p.next()
			s1 = s2
			s2 = nil
			if p.tok != token.LBRACE {
				// A TypeSwitchGuard may declare a variable in addition
				// to the variable declared in the initial SimpleStmt.
				// Introduce extra scope to avoid redeclaration errors:
				//
				//	switch t := 0; t := x.(T) { ... }
				//
				// (this code is not valid Go because the first t
				// cannot be accessed and thus is never used, the extra
				// scope is needed for the correct error message).
				//
				// If we don't have a type switch, s2 must be an expression.
				// Having the extra nested but empty scope won't affect it.
				s2, _ = p.parseSimpleStmt(basic)
			}
		}
		p.exprLev = prevLev
	}

	typeSwitch := p.isTypeSwitchGuard(s2)
	lbrace := p.expect(token.LBRACE)
	var list []ast.Stmt
	for p.tok == token.CASE || p.tok == token.DEFAULT {
		list = append(list, p.parseCaseClause(typeSwitch))
	}
	rbrace := p.expect(token.RBRACE)
	p.expectSemi()
	body := &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}

	if typeSwitch {
		return &ast.TypeSwitchStmt{Switch: pos, Init: s1, Assign: s2, Body: body}
	}

	return &ast.SwitchStmt{Switch: pos, Init: s1, Tag: p.makeExpr(s2, "switch expression"), Body: body}
}
	2155  
// parseCommClause parses one communication clause of a select statement:
// either "default:" or "case <send or receive statement>:", followed by
// the clause's statement list. On malformed input it reports an error and
// continues with a best-effort interpretation of the operands parsed so far.
func (p *parser) parseCommClause() *ast.CommClause {
	if p.trace {
		defer un(trace(p, "CommClause"))
	}

	pos := p.pos
	var comm ast.Stmt
	if p.tok == token.CASE {
		p.next()
		// Parse the left-hand side first; whether this is a send or a
		// receive is decided by the token that follows.
		lhs := p.parseList(false)
		if p.tok == token.ARROW {
			// SendStmt: ch <- x, with exactly one channel expression.
			if len(lhs) > 1 {
				p.errorExpected(lhs[0].Pos(), "1 expression")
				// continue with first expression
			}
			arrow := p.pos
			p.next()
			rhs := p.parseRhs()
			comm = &ast.SendStmt{Chan: lhs[0], Arrow: arrow, Value: rhs}
		} else {
			// RecvStmt
			if tok := p.tok; tok == token.ASSIGN || tok == token.DEFINE {
				// RecvStmt with assignment: v[, ok] = <-ch or v[, ok] := <-ch.
				if len(lhs) > 2 {
					p.errorExpected(lhs[0].Pos(), "1 or 2 expressions")
					// continue with first two expressions
					lhs = lhs[0:2]
				}
				pos := p.pos
				p.next()
				rhs := p.parseRhs()
				as := &ast.AssignStmt{Lhs: lhs, TokPos: pos, Tok: tok, Rhs: []ast.Expr{rhs}}
				if tok == token.DEFINE {
					p.checkAssignStmt(as)
				}
				comm = as
			} else {
				// lhs must be single receive operation
				if len(lhs) > 1 {
					p.errorExpected(lhs[0].Pos(), "1 expression")
					// continue with first expression
				}
				comm = &ast.ExprStmt{X: lhs[0]}
			}
		}
	} else {
		p.expect(token.DEFAULT)
	}

	colon := p.expect(token.COLON)
	body := p.parseStmtList()

	return &ast.CommClause{Case: pos, Comm: comm, Colon: colon, Body: body}
}
	2211  
	2212  func (p *parser) parseSelectStmt() *ast.SelectStmt {
	2213  	if p.trace {
	2214  		defer un(trace(p, "SelectStmt"))
	2215  	}
	2216  
	2217  	pos := p.expect(token.SELECT)
	2218  	lbrace := p.expect(token.LBRACE)
	2219  	var list []ast.Stmt
	2220  	for p.tok == token.CASE || p.tok == token.DEFAULT {
	2221  		list = append(list, p.parseCommClause())
	2222  	}
	2223  	rbrace := p.expect(token.RBRACE)
	2224  	p.expectSemi()
	2225  	body := &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
	2226  
	2227  	return &ast.SelectStmt{Select: pos, Body: body}
	2228  }
	2229  
// parseForStmt parses a for statement in any of its forms and returns an
// *ast.ForStmt or, for range clauses, an *ast.RangeStmt. A range clause
// with more than two left-hand expressions is reported and yields an
// *ast.BadStmt.
func (p *parser) parseForStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "ForStmt"))
	}

	pos := p.expect(token.FOR)

	// s1: init statement; s2: condition or range clause; s3: post statement.
	var s1, s2, s3 ast.Stmt
	var isRange bool
	if p.tok != token.LBRACE {
		// Header expressions are parsed at exprLev -1, as in
		// parseIfHeader and parseSwitchStmt.
		prevLev := p.exprLev
		p.exprLev = -1
		if p.tok != token.SEMICOLON {
			if p.tok == token.RANGE {
				// "for range x" (nil lhs in assignment)
				pos := p.pos
				p.next()
				y := []ast.Expr{&ast.UnaryExpr{OpPos: pos, Op: token.RANGE, X: p.parseRhs()}}
				s2 = &ast.AssignStmt{Rhs: y}
				isRange = true
			} else {
				s2, isRange = p.parseSimpleStmt(rangeOk)
			}
		}
		if !isRange && p.tok == token.SEMICOLON {
			// Three-part header: the statement parsed above was the init
			// statement; condition and post statement follow.
			p.next()
			s1 = s2
			s2 = nil
			if p.tok != token.SEMICOLON {
				s2, _ = p.parseSimpleStmt(basic)
			}
			p.expectSemi()
			if p.tok != token.LBRACE {
				s3, _ = p.parseSimpleStmt(basic)
			}
		}
		p.exprLev = prevLev
	}

	body := p.parseBlockStmt()
	p.expectSemi()

	if isRange {
		as := s2.(*ast.AssignStmt)
		// check lhs
		var key, value ast.Expr
		switch len(as.Lhs) {
		case 0:
			// nothing to do
		case 1:
			key = as.Lhs[0]
		case 2:
			key, value = as.Lhs[0], as.Lhs[1]
		default:
			p.errorExpected(as.Lhs[len(as.Lhs)-1].Pos(), "at most 2 expressions")
			return &ast.BadStmt{From: pos, To: p.safePos(body.End())}
		}
		// parseSimpleStmt returned a right-hand side that
		// is a single unary expression of the form "range x"
		x := as.Rhs[0].(*ast.UnaryExpr).X
		return &ast.RangeStmt{
			For:    pos,
			Key:    key,
			Value:  value,
			TokPos: as.TokPos,
			Tok:    as.Tok,
			X:      x,
			Body:   body,
		}
	}

	// regular for statement
	return &ast.ForStmt{
		For:  pos,
		Init: s1,
		Cond: p.makeExpr(s2, "boolean or range expression"),
		Post: s3,
		Body: body,
	}
}
	2310  
// parseStmt parses a single statement, dispatching on the current token.
// If no statement can be recognized, it reports an error, advances to the
// next likely statement start, and returns an *ast.BadStmt so parsing can
// continue.
func (p *parser) parseStmt() (s ast.Stmt) {
	defer decNestLev(incNestLev(p))

	if p.trace {
		defer un(trace(p, "Statement"))
	}

	switch p.tok {
	case token.CONST, token.TYPE, token.VAR:
		s = &ast.DeclStmt{Decl: p.parseDecl(stmtStart)}
	case
		// tokens that may start an expression
		token.IDENT, token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING, token.FUNC, token.LPAREN, // operands
		token.LBRACK, token.STRUCT, token.MAP, token.CHAN, token.INTERFACE, // composite types
		token.ADD, token.SUB, token.MUL, token.AND, token.XOR, token.ARROW, token.NOT: // unary operators
		s, _ = p.parseSimpleStmt(labelOk)
		// because of the required look-ahead, labeled statements are
		// parsed by parseSimpleStmt - don't expect a semicolon after
		// them
		if _, isLabeledStmt := s.(*ast.LabeledStmt); !isLabeledStmt {
			p.expectSemi()
		}
	case token.GO:
		s = p.parseGoStmt()
	case token.DEFER:
		s = p.parseDeferStmt()
	case token.RETURN:
		s = p.parseReturnStmt()
	case token.BREAK, token.CONTINUE, token.GOTO, token.FALLTHROUGH:
		s = p.parseBranchStmt(p.tok)
	case token.LBRACE:
		s = p.parseBlockStmt()
		p.expectSemi()
	case token.IF:
		s = p.parseIfStmt()
	case token.SWITCH:
		s = p.parseSwitchStmt()
	case token.SELECT:
		s = p.parseSelectStmt()
	case token.FOR:
		s = p.parseForStmt()
	case token.SEMICOLON:
		// Is it ever possible to have an implicit semicolon
		// producing an empty statement in a valid program?
		// (handle correctly anyway)
		s = &ast.EmptyStmt{Semicolon: p.pos, Implicit: p.lit == "\n"}
		p.next()
	case token.RBRACE:
		// a semicolon may be omitted before a closing "}"
		s = &ast.EmptyStmt{Semicolon: p.pos, Implicit: true}
	default:
		// no statement found
		pos := p.pos
		p.errorExpected(pos, "statement")
		p.advance(stmtStart)
		s = &ast.BadStmt{From: pos, To: p.pos}
	}

	return
}
	2371  
// ----------------------------------------------------------------------------
// Declarations

// A parseSpecFunction parses a single declaration specification (import,
// constant, type, or variable spec). doc is the spec's doc comment, pos and
// keyword identify the declaration's keyword token, and iota is the spec's
// index within a parenthesized declaration group (0 otherwise).
type parseSpecFunction func(doc *ast.CommentGroup, pos token.Pos, keyword token.Token, iota int) ast.Spec
	2376  
	2377  func isValidImport(lit string) bool {
	2378  	const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD"
	2379  	s, _ := strconv.Unquote(lit) // go/scanner returns a legal string literal
	2380  	for _, r := range s {
	2381  		if !unicode.IsGraphic(r) || unicode.IsSpace(r) || strings.ContainsRune(illegalChars, r) {
	2382  			return false
	2383  		}
	2384  	}
	2385  	return s != ""
	2386  }
	2387  
// parseImportSpec parses a single import spec: an optional local package
// name ("." or an identifier) followed by the quoted import path. Invalid
// paths are reported but still recorded in the AST. The spec is appended
// to p.imports. The blank parameters exist only so the method matches the
// parseSpecFunction signature.
func (p *parser) parseImportSpec(doc *ast.CommentGroup, _ token.Pos, _ token.Token, _ int) ast.Spec {
	if p.trace {
		defer un(trace(p, "ImportSpec"))
	}

	var ident *ast.Ident
	switch p.tok {
	case token.PERIOD:
		ident = &ast.Ident{NamePos: p.pos, Name: "."}
		p.next()
	case token.IDENT:
		ident = p.parseIdent()
	}

	pos := p.pos
	var path string
	if p.tok == token.STRING {
		path = p.lit
		if !isValidImport(path) {
			p.error(pos, "invalid import path: "+path)
		}
		p.next()
	} else {
		p.expect(token.STRING) // use expect() error handling
	}
	p.expectSemi() // call before accessing p.linecomment

	// collect imports
	spec := &ast.ImportSpec{
		Doc:     doc,
		Name:    ident,
		Path:    &ast.BasicLit{ValuePos: pos, Kind: token.STRING, Value: path},
		Comment: p.lineComment,
	}
	p.imports = append(p.imports, spec)

	return spec
}
	2426  
// parseValueSpec parses a single const or var spec. keyword is token.CONST
// or token.VAR; iota is the spec's index within its declaration group and
// is used to decide whether a const spec may legally omit its values. The
// blank pos parameter exists only so the method matches the
// parseSpecFunction signature.
func (p *parser) parseValueSpec(doc *ast.CommentGroup, _ token.Pos, keyword token.Token, iota int) ast.Spec {
	if p.trace {
		defer un(trace(p, keyword.String()+"Spec"))
	}

	pos := p.pos
	idents := p.parseIdentList()
	typ := p.tryIdentOrType()
	var values []ast.Expr
	// always permit optional initialization for more tolerant parsing
	if p.tok == token.ASSIGN {
		p.next()
		values = p.parseList(true)
	}
	p.expectSemi() // call before accessing p.linecomment

	// Enforce the per-keyword rules the tolerant parse above ignored.
	switch keyword {
	case token.VAR:
		if typ == nil && values == nil {
			p.error(pos, "missing variable type or initialization")
		}
	case token.CONST:
		// Only the first spec of a group (iota == 0), or a spec with an
		// explicit type, must repeat the constant values.
		if values == nil && (iota == 0 || typ != nil) {
			p.error(pos, "missing constant value")
		}
	}

	spec := &ast.ValueSpec{
		Doc:     doc,
		Names:   idents,
		Type:    typ,
		Values:  values,
		Comment: p.lineComment,
	}
	return spec
}
	2463  
// parseGenericType parses the remainder of a generic type declaration:
// the type parameter list (whose first name, name0, has already been
// parsed by the caller) up to closeTok, followed by the declared type.
// The type parameters are attached to spec via the typeparams helper.
// openPos is the position of the opening bracket.
func (p *parser) parseGenericType(spec *ast.TypeSpec, openPos token.Pos, name0 *ast.Ident, closeTok token.Token) {
	list := p.parseParameterList(name0, closeTok, p.parseParamDecl, true)
	closePos := p.expect(closeTok)
	typeparams.Set(spec, &ast.FieldList{Opening: openPos, List: list, Closing: closePos})
	// Type alias cannot have type parameters. Accept them for robustness but complain.
	if p.tok == token.ASSIGN {
		p.error(p.pos, "generic type cannot be alias")
		p.next()
	}
	spec.Type = p.parseType()
}
	2475  
// parseTypeSpec parses a single type spec: a type name followed by a type,
// an alias (= type), or — when type parameters are enabled — a generic
// type declaration. After "name [" the parser must disambiguate between an
// array type ([N]T) and a type parameter list ([T any]); it does so by
// parsing one expression and checking what follows. The blank parameters
// exist only so the method matches the parseSpecFunction signature.
func (p *parser) parseTypeSpec(doc *ast.CommentGroup, _ token.Pos, _ token.Token, _ int) ast.Spec {
	if p.trace {
		defer un(trace(p, "TypeSpec"))
	}

	ident := p.parseIdent()
	spec := &ast.TypeSpec{Doc: doc, Name: ident}

	switch p.tok {
	case token.LBRACK:
		lbrack := p.pos
		p.next()
		if p.tok == token.IDENT {
			// array type or generic type [T any]
			p.exprLev++
			x := p.parseExpr()
			p.exprLev--
			// An identifier not immediately followed by "]" means we are
			// looking at a type parameter list, not an array length.
			if name0, _ := x.(*ast.Ident); p.parseTypeParams() && name0 != nil && p.tok != token.RBRACK {
				// generic type [T any];
				p.parseGenericType(spec, lbrack, name0, token.RBRACK)
			} else {
				// array type
				// TODO(rfindley) should resolve all identifiers in x.
				p.expect(token.RBRACK)
				elt := p.parseType()
				spec.Type = &ast.ArrayType{Lbrack: lbrack, Len: x, Elt: elt}
			}
		} else {
			// array type
			alen := p.parseArrayLen()
			p.expect(token.RBRACK)
			elt := p.parseType()
			spec.Type = &ast.ArrayType{Lbrack: lbrack, Len: alen, Elt: elt}
		}

	default:
		// no type parameters
		if p.tok == token.ASSIGN {
			// type alias
			spec.Assign = p.pos
			p.next()
		}
		spec.Type = p.parseType()
	}

	p.expectSemi() // call before accessing p.linecomment
	spec.Comment = p.lineComment

	return spec
}
	2526  
// parseGenDecl parses a general declaration introduced by keyword (import,
// const, type, or var): either a single spec, or a parenthesized group of
// specs. f is called to parse each individual spec and receives the spec's
// index within the group as its iota argument.
func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction) *ast.GenDecl {
	if p.trace {
		defer un(trace(p, "GenDecl("+keyword.String()+")"))
	}

	doc := p.leadComment
	pos := p.expect(keyword)
	var lparen, rparen token.Pos
	var list []ast.Spec
	if p.tok == token.LPAREN {
		// Parenthesized group: parse specs until ")" (or EOF on bad input).
		lparen = p.pos
		p.next()
		for iota := 0; p.tok != token.RPAREN && p.tok != token.EOF; iota++ {
			list = append(list, f(p.leadComment, pos, keyword, iota))
		}
		rparen = p.expect(token.RPAREN)
		p.expectSemi()
	} else {
		// Single, ungrouped spec.
		list = append(list, f(nil, pos, keyword, 0))
	}

	return &ast.GenDecl{
		Doc:    doc,
		TokPos: pos,
		Tok:    keyword,
		Lparen: lparen,
		Specs:  list,
		Rparen: rparen,
	}
}
	2557  
// parseFuncDecl parses a function or method declaration, including an
// optional receiver, type parameters (attached to the declaration's type
// via the typeparams helper), parameters, results, and an optional body.
func (p *parser) parseFuncDecl() *ast.FuncDecl {
	if p.trace {
		defer un(trace(p, "FunctionDecl"))
	}

	doc := p.leadComment
	pos := p.expect(token.FUNC)

	// A "(" before the name introduces a method receiver.
	var recv *ast.FieldList
	if p.tok == token.LPAREN {
		_, recv = p.parseParameters(false)
	}

	ident := p.parseIdent()

	tparams, params := p.parseParameters(true)
	results := p.parseResult()

	var body *ast.BlockStmt
	if p.tok == token.LBRACE {
		body = p.parseBody()
		p.expectSemi()
	} else if p.tok == token.SEMICOLON {
		p.next()
		if p.tok == token.LBRACE {
			// opening { of function declaration on next line
			p.error(p.pos, "unexpected semicolon or newline before {")
			body = p.parseBody()
			p.expectSemi()
		}
	} else {
		// No body: a function declaration without a body (e.g. one
		// implemented externally) still ends with a semicolon.
		p.expectSemi()
	}

	decl := &ast.FuncDecl{
		Doc:  doc,
		Recv: recv,
		Name: ident,
		Type: &ast.FuncType{
			Func:    pos,
			Params:  params,
			Results: results,
		},
		Body: body,
	}
	typeparams.Set(decl.Type, tparams)
	return decl
}
	2606  
	2607  func (p *parser) parseDecl(sync map[token.Token]bool) ast.Decl {
	2608  	if p.trace {
	2609  		defer un(trace(p, "Declaration"))
	2610  	}
	2611  
	2612  	var f parseSpecFunction
	2613  	switch p.tok {
	2614  	case token.CONST, token.VAR:
	2615  		f = p.parseValueSpec
	2616  
	2617  	case token.TYPE:
	2618  		f = p.parseTypeSpec
	2619  
	2620  	case token.FUNC:
	2621  		return p.parseFuncDecl()
	2622  
	2623  	default:
	2624  		pos := p.pos
	2625  		p.errorExpected(pos, "declaration")
	2626  		p.advance(sync)
	2627  		return &ast.BadDecl{From: pos, To: p.pos}
	2628  	}
	2629  
	2630  	return p.parseGenDecl(p.tok, f)
	2631  }
	2632  
	2633  // ----------------------------------------------------------------------------
	2634  // Source files
	2635  
// parseFile parses a complete Go source file: the package clause followed
// by import declarations and then the remaining top-level declarations
// (subject to the PackageClauseOnly and ImportsOnly mode flags). It
// returns nil if scanning the first token or parsing the package clause
// produced errors, since the input is then likely not a Go source file at
// all. Unless SkipObjectResolution is set, identifiers are resolved before
// the file is returned.
func (p *parser) parseFile() *ast.File {
	if p.trace {
		defer un(trace(p, "File"))
	}

	// Don't bother parsing the rest if we had errors scanning the first token.
	// Likely not a Go source file at all.
	if p.errors.Len() != 0 {
		return nil
	}

	// package clause
	doc := p.leadComment
	pos := p.expect(token.PACKAGE)
	// Go spec: The package clause is not a declaration;
	// the package name does not appear in any scope.
	ident := p.parseIdent()
	if ident.Name == "_" && p.mode&DeclarationErrors != 0 {
		p.error(p.pos, "invalid package name _")
	}
	p.expectSemi()

	// Don't bother parsing the rest if we had errors parsing the package clause.
	// Likely not a Go source file at all.
	if p.errors.Len() != 0 {
		return nil
	}

	var decls []ast.Decl
	if p.mode&PackageClauseOnly == 0 {
		// import decls
		for p.tok == token.IMPORT {
			decls = append(decls, p.parseGenDecl(token.IMPORT, p.parseImportSpec))
		}

		if p.mode&ImportsOnly == 0 {
			// rest of package body
			for p.tok != token.EOF {
				decls = append(decls, p.parseDecl(declStart))
			}
		}
	}

	f := &ast.File{
		Doc:      doc,
		Package:  pos,
		Name:     ident,
		Decls:    decls,
		Imports:  p.imports,
		Comments: p.comments,
	}
	var declErr func(token.Pos, string)
	if p.mode&DeclarationErrors != 0 {
		declErr = p.error
	}
	if p.mode&SkipObjectResolution == 0 {
		resolveFile(f, p.file, declErr)
	}

	return f
}
	2697  

View as plain text