cue/parser: add package

Also exports DebugStr in the internal package.

Change-Id: I61b4099d8cd0aa9a471bd8343f6b369feb2ba736
diff --git a/cue/parser/doc.go b/cue/parser/doc.go
new file mode 100644
index 0000000..adde139
--- /dev/null
+++ b/cue/parser/doc.go
@@ -0,0 +1,23 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package parser implements a parser for CUE source files. Input may be
+// provided in a variety of forms (see the various Parse* functions); the output
+// is an abstract syntax tree (AST) representing the CUE source. The parser is
+// invoked through one of the Parse* functions.
+//
+// The parser accepts a larger language than is syntactically permitted by the
+// CUE spec, for simplicity, and for improved robustness in the presence of
+// syntax errors.
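+//
+// A minimal usage sketch (the file name and src are placeholders):
+//
+//	fset := token.NewFileSet()
+//	f, err := parser.ParseFile(fset, "example.cue", src, parser.ParseComments)
+//	if err != nil {
+//		// handle error
+//	}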
+package parser // import "cuelang.org/go/cue/parser"
diff --git a/cue/parser/error_test.go b/cue/parser/error_test.go
new file mode 100644
index 0000000..376d343
--- /dev/null
+++ b/cue/parser/error_test.go
@@ -0,0 +1,198 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file implements a parser test harness. The files in the testdata
+// directory are parsed and the errors reported are compared against the
+// error messages expected in the test files. The test files must end in
+// .src rather than .go so that they are not disturbed by gofmt runs.
+//
+// Expected errors are indicated in the test files by putting a comment
+// of the form /* ERROR "rx" */ immediately following an offending token.
+// The harness will verify that an error matching the regular expression
+// rx is reported at that source position.
+//
+// For instance, the following test file indicates that a "not declared"
+// error should be reported for the undeclared variable x:
+//
+//	package p
+//	{
+//		a = x /* ERROR "not declared" */ + 1
+//	}
+
+package parser
+
+import (
+	"io/ioutil"
+	"path/filepath"
+	"regexp"
+	"strings"
+	"testing"
+
+	"cuelang.org/go/cue/errors"
+	"cuelang.org/go/cue/scanner"
+	"cuelang.org/go/cue/token"
+)
+
+const testdata = "testdata"
+
+// getFile assumes that each filename occurs at most once
+func getFile(fset *token.FileSet, filename string) (info *token.File) {
+	fset.Iterate(func(f *token.File) bool {
+		if f.Name() == filename {
+			if info != nil {
+				panic(filename + " used multiple times")
+			}
+			info = f
+		}
+		return true
+	})
+	return info
+}
+
+func getPos(fset *token.FileSet, filename string, offset int) token.Pos {
+	if f := getFile(fset, filename); f != nil {
+		return f.Pos(offset, 0)
+	}
+	return token.NoPos
+}
+
+// ERROR comments must be of the form /* ERROR "rx" */ and rx is
+// a regular expression that matches the expected error message.
+// The special form /* ERROR HERE "rx" */ must be used for error
+// messages that appear immediately after a token, rather than at
+// a token's position.
+//
+var errRx = regexp.MustCompile(`^/\* *ERROR *(HERE)? *"([^"]*)" *\*/$`)
+
+// expectedErrors collects the regular expressions of ERROR comments found
+// in files and returns them as a map of error positions to error messages.
+//
+func expectedErrors(t *testing.T, fset *token.FileSet, filename string, src []byte) map[token.Pos]string {
+	errors := make(map[token.Pos]string)
+
+	var s scanner.Scanner
+	// The file was parsed already; do not add it again to the file
+	// set, otherwise the position information returned here will
+	// not match the position information collected by the parser.
+	s.Init(getFile(fset, filename), src, nil, scanner.ScanComments)
+	var prev token.Pos // position of last non-comment, non-semicolon token
+	var here token.Pos // position immediately after the token at position prev
+
+	for {
+		pos, tok, lit := s.Scan()
+		pos = pos.WithRel(0)
+		switch tok {
+		case token.EOF:
+			return errors
+		case token.COMMENT:
+			s := errRx.FindStringSubmatch(lit)
+			if len(s) == 3 {
+				pos := prev
+				if s[1] == "HERE" {
+					pos = here
+				}
+				errors[pos] = string(s[2])
+			}
+		default:
+			prev = pos
+			var l int // token length
+			if tok.IsLiteral() {
+				l = len(lit)
+			} else {
+				l = len(tok.String())
+			}
+			here = prev + token.Pos(l)
+		}
+	}
+}
+
+// compareErrors compares the map of expected error messages with the list
+// of found errors and reports discrepancies.
+//
+func compareErrors(t *testing.T, fset *token.FileSet, expected map[token.Pos]string, found errors.List) {
+	t.Helper()
+	for _, error := range found {
+		// error.Position() returns a Position, but we want
+		// a Pos so we can do a map lookup
+		ePos := error.Position()
+		eMsg := error.Error()
+		pos := getPos(fset, ePos.Filename, ePos.Offset).WithRel(0)
+		if msg, found := expected[pos]; found {
+			// we expect a message at pos; check if it matches
+			rx, err := regexp.Compile(msg)
+			if err != nil {
+				t.Errorf("%s: %v", ePos, err)
+				continue
+			}
+			if match := rx.MatchString(eMsg); !match {
+				t.Errorf("%s: %q does not match %q", ePos, eMsg, msg)
+				continue
+			}
+			// we have a match - eliminate this error
+			delete(expected, pos)
+		} else {
+			// To keep in mind when analyzing failed test output:
+			// If the same error position occurs multiple times in errors,
+			// this message will be triggered (because the first error at
+			// the position removes this position from the expected errors).
+			t.Errorf("%s: unexpected error: -%q-", ePos, eMsg)
+		}
+	}
+
+	// there should be no expected errors left
+	if len(expected) > 0 {
+		t.Errorf("%d errors not reported:", len(expected))
+		for pos, msg := range expected {
+			t.Errorf("%s: -%q-\n", fset.Position(pos), msg)
+		}
+	}
+}
+
+func checkErrors(t *testing.T, filename string, input interface{}) {
+	t.Helper()
+	src, err := readSource(filename, input)
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	fset := token.NewFileSet()
+	_, err = ParseFile(fset, filename, src, DeclarationErrors, AllErrors, ParseLambdas)
+	found, ok := err.(errors.List)
+	if err != nil && !ok {
+		t.Error(err)
+		return
+	}
+	found.RemoveMultiples()
+
+	// we are expecting the following errors
+	// (collect these after parsing the file so that the file is found in the file set)
+	expected := expectedErrors(t, fset, filename, src)
+
+	// verify errors returned by the parser
+	compareErrors(t, fset, expected, found)
+}
+
+func TestErrors(t *testing.T) {
+	list, err := ioutil.ReadDir(testdata)
+	if err != nil {
+		t.Fatal(err)
+	}
+	for _, fi := range list {
+		name := fi.Name()
+		if !fi.IsDir() && !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".src") {
+			checkErrors(t, filepath.Join(testdata, name), nil)
+		}
+	}
+}
diff --git a/cue/parser/example_test.go b/cue/parser/example_test.go
new file mode 100644
index 0000000..195da10
--- /dev/null
+++ b/cue/parser/example_test.go
@@ -0,0 +1,41 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser_test
+
+import (
+	"fmt"
+
+	"cuelang.org/go/cue/parser"
+	"cuelang.org/go/cue/token"
+)
+
+func ExampleParseFile() {
+	fset := token.NewFileSet() // positions are relative to fset
+
+	// Parse the named file. A nil src argument means the
+	// source is read from that file.
+	f, err := parser.ParseFile(fset, "testdata/test.cue", nil)
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+
+	// Print the imports from the file's AST.
+	for _, s := range f.Imports {
+		fmt.Println(s.Path.Value)
+	}
+	// Output:
+	// "math"
+}
diff --git a/cue/parser/import.go b/cue/parser/import.go
new file mode 100644
index 0000000..ce95246
--- /dev/null
+++ b/cue/parser/import.go
@@ -0,0 +1,169 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser
+
+import (
+	"sort"
+	"strconv"
+
+	"cuelang.org/go/cue/ast"
+	"cuelang.org/go/cue/token"
+)
+
+// sortImports sorts runs of consecutive import lines in import blocks in f.
+// It also removes duplicate imports when it is possible to do so without data loss.
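+//
+// For example (paths are placeholders), a block such as
+//
+//	import (
+//		"b.com/bar"
+//		"a.com/foo"
+//		"a.com/foo"
+//	)
+//
+// is rewritten so that the specs are sorted by path and the duplicate
+// "a.com/foo" is removed.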
+func sortImports(fset *token.FileSet, f *ast.File) {
+	for _, d := range f.Decls {
+		d, ok := d.(*ast.ImportDecl)
+		if !ok {
+			// Not an import declaration, so we're done.
+			// Imports are always first.
+			break
+		}
+
+		if !d.Lparen.IsValid() {
+			// Not a block: sorted by default.
+			continue
+		}
+
+		// Identify and sort runs of specs on successive lines.
+		i := 0
+		specs := d.Specs[:0]
+		for j, s := range d.Specs {
+			if j > i && fset.Position(s.Pos()).Line > 1+fset.Position(d.Specs[j-1].End()).Line {
+				// j begins a new run. End this one.
+				specs = append(specs, sortSpecs(fset, f, d.Specs[i:j])...)
+				i = j
+			}
+		}
+		specs = append(specs, sortSpecs(fset, f, d.Specs[i:])...)
+		d.Specs = specs
+
+		// Deduping can leave a blank line before the rparen; clean that up.
+		if len(d.Specs) > 0 {
+			lastSpec := d.Specs[len(d.Specs)-1]
+			lastLine := fset.Position(lastSpec.Pos()).Line
+			rParenLine := fset.Position(d.Rparen).Line
+			for rParenLine > lastLine+1 {
+				rParenLine--
+				fset.File(d.Rparen).MergeLine(rParenLine)
+			}
+		}
+	}
+}
+
+func importPath(s *ast.ImportSpec) string {
+	t, err := strconv.Unquote(s.Path.Value)
+	if err == nil {
+		return t
+	}
+	return ""
+}
+
+func importName(s *ast.ImportSpec) string {
+	n := s.Name
+	if n == nil {
+		return ""
+	}
+	return n.Name
+}
+
+func importComment(s *ast.ImportSpec) string {
+	for _, c := range s.Comments() {
+		if c.Line {
+			return c.Text()
+		}
+	}
+	return ""
+}
+
+// collapse indicates whether prev may be removed, leaving only next.
+func collapse(prev, next *ast.ImportSpec) bool {
+	if importPath(next) != importPath(prev) || importName(next) != importName(prev) {
+		return false
+	}
+	for _, c := range prev.Comments() {
+		if !c.Doc {
+			return false
+		}
+	}
+	return true
+}
+
+type posSpan struct {
+	Start token.Pos
+	End   token.Pos
+}
+
+func sortSpecs(fset *token.FileSet, f *ast.File, specs []*ast.ImportSpec) []*ast.ImportSpec {
+	// Can't short-circuit here even if specs are already sorted,
+	// since they might yet need deduplication.
+	// A lone import, however, may be safely ignored.
+	if len(specs) <= 1 {
+		return specs
+	}
+
+	// Record positions for specs.
+	pos := make([]posSpan, len(specs))
+	for i, s := range specs {
+		pos[i] = posSpan{s.Pos(), s.End()}
+	}
+
+	// Sort the import specs by import path.
+	// Remove duplicates, when possible without data loss.
+	// Reassign the import paths to have the same position sequence.
+	// Reassign each comment to abut the end of its spec.
+	// Sort the comments by new position.
+	sort.Sort(byImportSpec(specs))
+
+	// Dedup. Thanks to our sorting, we can just consider
+	// adjacent pairs of imports.
+	deduped := specs[:0]
+	for i, s := range specs {
+		if i == len(specs)-1 || !collapse(s, specs[i+1]) {
+			deduped = append(deduped, s)
+		} else {
+			p := s.Pos()
+			fset.File(p).MergeLine(fset.Position(p).Line)
+		}
+	}
+	specs = deduped
+
+	return specs
+}
+
+type byImportSpec []*ast.ImportSpec
+
+func (x byImportSpec) Len() int      { return len(x) }
+func (x byImportSpec) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x byImportSpec) Less(i, j int) bool {
+	ipath := importPath(x[i])
+	jpath := importPath(x[j])
+	if ipath != jpath {
+		return ipath < jpath
+	}
+	iname := importName(x[i])
+	jname := importName(x[j])
+	if iname != jname {
+		return iname < jname
+	}
+	return importComment(x[i]) < importComment(x[j])
+}
+
+type byCommentPos []*ast.CommentGroup
+
+func (x byCommentPos) Len() int           { return len(x) }
+func (x byCommentPos) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }
+func (x byCommentPos) Less(i, j int) bool { return x[i].Pos() < x[j].Pos() }
diff --git a/cue/parser/interface.go b/cue/parser/interface.go
new file mode 100644
index 0000000..140f140
--- /dev/null
+++ b/cue/parser/interface.go
@@ -0,0 +1,250 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file contains the exported entry points for invoking the parser.
+
+package parser
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+
+	"cuelang.org/go/cue/ast"
+	"cuelang.org/go/cue/token"
+)
+
+// If src != nil, readSource converts src to a []byte if possible;
+// otherwise it returns an error. If src == nil, readSource returns
+// the result of reading the file specified by filename.
+//
+func readSource(filename string, src interface{}) ([]byte, error) {
+	if src != nil {
+		switch s := src.(type) {
+		case string:
+			return []byte(s), nil
+		case []byte:
+			return s, nil
+		case *bytes.Buffer:
+			// is io.Reader, but src is already available in []byte form
+			if s != nil {
+				return s.Bytes(), nil
+			}
+		case io.Reader:
+			var buf bytes.Buffer
+			if _, err := io.Copy(&buf, s); err != nil {
+				return nil, err
+			}
+			return buf.Bytes(), nil
+		}
+		return nil, fmt.Errorf("invalid source type %T", src)
+	}
+	return ioutil.ReadFile(filename)
+}
+
+type Option func(p *parser)
+
+var (
+	// PackageClauseOnly causes parsing to stop after the package clause.
+	PackageClauseOnly Option = packageClauseOnly
+	packageClauseOnly        = func(p *parser) {
+		p.mode |= packageClauseOnlyMode
+	}
+
+	// ImportsOnly causes parsing to stop after the import declarations.
+	ImportsOnly Option = importsOnly
+	importsOnly        = func(p *parser) {
+		p.mode |= importsOnlyMode
+	}
+
+	// ParseComments causes comments to be parsed.
+	ParseComments Option = parseComments
+	parseComments        = func(p *parser) {
+		p.mode |= parseCommentsMode
+	}
+
+	// ParseLambdas enables parsing of lambdas. By default these are disabled.
+	//
+	// NOTE: this option is for internal use only and can be made unavailable at
+	// any time.
+	ParseLambdas Option = parseLambdas
+	parseLambdas        = func(p *parser) {
+		p.mode |= parseLambdasMode
+	}
+
+	// Trace causes parsing to print a trace of parsed productions.
+	Trace    Option = traceOpt
+	traceOpt        = func(p *parser) {
+		p.mode |= traceMode
+	}
+
+	// DeclarationErrors causes parsing to report declaration errors.
+	DeclarationErrors Option = declarationErrors
+	declarationErrors        = func(p *parser) {
+		p.mode |= declarationErrorsMode
+	}
+
+	// AllErrors causes all errors to be reported (not just the first 10 on different lines).
+	AllErrors Option = allErrors
+	allErrors        = func(p *parser) {
+		p.mode |= allErrorsMode
+	}
+
+	// AllowPartial allows the parser to be used on a prefix buffer.
+	AllowPartial Option = allowPartial
+	allowPartial        = func(p *parser) {
+		p.mode |= partialMode
+	}
+)
+
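+// Options are passed variadically to the Parse* functions, for example
+// (a sketch; fset and src are placeholders):
+//
+//	f, err := ParseFile(fset, "x.cue", src, ParseComments, AllErrors)
+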
+// A mode value is a set of flags (or 0).
+// They control the amount of source code parsed and other optional
+// parser functionality.
+type mode uint
+
+const (
+	packageClauseOnlyMode mode = 1 << iota // stop parsing after package clause
+	importsOnlyMode                        // stop parsing after import declarations
+	parseCommentsMode                      // parse comments and add them to AST
+	parseLambdasMode                       // parse lambda expressions (internal use only)
+	partialMode                            // allow parsing a prefix of valid source
+	traceMode             // print a trace of parsed productions
+	declarationErrorsMode // report declaration errors
+	allErrorsMode         // report all errors (not just the first 10 on different lines)
+)
+
+// ParseFile parses the source code of a single CUE source file and returns
+// the corresponding File node. The source code may be provided via
+// the filename of the source file, or via the src parameter.
+//
+// If src != nil, ParseFile parses the source from src and the filename is
+// only used when recording position information. The type of the argument
+// for the src parameter must be string, []byte, or io.Reader.
+// If src == nil, ParseFile parses the file specified by filename.
+//
+// The mode parameter controls the amount of source text parsed and other
+// optional parser functionality. Position information is recorded in the
+// file set fset, which must not be nil.
+//
+// If the source couldn't be read, the returned AST is nil and the error
+// indicates the specific failure. If the source was read but syntax
+// errors were found, the result is a partial AST (with Bad* nodes
+// representing the fragments of erroneous source code). Multiple errors
+// are returned via an errors.List which is sorted by file position.
+func ParseFile(p *token.FileSet, filename string, src interface{}, mode ...Option) (f *ast.File, err error) {
+	if p == nil {
+		panic("ParseFile: no file.FileSet provided (fset == nil)")
+	}
+
+	// get source
+	text, err := readSource(filename, src)
+	if err != nil {
+		return nil, err
+	}
+
+	var pp parser
+	defer func() {
+		if e := recover(); e != nil {
+			// resume same panic if it's not a bailout
+			if _, ok := e.(bailout); !ok {
+				panic(e)
+			}
+		}
+
+		// set result values
+		if f == nil {
+			// source is not a valid CUE source file - satisfy
+			// the ParseFile API and return a valid (but empty)
+			// *File
+			f = &ast.File{
+				Name: new(ast.Ident),
+				// Scope: NewScope(nil),
+			}
+		}
+
+		pp.errors.Sort()
+		err = pp.errors.Err()
+	}()
+
+	// parse source
+	pp.init(p, filename, text, mode)
+	f = pp.parseFile()
+	if f == nil {
+		return nil, pp.errors
+	}
+	f.Filename = filename
+	resolve(f, pp.error)
+
+	return
+}
+
+// ParseExpr is a convenience function for parsing an expression.
+// The arguments have the same meaning as for ParseFile, but the source must
+// be a valid CUE (type or value) expression. Specifically, fset must not
+// be nil.
+func ParseExpr(fset *token.FileSet, filename string, src interface{}, mode ...Option) (ast.Expr, error) {
+	if fset == nil {
+		panic("ParseExprFrom: no file.FileSet provided (fset == nil)")
+	}
+
+	// get source
+	text, err := readSource(filename, src)
+	if err != nil {
+		return nil, err
+	}
+
+	var p parser
+	defer func() {
+		if e := recover(); e != nil {
+			// resume same panic if it's not a bailout
+			if _, ok := e.(bailout); !ok {
+				panic(e)
+			}
+		}
+		p.errors.Sort()
+		err = p.errors.Err()
+	}()
+
+	// parse expr
+	p.init(fset, filename, text, mode)
+	e := p.parseRHS()
+
+	// If a comma was inserted, consume it;
+	// report an error if there are more tokens.
+	if p.tok == token.COMMA && p.lit == "\n" {
+		p.next()
+	}
+	if p.mode&partialMode == 0 {
+		p.expect(token.EOF)
+	}
+
+	if p.errors.Len() > 0 {
+		p.errors.Sort()
+		return nil, p.errors.Err()
+	}
+
+	return e, nil
+}
+
+// parseExprString is a convenience function for obtaining the AST of an
+// expression x. The position information recorded in the AST is undefined. The
+// filename used in error messages is the empty string.
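+//
+// For example (a sketch):
+//
+//	expr, err := parseExprString("1 + 2")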
+func parseExprString(x string) (ast.Expr, error) {
+	return ParseExpr(token.NewFileSet(), "", []byte(x))
+}
diff --git a/cue/parser/interface_test.go b/cue/parser/interface_test.go
new file mode 100644
index 0000000..394a5a4
--- /dev/null
+++ b/cue/parser/interface_test.go
@@ -0,0 +1,126 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser
+
+import (
+	"reflect"
+	"testing"
+
+	"cuelang.org/go/cue/ast"
+	"cuelang.org/go/cue/token"
+)
+
+func Test_readSource(t *testing.T) {
+	type args struct {
+		filename string
+		src      interface{}
+	}
+	tests := []struct {
+		name    string
+		args    args
+		want    []byte
+		wantErr bool
+	}{
+		// TODO: Add test cases.
+	}
+	for _, tt := range tests {
+		got, err := readSource(tt.args.filename, tt.args.src)
+		if (err != nil) != tt.wantErr {
+			t.Errorf("%q. readSource() error = %v, wantErr %v", tt.name, err, tt.wantErr)
+			continue
+		}
+		if !reflect.DeepEqual(got, tt.want) {
+			t.Errorf("%q. readSource() = %v, want %v", tt.name, got, tt.want)
+		}
+	}
+}
+
+func TestParseFile(t *testing.T) {
+	type args struct {
+		fset     *token.FileSet
+		filename string
+		src      interface{}
+		options  []Option
+	}
+	tests := []struct {
+		name    string
+		args    args
+		wantF   *ast.File
+		wantErr bool
+	}{
+		// TODO: Add test cases.
+	}
+	for _, tt := range tests {
+		gotF, err := ParseFile(tt.args.fset, tt.args.filename, tt.args.src, tt.args.options...)
+		if (err != nil) != tt.wantErr {
+			t.Errorf("%q. ParseFile() error = %v, wantErr %v", tt.name, err, tt.wantErr)
+			continue
+		}
+		if !reflect.DeepEqual(gotF, tt.wantF) {
+			t.Errorf("%q. ParseFile() = %v, want %v", tt.name, gotF, tt.wantF)
+		}
+	}
+}
+
+func TestParseExpr(t *testing.T) {
+	type args struct {
+		fset     *token.FileSet
+		filename string
+		src      interface{}
+		mode     Option
+	}
+	tests := []struct {
+		name    string
+		args    args
+		want    ast.Expr
+		wantErr bool
+	}{
+		// TODO: Add test cases.
+	}
+	for _, tt := range tests {
+		got, err := ParseExpr(tt.args.fset, tt.args.filename, tt.args.src, tt.args.mode)
+		if (err != nil) != tt.wantErr {
+			t.Errorf("%q. ParseExprFrom() error = %v, wantErr %v", tt.name, err, tt.wantErr)
+			continue
+		}
+		if !reflect.DeepEqual(got, tt.want) {
+			t.Errorf("%q. ParseExprFrom() = %v, want %v", tt.name, got, tt.want)
+		}
+	}
+}
+
+func TestParseExprString(t *testing.T) {
+	type args struct {
+		x string
+	}
+	tests := []struct {
+		name    string
+		args    args
+		want    ast.Expr
+		wantErr bool
+	}{
+		// TODO: Add test cases.
+	}
+	for _, tt := range tests {
+		got, err := parseExprString(tt.args.x)
+		if (err != nil) != tt.wantErr {
+			t.Errorf("%q. ParseExpr() error = %v, wantErr %v", tt.name, err, tt.wantErr)
+			continue
+		}
+		if !reflect.DeepEqual(got, tt.want) {
+			t.Errorf("%q. ParseExpr() = %v, want %v", tt.name, got, tt.want)
+		}
+	}
+}
diff --git a/cue/parser/parser.go b/cue/parser/parser.go
new file mode 100644
index 0000000..fc12092
--- /dev/null
+++ b/cue/parser/parser.go
@@ -0,0 +1,1446 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+	"unicode"
+
+	"cuelang.org/go/cue/ast"
+	"cuelang.org/go/cue/errors"
+	"cuelang.org/go/cue/scanner"
+	"cuelang.org/go/cue/token"
+)
+
+// The parser structure holds the parser's internal state.
+type parser struct {
+	file    *token.File
+	errors  errors.List
+	scanner scanner.Scanner
+
+	// Tracing/debugging
+	mode   mode // parsing mode
+	trace  bool // == (mode & Trace != 0)
+	indent int  // indentation used for tracing output
+
+	// Comments
+	leadComment *ast.CommentGroup
+	comments    *commentState
+
+	// Next token
+	pos token.Pos   // token position
+	tok token.Token // one token look-ahead
+	lit string      // token literal
+
+	// Error recovery
+	// (used to limit the number of calls to syncXXX functions
+	// w/o making scanning progress - avoids potential endless
+	// loops across multiple parser functions during error recovery)
+	syncPos token.Pos // last synchronization position
+	syncCnt int       // number of calls to syncXXX without progress
+
+	// Non-syntactic parser control
+	exprLev int // < 0: in control clause, >= 0: in expression
+
+	imports []*ast.ImportSpec // list of imports
+
+}
+
+func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode []Option) {
+	p.file = fset.AddFile(filename, -1, len(src))
+	for _, f := range mode {
+		f(p)
+	}
+	var m scanner.Mode
+	if p.mode&parseCommentsMode != 0 {
+		m = scanner.ScanComments
+	}
+	eh := func(pos token.Position, msg string) { p.errors.AddNew(pos, msg) }
+	p.scanner.Init(p.file, src, eh, m)
+
+	p.trace = p.mode&traceMode != 0 // for convenience (p.trace is used frequently)
+
+	p.comments = &commentState{pos: -1}
+
+	p.next()
+}
+
+type commentList struct {
+	taken      bool // for validation
+	attachTail bool
+	head       *ast.CommentGroup
+	last       *ast.CommentGroup
+}
+
+type commentState struct {
+	parent *commentState
+	pos    int8
+	groups []*ast.CommentGroup
+
+	// lists are not attached to nodes themselves. Enclosed expressions may
+	// miss a comment due to commas and line termination. closeList ensures
+	// that comments will be passed to someone.
+	isList    int
+	lastChild ast.Node
+	lastPos   int8
+}
+
+// openComments reserves the next doc comment for the caller and flushes
+// any comments accumulated for the current list to its last child.
+func (p *parser) openComments() *commentState {
+	if c := p.comments; c != nil && c.isList > 0 {
+		if c.lastChild != nil {
+			for _, cg := range c.groups {
+				cg.Position = c.lastPos
+				c.lastChild.AddComment(cg)
+			}
+			c.groups = nil
+		}
+		c.lastChild = nil
+	}
+	c := &commentState{
+		parent: p.comments,
+		groups: []*ast.CommentGroup{p.leadComment},
+	}
+	p.comments = c
+	p.leadComment = nil
+	return c
+}
+
+// openList is used to treat a list of comments as a single comment
+// position in a production.
+func (p *parser) openList() {
+	if p.comments.isList > 0 {
+		p.comments.isList++
+		return
+	}
+	c := &commentState{
+		parent: p.comments,
+		isList: 1,
+	}
+	p.comments = c
+}
+
+func (c *commentState) add(g *ast.CommentGroup) {
+	g.Position = c.pos
+	c.groups = append(c.groups, g)
+}
+
+func (p *parser) closeList() {
+	c := p.comments
+	if c.lastChild != nil {
+		for _, cg := range c.groups {
+			cg.Position = c.lastPos
+			c.lastChild.AddComment(cg)
+		}
+		c.groups = nil
+	}
+	switch c.isList--; {
+	case c.isList < 0:
+		panic("unmatched close list")
+	case c.isList == 0:
+		parent := c.parent
+		parent.groups = append(parent.groups, c.groups...)
+		parent.pos++
+		p.comments = parent
+	}
+}
+
+func (c *commentState) closeNode(p *parser, n ast.Node) ast.Node {
+	if p.comments != c {
+		panic("unmatched comments")
+	}
+	p.comments = c.parent
+	if c.parent != nil {
+		c.parent.lastChild = n
+		c.parent.lastPos = c.pos
+		c.parent.pos++
+	}
+	for _, cg := range c.groups {
+		if n != nil {
+			n.AddComment(cg)
+		}
+	}
+	c.groups = nil
+	return n
+}
+
+func (c *commentState) closeExpr(p *parser, n ast.Expr) ast.Expr {
+	c.closeNode(p, n)
+	return n
+}
+
+func (c *commentState) closeClause(p *parser, n ast.Clause) ast.Clause {
+	c.closeNode(p, n)
+	return n
+}
+
+// ----------------------------------------------------------------------------
+// Parsing support
+
+func (p *parser) printTrace(a ...interface{}) {
+	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+	const n = len(dots)
+	pos := p.file.Position(p.pos)
+	fmt.Printf("%5d:%3d: ", pos.Line, pos.Column)
+	i := 2 * p.indent
+	for i > n {
+		fmt.Print(dots)
+		i -= n
+	}
+	// i <= n
+	fmt.Print(dots[0:i])
+	fmt.Println(a...)
+}
+
+func trace(p *parser, msg string) *parser {
+	p.printTrace(msg, "(")
+	p.indent++
+	return p
+}
+
+// Usage pattern: defer un(trace(p, "..."))
+func un(p *parser) {
+	p.indent--
+	p.printTrace(")")
+}
+
+// Advance to the next token.
+func (p *parser) next0() {
+	// Because of one-token look-ahead, print the previous token
+	// when tracing as it provides a more readable output. The
+	// very first token (!p.pos.IsValid()) is not initialized
+// (it is ILLEGAL), so don't print it.
+	if p.trace && p.pos.IsValid() {
+		s := p.tok.String()
+		switch {
+		case p.tok.IsLiteral():
+			p.printTrace(s, p.lit)
+		case p.tok.IsOperator(), p.tok.IsKeyword():
+			p.printTrace("\"" + s + "\"")
+		default:
+			p.printTrace(s)
+		}
+	}
+
+	p.pos, p.tok, p.lit = p.scanner.Scan()
+}
+
+// Consume a comment and return it and the line on which it ends.
+func (p *parser) consumeComment() (comment *ast.Comment, endline int) {
+	// /*-style comments may end on a different line than where they start.
+	// Scan the comment for '\n' chars and adjust endline accordingly.
+	endline = p.file.Line(p.pos)
+	if p.lit[1] == '*' {
+		// don't use range here - no need to decode Unicode code points
+		for i := 0; i < len(p.lit); i++ {
+			if p.lit[i] == '\n' {
+				endline++
+			}
+		}
+	}
+
+	comment = &ast.Comment{Slash: p.pos, Text: p.lit}
+	p.next0()
+
+	return
+}
+
+// Consume a group of adjacent comments, add it to the parser's
+// comments list, and return it together with the line at which
+// the last comment in the group ends. A non-comment token or n
+// empty lines terminate a comment group.
+func (p *parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
+	var list []*ast.Comment
+	endline = p.file.Line(p.pos)
+	for p.tok == token.COMMENT && p.file.Line(p.pos) <= endline+n {
+		var comment *ast.Comment
+		comment, endline = p.consumeComment()
+		list = append(list, comment)
+	}
+
+	cg := &ast.CommentGroup{List: list}
+	comments = cg
+	return
+}
+
+// Advance to the next non-comment token. In the process, collect
+// any comment groups encountered, and remember the last lead and
+// line comments.
+//
+// A lead comment is a comment group that starts and ends in a
+// line without any other tokens and that is followed by a non-comment
+// token on the line immediately after the comment group.
+//
+// A line comment is a comment group that follows a non-comment
+// token on the same line, and that has no tokens after it on the line
+// where it ends.
+//
+// Lead and line comments may be considered documentation that is
+// stored in the AST.
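+//
+// For example (a sketch of CUE source):
+//
+//	// lead comment for field a
+//	a: 1 // line comment for field a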
+func (p *parser) next() {
+	// A leadComment may not be consumed if it leads an inner token of a node.
+	if p.leadComment != nil {
+		p.comments.add(p.leadComment)
+	}
+	p.leadComment = nil
+	prev := p.pos
+	p.next0()
+	p.comments.pos++
+
+	if p.tok == token.COMMENT {
+		var comment *ast.CommentGroup
+		var endline int
+
+		if p.file.Line(p.pos) == p.file.Line(prev) {
+			// The comment is on same line as the previous token; it
+			// cannot be a lead comment but may be a line comment.
+			comment, endline = p.consumeCommentGroup(0)
+			if p.file.Line(p.pos) != endline {
+				// The next token is on a different line, thus
+				// the last comment group is a line comment.
+				comment.Line = true
+			}
+		}
+
+		// consume successor comments, if any
+		endline = -1
+		for p.tok == token.COMMENT {
+			if comment != nil {
+				p.comments.add(comment)
+			}
+			comment, endline = p.consumeCommentGroup(1)
+		}
+
+		if endline+1 == p.file.Line(p.pos) && p.tok != token.EOF {
+			// The next token is following on the line immediately after the
+			// comment group, thus the last comment group is a lead comment.
+			comment.Doc = true
+			p.leadComment = comment
+		} else {
+			p.comments.add(comment)
+		}
+	}
+}
+
+// A bailout panic is raised to indicate early termination.
+type bailout struct{}
+
+func (p *parser) error(pos token.Pos, msg string) {
+	ePos := p.file.Position(pos)
+
+	// If AllErrors is not set, discard errors reported on the same line
+	// as the last recorded error and stop parsing if there are more than
+	// 10 errors.
+	if p.mode&allErrorsMode == 0 {
+		n := len(p.errors)
+		if n > 0 && p.errors[n-1].Position().Line == ePos.Line {
+			return // discard - likely a spurious error
+		}
+		if n > 10 {
+			panic(bailout{})
+		}
+	}
+
+	p.errors.AddNew(ePos, msg)
+}
+
+func (p *parser) errorExpected(pos token.Pos, msg string) {
+	msg = "expected " + msg
+	if pos == p.pos {
+		// the error happened at the current position;
+		// make the error message more specific
+		if p.tok == token.COMMA && p.lit == "\n" {
+			msg += ", found newline"
+		} else {
+			msg += ", found '" + p.tok.String() + "'"
+			if p.tok.IsLiteral() {
+				msg += " " + p.lit
+			}
+		}
+	}
+	p.error(pos, msg)
+}
+
+func (p *parser) expect(tok token.Token) token.Pos {
+	pos := p.pos
+	if p.tok != tok {
+		p.errorExpected(pos, "'"+tok.String()+"'")
+	}
+	p.next() // make progress
+	return pos
+}
+
+// expectClosing is like expect but provides a better error message
+// for the common case of a missing comma before a newline.
+func (p *parser) expectClosing(tok token.Token, context string) token.Pos {
+	if p.tok != tok && p.tok == token.COMMA && p.lit == "\n" {
+		p.error(p.pos, "missing ',' before newline in "+context)
+		p.next()
+	}
+	return p.expect(tok)
+}
+
+func (p *parser) expectComma() {
+	// a comma is optional before a closing ')', '}', or EOF
+	if p.tok != token.RPAREN && p.tok != token.RBRACE && p.tok != token.EOF {
+		switch p.tok {
+		case token.COMMA:
+			p.next()
+		default:
+			p.errorExpected(p.pos, "','")
+			syncExpr(p)
+		}
+	}
+}
+
+func (p *parser) atComma(context string, follow ...token.Token) bool {
+	if p.tok == token.COMMA {
+		return true
+	}
+	for _, t := range follow {
+		if p.tok == t {
+			return false
+		}
+	}
+	msg := "missing ','"
+	// TODO: find a way to detect crossing lines now we don't have a semi.
+	if p.lit == "\n" {
+		msg += " before newline"
+	}
+	p.error(p.pos, msg+" in "+context)
+	return true // "insert" comma and continue
+}
+
+func assert(cond bool, msg string) {
+	if !cond {
+		panic("lacelang/parser internal error: " + msg)
+	}
+}
+
+// syncExpr advances to the next field in a field list.
+// Used for synchronization after an error.
+func syncExpr(p *parser) {
+	for {
+		switch p.tok {
+		case token.COMMA:
+			// Return only if parser made some progress since last
+			// sync or if it has not reached 10 sync calls without
+			// progress. Otherwise consume at least one token to
+			// avoid an endless parser loop (it is possible that
+			// both parseOperand and expectComma call syncExpr and
+			// correctly do not advance, thus the need for the
+			// invocation limit p.syncCnt).
+			if p.pos == p.syncPos && p.syncCnt < 10 {
+				p.syncCnt++
+				return
+			}
+			if p.pos > p.syncPos {
+				p.syncPos = p.pos
+				p.syncCnt = 0
+				return
+			}
+			// Reaching here indicates a parser bug, likely an
+			// incorrect token list in this function, but it only
+			// leads to skipping of possibly correct code if a
+			// previous error is present, and thus is preferred
+			// over a non-terminating parse.
+		case token.EOF:
+			return
+		}
+		p.next()
+	}
+}
+
+// safePos returns a valid file position for a given position: If pos
+// is valid to begin with, safePos returns pos. If pos is out-of-range,
+// safePos returns the EOF position.
+//
+// This is a hack to work around "artificial" end positions in the AST which
+// are computed by adding 1 to (presumably valid) token positions. If the
+// token positions are invalid due to parse errors, the resulting end position
+// may be past the file's EOF position, which would lead to panics if used
+// later on.
+func (p *parser) safePos(pos token.Pos) (res token.Pos) {
+	defer func() {
+		if recover() != nil {
+			res = token.Pos(p.file.Base() + p.file.Size()) // EOF position
+		}
+	}()
+	_ = p.file.Offset(pos) // trigger a panic if position is out-of-range
+	return pos
+}
+
+// ----------------------------------------------------------------------------
+// Identifiers
+
+func (p *parser) parseIdent() *ast.Ident {
+	c := p.openComments()
+	pos := p.pos
+	name := "_"
+	if p.tok == token.IDENT {
+		name = p.lit
+		p.next()
+	} else {
+		p.expect(token.IDENT) // use expect() error handling
+	}
+	ident := &ast.Ident{NamePos: pos, Name: name}
+	c.closeNode(p, ident)
+	return ident
+}
+
+// ----------------------------------------------------------------------------
+// Expressions
+
+// parseOperand returns an expression.
+// Callers must verify the result.
+func (p *parser) parseOperand() (expr ast.Expr) {
+	if p.trace {
+		defer un(trace(p, "Operand"))
+	}
+
+	switch p.tok {
+	case token.IDENT:
+		return p.parseIdent()
+
+	case token.LBRACE:
+		return p.parseStruct()
+
+	case token.LBRACK:
+		return p.parseList()
+
+	case token.BOTTOM:
+		c := p.openComments()
+		x := &ast.BottomLit{Bottom: p.pos}
+		p.next()
+		return c.closeExpr(p, x)
+
+	case token.NULL, token.TRUE, token.FALSE, token.INT, token.FLOAT, token.STRING:
+		c := p.openComments()
+		x := &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
+		p.next()
+		return c.closeExpr(p, x)
+
+	case token.INTERPOLATION:
+		return p.parseInterpolation()
+
+	case token.LPAREN:
+		c := p.openComments()
+		defer func() { c.closeNode(p, expr) }()
+		lparen := p.pos
+		p.next()
+		if p.tok == token.RPAREN && p.mode&parseLambdasMode != 0 {
+			c.pos = 2
+			rparen := p.expect(token.RPAREN)
+			p.expect(token.LAMBDA)
+			return &ast.LambdaExpr{
+				Lparen: lparen,
+				Rparen: rparen,
+				Expr:   p.parseRHS(),
+			}
+		}
+		p.exprLev++
+		p.openList()
+		x := p.parseRHS() // types may be parenthesized: (some type)
+		var params []*ast.Field
+		ident, ok := x.(*ast.Ident)
+		if ok && (p.tok == token.COLON || p.tok == token.COMMA) && p.mode&parseLambdasMode != 0 {
+			params = p.parseParams(ident, token.RPAREN)
+		}
+		p.closeList()
+		p.exprLev--
+		rparen := p.expect(token.RPAREN)
+		if p.tok == token.LAMBDA || params != nil && p.mode&parseLambdasMode != 0 {
+			p.expect(token.LAMBDA)
+			if params == nil {
+				m := &ast.Field{Label: ident}
+				params = append(params, m)
+			}
+			return &ast.LambdaExpr{
+				Lparen: lparen,
+				Params: params,
+				Rparen: rparen,
+				Expr:   p.parseRHS(),
+			}
+		}
+		return &ast.ParenExpr{
+			Lparen: lparen,
+			X:      x,
+			Rparen: rparen}
+	}
+
+	// we have an error
+	c := p.openComments()
+	pos := p.pos
+	p.errorExpected(pos, "operand")
+	syncExpr(p)
+	return c.closeExpr(p, &ast.BadExpr{From: pos, To: p.pos})
+}
+
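+// parseParams parses a comma-separated list of lambda parameters of the
+// form "name" or "name: value", starting with the given identifier if it
+// is non-nil, until follow or EOF is reached.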
+func (p *parser) parseParams(ident *ast.Ident, follow token.Token) (params []*ast.Field) {
+	for {
+		c := p.openComments()
+		if ident == nil {
+			ident = p.parseIdent()
+		}
+		m := &ast.Field{Label: ident}
+		if p.tok == token.COLON {
+			m.Colon = p.expect(token.COLON)
+			m.Value = p.parseRHS()
+		}
+		hasComma := p.tok == token.COMMA
+		if hasComma {
+			p.expect(token.COMMA)
+		}
+		c.closeNode(p, m)
+		params = append(params, m)
+		if !hasComma || p.tok == follow || p.tok == token.EOF {
+			break
+		}
+		ident = nil
+	}
+	return params
+}
+
+func (p *parser) parseIndexOrSlice(x ast.Expr) (expr ast.Expr) {
+	if p.trace {
+		defer un(trace(p, "IndexOrSlice"))
+	}
+
+	c := p.openComments()
+	defer func() { c.closeNode(p, expr) }()
+	c.pos = 1
+
+	const N = 2
+	lbrack := p.expect(token.LBRACK)
+
+	p.exprLev++
+	var index [N]ast.Expr
+	var colons [N - 1]token.Pos
+	if p.tok != token.COLON {
+		index[0] = p.parseRHS()
+	}
+	nColons := 0
+	for p.tok == token.COLON && nColons < len(colons) {
+		colons[nColons] = p.pos
+		nColons++
+		p.next()
+		if p.tok != token.COLON && p.tok != token.RBRACK && p.tok != token.EOF {
+			index[nColons] = p.parseRHS()
+		}
+	}
+	p.exprLev--
+	rbrack := p.expect(token.RBRACK)
+
+	if nColons > 0 {
+		return &ast.SliceExpr{
+			X:      x,
+			Lbrack: lbrack,
+			Low:    index[0],
+			High:   index[1],
+			Rbrack: rbrack}
+	}
+
+	return &ast.IndexExpr{
+		X:      x,
+		Lbrack: lbrack,
+		Index:  index[0],
+		Rbrack: rbrack}
+}
+
+func (p *parser) parseCallOrConversion(fun ast.Expr) (expr *ast.CallExpr) {
+	if p.trace {
+		defer un(trace(p, "CallOrConversion"))
+	}
+	c := p.openComments()
+	defer func() { c.closeNode(p, expr) }()
+
+	lparen := p.expect(token.LPAREN)
+	p.exprLev++
+	var list []ast.Expr
+	for p.tok != token.RPAREN && p.tok != token.EOF {
+		list = append(list, p.parseRHS()) // builtins may expect a type: make(some type, ...)
+		if !p.atComma("argument list", token.RPAREN) {
+			break
+		}
+		p.next()
+	}
+	p.exprLev--
+	rparen := p.expectClosing(token.RPAREN, "argument list")
+
+	return &ast.CallExpr{
+		Fun:    fun,
+		Lparen: lparen,
+		Args:   list,
+		Rparen: rparen}
+}
+
+func (p *parser) parseFieldList(allowEmit bool) (list []ast.Decl) {
+	if p.trace {
+		defer un(trace(p, "FieldList"))
+	}
+	origEmit := allowEmit
+	p.openList()
+	defer p.closeList()
+
+	for p.tok != token.RBRACE && p.tok != token.EOF {
+		d := p.parseField(allowEmit)
+		if e, ok := d.(*ast.EmitDecl); ok {
+			if origEmit && !allowEmit {
+				p.error(p.pos, "only one emit allowed at top level")
+			}
+			if !origEmit || !allowEmit {
+				d = &ast.BadDecl{From: e.Pos(), To: e.End()}
+				for _, cg := range e.Comments() {
+					d.AddComment(cg)
+				}
+			}
+			// uncomment to only allow one emit per top-level
+			// allowEmit = false
+		}
+		list = append(list, d)
+	}
+	return
+}
+func (p *parser) parseField(allowEmit bool) (decl ast.Decl) {
+	if p.trace {
+		defer un(trace(p, "Field"))
+	}
+
+	c := p.openComments()
+	defer func() { c.closeNode(p, decl) }()
+
+	pos := p.pos
+
+	this := &ast.Field{Label: nil}
+	m := this
+
+	for i := 0; ; i++ {
+		tok := p.tok
+
+		expr, ok := p.parseLabel(m)
+
+		if !ok {
+			if !allowEmit {
+				p.error(pos, "expected label, found "+tok.String())
+			}
+			if expr == nil {
+				expr = p.parseExpr()
+			}
+			e := &ast.EmitDecl{Expr: expr}
+			if p.atComma("file", token.RBRACE) {
+				p.next()
+			}
+			return e
+		}
+
+		if i == 0 && tok == token.IDENT {
+			ident := expr.(*ast.Ident)
+			switch p.tok {
+			case token.BIND:
+				pos := p.pos
+				p.expect(token.BIND)
+				ref := p.parseRHS()
+				if p.atComma("struct literal", token.RBRACE) { // TODO: may be EOF
+					p.next()
+				}
+				return &ast.Alias{Ident: ident, Equal: pos, Expr: ref}
+
+			case token.LPAREN:
+				var value ast.Expr
+				if p.mode&parseLambdasMode != 0 {
+					c.pos = 2
+					// TODO: Only allow LambdaExpr after non-quoted identifier.
+					value = p.parseOperand()
+					if _, ok := unparen(value).(*ast.LambdaExpr); !ok {
+						p.error(value.Pos(), "expected lambda expression")
+					}
+				}
+				if p.atComma("struct literal", token.RBRACE) { // TODO: may be EOF
+					p.next()
+				}
+				return &ast.Field{Label: ident, Value: value}
+			}
+		}
+
+		if p.tok == token.COLON {
+			break
+		}
+
+		switch p.tok {
+		default:
+			if !allowEmit || p.tok != token.COMMA {
+				p.errorExpected(p.pos, "label or ':'")
+			}
+			switch tok {
+			case token.IDENT, token.LBRACK, token.STRING, token.INTERPOLATION, token.NULL, token.TRUE, token.FALSE:
+				if p.tok == token.COMMA {
+					p.expectComma()
+					return &ast.EmitDecl{Expr: expr}
+				}
+			}
+			return &ast.BadDecl{From: pos, To: p.pos}
+
+		case token.IDENT, token.STRING, token.LSS, token.INTERPOLATION, token.LBRACK:
+			field := &ast.Field{}
+			m.Value = &ast.StructLit{Elts: []ast.Decl{field}}
+			m = field
+		}
+
+		allowEmit = false
+	}
+
+	this.Colon = p.pos
+	p.expect(token.COLON)
+	m.Value = p.parseRHS()
+
+	decl = this
+	var arrow token.Pos
+	switch p.tok {
+	case token.ARROW:
+		arrow = p.expect(token.ARROW)
+		fallthrough
+
+	case token.FOR, token.IF:
+		clauses := p.parseComprehensionClauses()
+		return &ast.ComprehensionDecl{
+			Field:   this,
+			Select:  arrow,
+			Clauses: clauses,
+		}
+	}
+
+	if p.atComma("struct literal", token.RBRACE) { // TODO: may be EOF
+		p.next()
+	}
+
+	return decl
+}
+
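+// parseLabel parses a field label, setting f.Label, and reports whether a
+// label was found. If not, it may return an already parsed expression for
+// the caller to use.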
+func (p *parser) parseLabel(f *ast.Field) (expr ast.Expr, ok bool) {
+	switch p.tok {
+	case token.IDENT:
+		ident := p.parseIdent()
+		f.Label = ident
+		expr = ident
+
+	case token.STRING:
+		// JSON compatibility.
+
+		expr = p.parseOperand()
+		f.Label = expr.(ast.Label)
+
+	case token.INTERPOLATION:
+		expr = p.parseInterpolation()
+		f.Label = expr.(ast.Label)
+
+	case token.NULL, token.TRUE, token.FALSE:
+		// Keywords that represent operands.
+
+		// Allowing keywords to be used as labels should not interfere with
+		// generating good errors: any keyword can only appear on the RHS of a
+		// field (after a ':'), whereas labels always appear on the LHS.
+		ident := &ast.BasicLit{
+			Kind:     p.tok,
+			ValuePos: p.pos,
+			Value:    p.lit,
+		}
+		p.next()
+		f.Label = ident
+		expr = ident
+
+	case token.IF, token.FOR, token.IN, token.LET:
+		// Keywords representing clauses.
+		f.Label = &ast.Ident{
+			NamePos: p.pos,
+			Name:    p.lit,
+		}
+		p.next()
+
+	case token.LSS: // element templates
+		pos := p.pos
+		c := p.openComments()
+		p.next()
+		ident := p.parseIdent()
+		gtr := p.pos
+		if p.tok != token.GTR {
+			p.expect(token.GTR)
+		}
+		p.next()
+		label := &ast.TemplateLabel{Langle: pos, Ident: ident, Rangle: gtr}
+		c.closeNode(p, label)
+		f.Label = label
+
+	case token.LBRACK:
+		expr = p.parseList()
+		list, ok := expr.(*ast.ListLit)
+		if ok && len(list.Elts) == 1 && list.Ellipsis == token.NoPos {
+			f.Label = &ast.ExprLabel{
+				Lbrack: list.Lbrack,
+				Label:  list.Elts[0],
+				Rbrack: list.Rbrack,
+			}
+			break
+		}
+
+		fallthrough
+	default:
+		return expr, false
+	}
+	return expr, true
+}
+
+func (p *parser) parseStruct() (expr ast.Expr) {
+	c := p.openComments()
+	defer func() { c.closeNode(p, expr) }()
+
+	lbrace := p.expect(token.LBRACE)
+
+	if p.trace {
+		defer un(trace(p, "StructLit"))
+	}
+
+	elts := p.parseStructBody()
+	rbrace := p.expectClosing(token.RBRACE, "struct literal")
+	return &ast.StructLit{
+		Lbrace: lbrace,
+		Elts:   elts,
+		Rbrace: rbrace,
+	}
+}
+
+func (p *parser) parseStructBody() []ast.Decl {
+	if p.trace {
+		defer un(trace(p, "StructBody"))
+	}
+
+	p.exprLev++
+	var elts []ast.Decl
+	if p.tok != token.RBRACE {
+		elts = p.parseFieldList(false)
+	}
+	p.exprLev--
+
+	return elts
+}
+
+func isClauseStart(tok token.Token) bool {
+	return tok == token.FOR || tok == token.IF // || tok == LET
+}
+
+func (p *parser) parseComprehensionClauses() (clauses []ast.Clause) {
+	// TODO: reuse the Template spec, which is possible if it doesn't check
+	// that the first item is an identifier.
+	for {
+		if p.tok == token.COMMA {
+			p.next()
+		}
+		switch p.tok {
+		case token.FOR:
+			c := p.openComments()
+			forPos := p.expect(token.FOR)
+			var key, value *ast.Ident
+			var colon token.Pos
+			value = p.parseIdent()
+			if p.tok == token.COMMA {
+				colon = p.expect(token.COMMA)
+				key = value
+				value = p.parseIdent()
+			}
+			c.pos = 4
+			// params := p.parseParams(nil, ARROW)
+			clauses = append(clauses, c.closeClause(p, &ast.ForClause{
+				For:    forPos,
+				Key:    key,
+				Colon:  colon,
+				Value:  value,
+				In:     p.expect(token.IN),
+				Source: p.parseExpr(),
+			}))
+
+		case token.IF:
+			c := p.openComments()
+			clauses = append(clauses, c.closeClause(p, &ast.IfClause{
+				If:        p.expect(token.IF),
+				Condition: p.parseExpr(),
+			}))
+
+		// TODO: case LET:
+		default:
+			return clauses
+		}
+	}
+}
+
+func (p *parser) parseList() (expr ast.Expr) {
+	c := p.openComments()
+	defer func() { c.closeNode(p, expr) }()
+
+	lbrack := p.expect(token.LBRACK)
+
+	if p.trace {
+		defer un(trace(p, "ListLiteral"))
+	}
+
+	elts := p.parseListElements()
+
+	if clauses := p.parseComprehensionClauses(); clauses != nil {
+		var expr ast.Expr
+		if len(elts) != 1 {
+			p.error(lbrack+1, "list comprehension must have exactly one element")
+		}
+		if len(elts) > 0 {
+			expr = elts[0]
+		}
+		rbrack := p.expectClosing(token.RBRACK, "list comprehension")
+
+		return &ast.ListComprehension{
+			Lbrack:  lbrack,
+			Expr:    expr,
+			Clauses: clauses,
+			Rbrack:  rbrack,
+		}
+	}
+
+	ellipsis := token.NoPos
+	typ := ast.Expr(nil)
+	if p.tok == token.ELLIPSIS {
+		ellipsis = p.pos
+		p.next()
+		if p.tok != token.COMMA && p.tok != token.RBRACK {
+			typ = p.parseRHS()
+		}
+		if p.atComma("list literal", token.RBRACK) {
+			p.next()
+		}
+	}
+
+	rbrack := p.expectClosing(token.RBRACK, "list literal")
+	return &ast.ListLit{
+		Lbrack:   lbrack,
+		Elts:     elts,
+		Ellipsis: ellipsis,
+		Type:     typ,
+		Rbrack:   rbrack}
+}
+
+func (p *parser) parseListElements() (list []ast.Expr) {
+	if p.trace {
+		defer un(trace(p, "ListElements"))
+	}
+	p.openList()
+	defer p.closeList()
+
+	for p.tok != token.RBRACK && p.tok != token.ELLIPSIS && p.tok != token.EOF {
+		list = append(list, p.parseListElement())
+		// Enforce there is an explicit comma. We could also allow the
+		// omission of commas in lists, but this gives rise to some ambiguities
+		// with list comprehensions.
+		if p.tok == token.COMMA && p.lit != "," {
+			p.next()
+			// Allow missing comma for last element, though, to be compliant
+			// with JSON.
+			if p.tok == token.RBRACK || p.tok == token.FOR || p.tok == token.IF {
+				break
+			}
+			p.error(p.pos, "missing ',' before newline in list literal")
+		} else if !p.atComma("list literal", token.RBRACK, token.FOR, token.IF) {
+			break
+		}
+		p.next()
+	}
+
+	return
+}
+
+func (p *parser) parseListElement() (expr ast.Expr) {
+	if p.trace {
+		defer un(trace(p, "ListElement"))
+	}
+	c := p.openComments()
+	defer func() { c.closeNode(p, expr) }()
+
+	e := p.parseRHS()
+	switch p.tok {
+	case token.ELLIPSIS:
+		return &ast.Ellipsis{Ellipsis: p.expect(token.ELLIPSIS), Elt: e}
+	}
+	return e
+}
+
+// checkExpr checks that x is an expression (and not a type).
+func (p *parser) checkExpr(x ast.Expr) ast.Expr {
+	switch unparen(x).(type) {
+	case *ast.BadExpr:
+	case *ast.BottomLit:
+	case *ast.Ident:
+	case *ast.BasicLit:
+	case *ast.Interpolation:
+	case *ast.StructLit:
+	case *ast.ListLit:
+	case *ast.LambdaExpr:
+	case *ast.ListComprehension:
+	case *ast.ParenExpr:
+		panic("unreachable")
+	case *ast.SelectorExpr:
+	case *ast.IndexExpr:
+	case *ast.SliceExpr:
+	case *ast.CallExpr:
+	case *ast.UnaryExpr:
+	case *ast.BinaryExpr:
+	default:
+		// all other nodes are not proper expressions
+		p.errorExpected(x.Pos(), "expression")
+		x = &ast.BadExpr{
+			From: x.Pos(), To: p.safePos(x.End()),
+		}
+	}
+	return x
+}
+
+// If x is of the form (T), unparen returns unparen(T), otherwise it returns x.
+func unparen(x ast.Expr) ast.Expr {
+	if p, isParen := x.(*ast.ParenExpr); isParen {
+		x = unparen(p.X)
+	}
+	return x
+}
+
+func (p *parser) parsePrimaryExpr() ast.Expr {
+	if p.trace {
+		defer un(trace(p, "PrimaryExpr"))
+	}
+
+	x := p.parseOperand()
+
+L:
+	for {
+		switch p.tok {
+		case token.PERIOD:
+			c := p.openComments()
+			c.pos = 1
+			p.next()
+			switch p.tok {
+			case token.IDENT:
+				x = &ast.SelectorExpr{
+					X:   p.checkExpr(x),
+					Sel: p.parseIdent(),
+				}
+			default:
+				pos := p.pos
+				p.errorExpected(pos, "selector")
+				p.next() // make progress
+				x = &ast.SelectorExpr{X: x, Sel: &ast.Ident{NamePos: pos, Name: "_"}}
+			}
+			c.closeNode(p, x)
+		case token.LBRACK:
+			x = p.parseIndexOrSlice(p.checkExpr(x))
+		case token.LPAREN:
+			x = p.parseCallOrConversion(p.checkExpr(x))
+		default:
+			break L
+		}
+	}
+
+	return x
+}
+
+func (p *parser) parseUnaryExpr() ast.Expr {
+	if p.trace {
+		defer un(trace(p, "UnaryExpr"))
+	}
+
+	switch p.tok {
+	case token.ADD, token.SUB, token.NOT:
+		pos, op := p.pos, p.tok
+		c := p.openComments()
+		p.next()
+		return c.closeExpr(p, &ast.UnaryExpr{
+			OpPos: pos,
+			Op:    op,
+			X:     p.checkExpr(p.parseUnaryExpr()),
+		})
+	}
+
+	return p.parsePrimaryExpr()
+}
+
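+// tokPrec returns the current token and its binary-operator precedence,
+// treating the identifiers quo, rem, div, and mod as operators with
+// precedence 7.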
+func (p *parser) tokPrec() (token.Token, int) {
+	tok := p.tok
+	if tok == token.IDENT {
+		switch p.lit {
+		case "quo":
+			return token.IQUO, 7
+		case "rem":
+			return token.IREM, 7
+		case "div":
+			return token.IDIV, 7
+		case "mod":
+			return token.IMOD, 7
+		default:
+			return tok, 0
+		}
+	}
+	return tok, tok.Precedence()
+}
+
+func (p *parser) parseBinaryExpr(prec1 int) ast.Expr {
+	if p.trace {
+		defer un(trace(p, "BinaryExpr"))
+	}
+	p.openList()
+	defer p.closeList()
+
+	x := p.parseUnaryExpr()
+
+	for {
+		op, prec := p.tokPrec()
+		if prec < prec1 {
+			return x
+		}
+		c := p.openComments()
+		c.pos = 1
+		pos := p.expect(p.tok)
+		x = c.closeExpr(p, &ast.BinaryExpr{
+			X:     p.checkExpr(x),
+			OpPos: pos,
+			Op:    op,
+			Y:     p.checkExpr(p.parseBinaryExpr(prec + 1))})
+	}
+}
+
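+// parseInterpolation parses a string interpolation, alternating literal
+// string segments with embedded expressions and resuming the string in
+// the scanner after each closing ')'.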
+func (p *parser) parseInterpolation() (expr ast.Expr) {
+	c := p.openComments()
+	defer func() { c.closeNode(p, expr) }()
+
+	p.openList()
+	defer p.closeList()
+
+	cc := p.openComments()
+
+	lit := p.lit
+	p.next()
+	last := &ast.BasicLit{ValuePos: p.pos, Kind: token.STRING, Value: lit}
+	exprs := []ast.Expr{last}
+
+	quote := rune(lit[0])
+	numQuotes := 1
+	if len(lit) > 2 && lit[0] == lit[1] {
+		numQuotes = 3
+	}
+
+	for p.tok == token.LPAREN {
+		c.pos = 1
+		p.expect(token.LPAREN)
+		cc.closeExpr(p, last)
+
+		exprs = append(exprs, p.parseExpr())
+
+		cc = p.openComments()
+		if p.tok != token.RPAREN {
+			p.error(p.pos, "expected ')' for string interpolation")
+		}
+		lit = p.scanner.ResumeInterpolation(quote, numQuotes)
+		p.next()
+		last = &ast.BasicLit{
+			ValuePos: p.pos,
+			Kind:     token.STRING,
+			Value:    lit,
+		}
+		exprs = append(exprs, last)
+	}
+	cc.closeExpr(p, last)
+	return &ast.Interpolation{Elts: exprs}
+}
+
+// Callers must check the result (using checkExpr), depending on context.
+func (p *parser) parseExpr() ast.Expr {
+	if p.trace {
+		defer un(trace(p, "Expression"))
+	}
+
+	return p.parseBinaryExpr(token.LowestPrec + 1)
+}
+
+func (p *parser) parseRHS() ast.Expr {
+	x := p.checkExpr(p.parseExpr())
+	return x
+}
+
+func (p *parser) parseCallExpr(callType string) *ast.CallExpr {
+	x := p.parseRHS() // could be a conversion: (some type)(x)
+	if call, isCall := x.(*ast.CallExpr); isCall {
+		return call
+	}
+	if _, isBad := x.(*ast.BadExpr); !isBad {
+		// only report error if it's a new one
+		p.error(p.safePos(x.End()), fmt.Sprintf("function must be invoked in %s statement", callType))
+	}
+	return nil
+}
+
+// ----------------------------------------------------------------------------
+// Declarations
+
+type parseSpecFunction func(iota int) *ast.ImportSpec
+
+func isValidImport(lit string) bool {
+	const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD"
+	s, _ := strconv.Unquote(lit) // the scanner returns a legal string literal
+	for _, r := range s {
+		if !unicode.IsGraphic(r) || unicode.IsSpace(r) || strings.ContainsRune(illegalChars, r) {
+			return false
+		}
+	}
+	return s != ""
+}
+
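+// parseImportSpec parses a single import spec: an optional '.' or identifier
+// name followed by the quoted import path. The spec is also recorded in
+// p.imports.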
+func (p *parser) parseImportSpec(_ int) *ast.ImportSpec {
+	if p.trace {
+		defer un(trace(p, "ImportSpec"))
+	}
+
+	c := p.openComments()
+
+	var ident *ast.Ident
+	switch p.tok {
+	case token.PERIOD:
+		ident = &ast.Ident{NamePos: p.pos, Name: "."}
+		p.next()
+	case token.IDENT:
+		ident = p.parseIdent()
+	}
+
+	pos := p.pos
+	var path string
+	if p.tok == token.STRING {
+		path = p.lit
+		if !isValidImport(path) {
+			p.error(pos, "invalid import path: "+path)
+		}
+		p.next()
+		p.expectComma() // call before accessing p.linecomment
+	} else {
+		p.expect(token.STRING) // use expect() error handling
+		if p.tok == token.COMMA {
+			p.expectComma() // call before accessing p.linecomment
+		}
+	}
+	// collect imports
+	spec := &ast.ImportSpec{
+		Name: ident,
+		Path: &ast.BasicLit{ValuePos: pos, Kind: token.STRING, Value: path},
+	}
+	c.closeNode(p, spec)
+	p.imports = append(p.imports, spec)
+
+	return spec
+}
+
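+// parseImports parses an import declaration: the import keyword followed by
+// either a single import spec or a parenthesized list of specs.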
+func (p *parser) parseImports() *ast.ImportDecl {
+	if p.trace {
+		defer un(trace(p, "Imports"))
+	}
+	c := p.openComments()
+
+	ident := p.parseIdent()
+	var lparen, rparen token.Pos
+	var list []*ast.ImportSpec
+	if p.tok == token.LPAREN {
+		lparen = p.pos
+		p.next()
+		p.openList()
+		for iota := 0; p.tok != token.RPAREN && p.tok != token.EOF; iota++ {
+			list = append(list, p.parseImportSpec(iota))
+		}
+		p.closeList()
+		rparen = p.expect(token.RPAREN)
+		p.expectComma()
+	} else {
+		list = append(list, p.parseImportSpec(0))
+	}
+
+	d := &ast.ImportDecl{
+		Import: ident.Pos(),
+		Lparen: lparen,
+		Specs:  list,
+		Rparen: rparen,
+	}
+	c.closeNode(p, d)
+	return d
+}
+
+// ----------------------------------------------------------------------------
+// Source files
+
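+// parseFile parses a CUE file: an optional package clause, any import
+// declarations, and the remaining declarations, subject to the parse mode.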
+func (p *parser) parseFile() *ast.File {
+	if p.trace {
+		defer un(trace(p, "File"))
+	}
+
+	c := p.comments
+
+	// Don't bother parsing the rest if we had errors scanning the first token.
+	// Likely not a CUE source file at all.
+	if p.errors.Len() != 0 {
+		return nil
+	}
+
+	// The package clause is not a declaration: it does not appear in any
+	// scope.
+	pos := p.pos
+	var name *ast.Ident
+	if p.tok == token.IDENT && p.lit == "package" {
+		p.expect(token.IDENT)
+		name = p.parseIdent()
+		if name.Name == "_" && p.mode&declarationErrorsMode != 0 {
+			p.error(p.pos, "invalid package name _")
+		}
+		p.expectComma()
+	} else {
+		pos = token.NoPos
+	}
+	c.pos = 3
+
+	p.openList()
+	var decls []ast.Decl
+	if p.mode&packageClauseOnlyMode == 0 {
+		// import decls
+		for p.tok == token.IDENT && p.lit == "import" {
+			decls = append(decls, p.parseImports())
+		}
+
+		if p.mode&importsOnlyMode == 0 {
+			// rest of package decls
+			// TODO: loop and allow multiple expressions.
+			decls = append(decls, p.parseFieldList(true)...)
+			p.expect(token.EOF)
+		}
+	}
+	p.closeList()
+
+	f := &ast.File{
+		Package: pos,
+		Name:    name,
+		Imports: p.imports,
+		Decls:   decls,
+	}
+	c.closeNode(p, f)
+	return f
+}
diff --git a/cue/parser/parser_test.go b/cue/parser/parser_test.go
new file mode 100644
index 0000000..ab4859c
--- /dev/null
+++ b/cue/parser/parser_test.go
@@ -0,0 +1,543 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+	"testing"
+
+	"cuelang.org/go/cue/ast"
+	"cuelang.org/go/cue/token"
+)
+
+func TestParse(t *testing.T) {
+	testCases := []struct{ desc, in, out string }{{
+		"empty file", "", "",
+	}, {
+		"empty struct", "{}", "{}",
+	}, {
+		"empty structs", "{},{},", "{}, {}",
+	}, {
+		"empty structs; elided comma", "{}\n{}", "{}, {}",
+	}, {
+		"basic lits", `"a","b", 3,3.4,5,2_3`, `"a", "b", 3, 3.4, 5, 2_3`,
+	}, {
+		"keyword basic lits", `true,false,null`, `true, false, null`,
+	}, {
+		"keywords as labels",
+		`if: 0, for: 1, in: 2, where: 3, div: 4, quo: 5`,
+		`if: 0, for: 1, in: 2, where: 3, div: 4, quo: 5`,
+	}, {
+		"json",
+		`{
+			"a": 1,
+			"b": "2",
+			"c": 3
+		}`,
+		`{"a": 1, "b": "2", "c": 3}`,
+	}, {
+		"json:extra comma",
+		`{
+			"a": 1,
+			"b": "2",
+			"c": 3,
+		}`,
+		`{"a": 1, "b": "2", "c": 3}`,
+	}, {
+		"json:simplified",
+		`{
+			a: 1
+			b: "2"
+			c: 3
+		}`,
+		`{a: 1, b: "2", c: 3}`,
+	}, {
+		"not emitted",
+		`a: true
+		 b: "2"
+		 c: 3
+		`,
+		`a: true, b: "2", c: 3`,
+	}, {
+		"emitted refrencing non-emitted",
+		`a: 1
+		 b: "2"
+		 c: 3
+		{ name: b, total: a + b }`,
+		`a: 1, b: "2", c: 3, {name: b, total: a+b}`,
+	}, {
+		"package file",
+		`package k8s
+		 {}
+		`,
+		`package k8s, {}`,
+	}, {
+		"imports group",
+		`package k8s
+
+		import (
+			a "foo"
+			"bar/baz"
+			. "model"
+		)
+		`,
+		`package k8s, import ( a "foo", "bar/baz", . "model" )`,
+	}, {
+		"imports single",
+		`package k8s
+
+		import a "foo"
+		import "bar/baz"
+		import . "model"
+			`,
+		`package k8s, import a "foo", import "bar/baz", import . "model"`,
+	}, {
+		"collapsed fields",
+		`a b c: 1
+		 // job foo { bar: 1 } // TODO error after foo
+		 job "foo": { bar: 1 }
+		`,
+		`a: {b: {c: 1}}, job: {"foo": {bar: 1}}`,
+	}, {
+		"identifiers",
+		`// 	$_: 1,
+			a: {b: {c: d}}
+			c: a
+			d: a.b
+			// e: a."b" // TODO: is an error
+			e: a.b.c
+			"f": f,
+			<X>: X
+		`,
+		"a: {b: {c: d}}, c: a, d: a.b, e: a.b.c, \"f\": f, <X>: X",
+	}, {
+		"expressions",
+		`	a: (2 + 3) * 5
+			b: (2 + 3) + 4
+			c: 2 + 3 + 4
+			d: -1
+			e: !foo
+			f: _|_
+		`,
+		"a: (2+3)*5, b: (2+3)+4, c: 2+3+4, d: -1, e: !foo, f: _|_",
+	}, {
+		"pseudo keyword expressions",
+		`	a: (2 div 3) mod 5
+			b: (2 quo 3) rem 4
+			c: 2 div 3 div 4
+		`,
+		"a: (2 div 3) mod 5, b: (2 quo 3) rem 4, c: 2 div 3 div 4",
+	}, {
+		"ranges",
+		`	a: 1..2
+			b: 2.0 .. 40.0
+			c: "a".."b"
+			v: (1..2)..(5..10)
+			w: 1..2..3
+			d: 3T..5M
+		`,
+		"a: 1..2, b: 2.0..40.0, c: \"a\"..\"b\", v: (1..2)..(5..10), w: 1..2..3, d: 3T..5M",
+	}, {
+		"indices",
+		`{
+			a: b[2]
+			b: c[1:2]
+			c: "asdf"
+			d: c ["a"]
+		}`,
+		`{a: b[2], b: c[1:2], c: "asdf", d: c["a"]}`,
+	}, {
+		"lambdas",
+		`{
+			a(P, Q, r: R) -> { p: P, q: Q }
+			b:             a(4002, "s")
+		}`,
+		`{a: (P: _,Q: _,r: R,) -> {p: P, q: Q}, b: a(4002, "s")}`, // c(C): {d(D): {}}}`,
+	}, {
+		"calls",
+		`{
+			a: b(a.b, c.d)
+			b: a.b(c)
+		}`,
+		`{a: b(a.b, c.d), b: a.b(c)}`,
+	}, {
+		"lists",
+		`{
+			a: [ 1, 2, 3, b..., c... ]
+			b: [ 1, 2, 3, ],
+			c: [ 1,
+			 2,
+			 3
+			 ],
+			d: [ 1+2, 2, 4,]
+		}`,
+		`{a: [1, 2, 3, b..., c...], b: [1, 2, 3], c: [1, 2, 3], d: [1+2, 2, 4]}`,
+	}, {
+		"list types",
+		`{
+			a: 4*[int]
+			b: 0..5*[ {a: 5} ]
+			c1: [...int]
+			c2: [...]
+			c3: [1, 2, ...int,]
+		}`,
+		`{a: 4*[int], b: 0..5*[{a: 5}], c1: [...int], c2: [...], c3: [1, 2, ...int]}`,
+	}, {
+		"list comprehensions",
+		`{
+				y: [1,2,3]
+				b: [ x for x in y if x == 1 ],
+			}`,
+		`{y: [1, 2, 3], b: [x for x in y if x==1 ]}`,
+	}, {
+		"field comprehensions",
+		`{
+				y: { a: 1, b: 2}
+				a: { "\(k)": v for k, v in y if v > 2 }
+			 }`,
+		`{y: {a: 1, b: 2}, a: {"\(k)": v for k: v in y if v>2 }}`,
+	}, {
+		"duplicates allowed",
+		`{
+			a b: 3
+			a: { b: 3 }
+		}`,
+		"{a: {b: 3}, a: {b: 3}}",
+	}, {
+		"templates",
+		`{
+			<foo>: { a: int }
+			a:     { a: 1 }
+		}`,
+		"{<foo>: {a: int}, a: {a: 1}}",
+	}, {
+		"foo",
+		`[
+			[1],
+			[1, 2],
+			[1, 2, 3],
+		]`,
+		"[[1], [1, 2], [1, 2, 3]]",
+	}, {
+		"interpolation",
+		`a: "foo \(ident)"
+		 b: "bar \(bar)  $$$ "
+		 c: "nest \(   { a: "\( nest ) "}.a ) \(5)"
+		 m1: """
+			 multi \(bar)
+			 """
+		 m2: '''
+			 \(bar) multi
+			 '''`,
+		`a: "foo \(ident)", b: "bar \(bar)  $$$ ", c: "nest \({a: "\(nest) "}.a) \(5)", ` + "m1: \"\"\"\n\t\t\t multi \\(bar)\n\t\t\t \"\"\", m2: '''\n\t\t\t \\(bar) multi\n\t\t\t '''",
+	}, {
+		"file comments",
+		`// foo
+
+		// uni
+		package foo // uniline
+
+		// file.1
+		// file.2
+
+		`,
+		"<[0// foo] [d0// uni] [l3// uniline] [3// file.1 // file.2] package foo, >",
+	}, {
+		"line comments",
+		`// doc
+		 a: 5 // line
+		 b: 6 // lineb
+			  // next
+			`, // next is followed by EOF. Ensure it doesn't move to file.
+		"<[d0// doc] [l4// line] a: 5>, " +
+			"<[l4// lineb] [4// next] b: 6>",
+	}, {
+		"alt comments",
+		`// a ...
+		a: 5 // line a
+
+		// about a
+
+		// b ...
+		b: // lineb
+		  6
+
+		// about b
+
+		c: 7
+
+		// about c
+
+		`,
+		"<[d0// a ...] [l4// line a] [4// about a] a: 5>, " +
+			"<[d0// b ...] [l2// lineb] [4// about b] b: 6>, " +
+			"<[4// about c] c: 7>",
+	}, {
+		"expr comments",
+		`
+		a: 2 +  // 2 +
+		   3 +  // 3 +
+		   4    // 4
+		l1(     // sig
+		  ) ->  // arrow
+		   4    // expr
+		l2(a // la
+			) -> // arrow
+			a // l2
+		l3(
+			// param a
+			a : // la
+
+			// int
+			int // lint
+			 ) ->  // larrow
+			a + 1
+		   `,
+		"<[l4// 4] a: <[l2// 3 +] <[l2// 2 +] 2+3>+4>>, " +
+			"<[l4// expr] l1: <[l1// sig] [l4// arrow] () -> 4>>, " +
+			"<[l4// l2] l2: <[l4// arrow] (<[l1// la] a: _>,) -> a>>, " +
+			"l3: <[l4// larrow] (<[l1// la] [l3// lint] <[d0// param a] a>: <[d0// int] int>>,) -> a+1>",
+	}, {
+		"composit comments",
+		`a : {
+			a: 1, b: 2, c: 3, d: 4
+			// end
+		}
+		b: [
+			1, 2, 3, 4, 5,
+			// end
+		]
+		c: [ 1, 2, 3, 4, // here
+			5, 6, 7, 8 // and here
+		]
+		d: {
+			a: /* 8 */ 1 // Hello
+			// Doc
+			b: 2
+		}
+		e1: [
+			// comment in list body
+		]
+		e2: {
+			// comment in struct body
+		}
+		`,
+		"a: <[d2// end] {a: 1, b: 2, c: 3, d: 4}>, " +
+			"b: <[d2// end] [1, 2, 3, 4, 5]>, " +
+			"c: [1, 2, 3, <[l1// here] 4>, 5, 6, 7, <[l1// and here] 8>], " +
+			"d: {<[2/* 8 */] [l4// Hello] a: 1>, <[d0// Doc] b: 2>}, " +
+			"e1: <[d2// comment in list body] []>, " +
+			"e2: <[d1// comment in struct body] {}>",
+	}, {
+		"emit comments",
+		`// a comment at the beginning of the file
+
+		// a second comment
+
+		// comment
+		a: 5
+
+		{}
+
+		// a comment at the end of the file
+		`,
+		"<[0// a comment at the beginning of the file] [0// a second comment] <[d0// comment] a: 5>, <[2// a comment at the end of the file] {}>>",
+	}}
+	for _, tc := range testCases {
+		t.Run(tc.desc, func(t *testing.T) {
+			fset := token.NewFileSet()
+			mode := []Option{AllErrors, ParseLambdas}
+			if strings.Contains(tc.desc, "comments") {
+				mode = append(mode, ParseComments)
+			}
+			f, err := ParseFile(fset, "input", tc.in, mode...)
+			if err != nil {
+				t.Errorf("unexpected error: %v", err)
+			}
+			if got := debugStr(f); got != tc.out {
+				t.Errorf("\ngot  %q;\nwant %q", got, tc.out)
+			}
+		})
+	}
+}
+
+func TestParseExpr(t *testing.T) {
+	// just kicking the tires:
+	// a valid arithmetic expression
+	src := "a + b"
+	x, err := parseExprString(src)
+	if err != nil {
+		t.Errorf("ParseExpr(%q): %v", src, err)
+	}
+	// sanity check
+	if _, ok := x.(*ast.BinaryExpr); !ok {
+		t.Errorf("ParseExpr(%q): got %T, want *BinaryExpr", src, x)
+	}
+
+	// an invalid expression
+	src = "a + *"
+	if _, err := parseExprString(src); err == nil {
+		t.Errorf("ParseExpr(%q): got no error", src)
+	}
+
+	// a comma is not permitted unless automatically inserted
+	src = "a + b\n"
+	if _, err := parseExprString(src); err != nil {
+		t.Errorf("ParseExpr(%q): got error %s", src, err)
+	}
+	src = "a + b;"
+	if _, err := parseExprString(src); err == nil {
+		t.Errorf("ParseExpr(%q): got no error", src)
+	}
+
+	// various other stuff following a valid expression
+	const validExpr = "a + b"
+	const anything = "dh3*#D)#_"
+	for _, c := range "!)]};," {
+		src := validExpr + string(c) + anything
+		if _, err := parseExprString(src); err == nil {
+			t.Errorf("ParseExpr(%q): got no error", src)
+		}
+	}
+
+	// ParseExpr must not crash
+	for _, src := range valids {
+		parseExprString(src)
+	}
+}
+
+func TestImports(t *testing.T) {
+	imports := map[string]bool{
+		`"a"`:        true,
+		`"a/b"`:      true,
+		`"a.b"`:      true,
+		`"m\x61th"`:  true,
+		`"greek/αβ"`: true,
+		`""`:         false,
+
+		// Each of these pairs tests both `` vs "" strings
+		// and also use of invalid characters spelled out as
+		// escape sequences and written directly.
+		// For example `"\x00"` tests import "\x00"
+		// while "`\x00`" tests import `<actual-NUL-byte>`.
+		"`a`":        true,
+		`"\x00"`:     false,
+		"`\x00`":     false,
+		`"\x7f"`:     false,
+		"`\x7f`":     false,
+		`"a!"`:       false,
+		"`a!`":       false,
+		`"a b"`:      false,
+		"`a b`":      false,
+		`"a\\b"`:     false,
+		"`a\\b`":     false,
+		"\"`a`\"":    false,
+		"`\"a\"`":    false,
+		`"\x80\x80"`: false,
+		"`\x80\x80`": false,
+		`"\xFFFD"`:   false,
+		"`\xFFFD`":   false,
+	}
+	for path, isValid := range imports {
+		t.Run(path, func(t *testing.T) {
+			src := fmt.Sprintf("package p, import %s", path)
+			_, err := ParseFile(token.NewFileSet(), "", src)
+			switch {
+			case err != nil && isValid:
+				t.Errorf("ParseFile(%s): got %v; expected no error", src, err)
+			case err == nil && !isValid:
+				t.Errorf("ParseFile(%s): got no error; expected one", src)
+			}
+		})
+	}
+}
+
+func labelName(l ast.Label) string {
+	name, _ := ast.LabelName(l)
+	return name
+}
+
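+// getField returns the field with the given dot-separated name path in file,
+// descending into struct values at each step.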
+func getField(file *ast.File, fieldname string) *ast.Field {
+	get := func(elts []ast.Decl, name string) *ast.Field {
+		for _, s := range elts {
+			if s, ok := s.(*ast.Field); ok && labelName(s.Label) == name {
+				return s
+			}
+		}
+		return nil
+	}
+	elts := file.Decls
+	var m *ast.Field
+	for _, p := range strings.Split(fieldname, ".") {
+		m = get(elts, p)
+		if v, ok := m.Value.(*ast.StructLit); ok {
+			elts = v.Elts
+		} else {
+			break
+		}
+	}
+	return m
+}
+
+// Don't use CommentGroup.Text() - we want to see exact comment text.
+func commentText(c *ast.CommentGroup) string {
+	var buf bytes.Buffer
+	if c != nil {
+		for _, c := range c.List {
+			buf.WriteString(c.Text)
+		}
+	}
+	return buf.String()
+}
+
+// TestIncompleteSelection ensures that an incomplete selector
+// expression is parsed as a (blank) *SelectorExpr, not a
+// *BadExpr.
+func TestIncompleteSelection(t *testing.T) {
+	for _, src := range []string{
+		"{ a: fmt. }",           // at end of object
+		"{ a: fmt.\n\"a\": x }", // not at end of struct
+	} {
+		t.Run("", func(t *testing.T) {
+			fset := token.NewFileSet()
+			f, err := ParseFile(fset, "", src)
+			if err == nil {
+				t.Fatalf("ParseFile(%s) succeeded unexpectedly", src)
+			}
+
+			const wantErr = "expected selector"
+			if !strings.Contains(err.Error(), wantErr) {
+				t.Errorf("ParseFile returned wrong error %q, want %q", err, wantErr)
+			}
+
+			var sel *ast.SelectorExpr
+			ast.Walk(f, func(n ast.Node) bool {
+				if n, ok := n.(*ast.SelectorExpr); ok {
+					sel = n
+				}
+				return true
+			}, nil)
+			if sel == nil {
+				t.Fatalf("found no *SelectorExpr: %#v %s", f.Decls[0], debugStr(f))
+			}
+			const wantSel = "&{{<nil>} fmt _}"
+			if fmt.Sprint(sel) != wantSel {
+				t.Fatalf("found selector %v, want %s", sel, wantSel)
+			}
+		})
+	}
+}
diff --git a/cue/parser/performance_test.go b/cue/parser/performance_test.go
new file mode 100644
index 0000000..f08b9d4
--- /dev/null
+++ b/cue/parser/performance_test.go
@@ -0,0 +1,41 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser
+
+import (
+	"io/ioutil"
+	"testing"
+
+	"cuelang.org/go/cue/token"
+)
+
+var src = readFile("testdata/commas.src")
+
+func readFile(filename string) []byte {
+	data, err := ioutil.ReadFile(filename)
+	if err != nil {
+		panic(err)
+	}
+	return data
+}
+
+func BenchmarkParse(b *testing.B) {
+	b.SetBytes(int64(len(src)))
+	for i := 0; i < b.N; i++ {
+		if _, err := ParseFile(token.NewFileSet(), "", src, ParseComments); err != nil {
+			b.Fatalf("benchmark failed due to parse error: %s", err)
+		}
+	}
+}
diff --git a/cue/parser/print.go b/cue/parser/print.go
new file mode 100644
index 0000000..e9ebd55
--- /dev/null
+++ b/cue/parser/print.go
@@ -0,0 +1,302 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+
+	"cuelang.org/go/cue/ast"
+	"cuelang.org/go/cue/token"
+	"cuelang.org/go/internal"
+)
+
+func init() {
+	internal.DebugStr = debugStr
+}
+
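+// debugStr returns a compact, single-line debug representation of a node or
+// node list, wrapping it in <...> when comments are attached.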
+func debugStr(x interface{}) (out string) {
+	if n, ok := x.(ast.Node); ok {
+		comments := ""
+		for _, g := range n.Comments() {
+			comments += debugStr(g)
+		}
+		if comments != "" {
+			defer func() { out = "<" + comments + out + ">" }()
+		}
+	}
+	switch v := x.(type) {
+	case *ast.File:
+		out := ""
+		if v.Name != nil {
+			out += "package "
+			out += debugStr(v.Name)
+			out += ", "
+		}
+		out += debugStr(v.Decls)
+		return out
+
+	case *ast.Alias:
+		out := debugStr(v.Ident)
+		out += " = "
+		out += debugStr(v.Expr)
+		return out
+
+	case *ast.BottomLit:
+		return "_|_"
+
+	case *ast.BasicLit:
+		return v.Value
+
+	case *ast.Interpolation:
+		for _, e := range v.Elts {
+			out += debugStr(e)
+		}
+		return out
+
+	case *ast.EmitDecl:
+		// out := "<"
+		out += debugStr(v.Expr)
+		// out += ">"
+		return out
+
+	case *ast.ImportDecl:
+		out := "import "
+		if v.Lparen != token.NoPos {
+			out += "( "
+			out += debugStr(v.Specs)
+			out += " )"
+		} else {
+			out += debugStr(v.Specs)
+		}
+		return out
+
+	case *ast.ComprehensionDecl:
+		out := debugStr(v.Field)
+		out += " "
+		out += debugStr(v.Clauses)
+		return out
+
+	case *ast.StructLit:
+		out := "{"
+		out += debugStr(v.Elts)
+		out += "}"
+		return out
+
+	case *ast.ListLit:
+		out := "["
+		out += debugStr(v.Elts)
+		if v.Ellipsis != token.NoPos || v.Type != nil {
+			if out != "[" {
+				out += ", "
+			}
+			out += "..."
+			if v.Type != nil {
+				out += debugStr(v.Type)
+			}
+		}
+		out += "]"
+		return out
+
+	case *ast.ListComprehension:
+		out := "["
+		out += debugStr(v.Expr)
+		out += " "
+		out += debugStr(v.Clauses)
+		out += "]"
+		return out
+
+	case *ast.ForClause:
+		out := "for "
+		if v.Key != nil {
+			out += debugStr(v.Key)
+			out += ": "
+		}
+		out += debugStr(v.Value)
+		out += " in "
+		out += debugStr(v.Source)
+		return out
+
+	case *ast.IfClause:
+		out := "if "
+		out += debugStr(v.Condition)
+		return out
+
+	case *ast.Field:
+		out := debugStr(v.Label)
+		if v.Value != nil {
+			out += ": "
+			out += debugStr(v.Value)
+		}
+		return out
+
+	case *ast.LambdaExpr:
+		out := "("
+		for _, m := range v.Params {
+			out += debugStr(m)
+			out += ","
+		}
+		out += ") -> "
+		out += debugStr(v.Expr)
+		return out
+
+	case *ast.Ident:
+		return v.Name
+
+	case *ast.ExprLabel:
+		out := "["
+		out += debugStr(v.Label)
+		out += "]"
+		return out
+
+	case *ast.TemplateLabel:
+		out := "<"
+		out += debugStr(v.Ident)
+		out += ">"
+		return out
+
+	case *ast.SelectorExpr:
+		return debugStr(v.X) + "." + debugStr(v.Sel)
+
+	case *ast.CallExpr:
+		out := debugStr(v.Fun)
+		out += "("
+		out += debugStr(v.Args)
+		out += ")"
+		return out
+
+	case *ast.Ellipsis:
+		return debugStr(v.Elt) + "..."
+
+	case *ast.ParenExpr:
+		out := "("
+		out += debugStr(v.X)
+		out += ")"
+		return out
+
+	case *ast.UnaryExpr:
+		return v.Op.String() + debugStr(v.X)
+
+	case *ast.BinaryExpr:
+		out := debugStr(v.X)
+		op := v.Op.String()
+		if 'a' <= op[0] && op[0] <= 'z' {
+			op = fmt.Sprintf(" %s ", op)
+		}
+		out += op
+		out += debugStr(v.Y)
+		return out
+
+	case []*ast.CommentGroup:
+		var a []string
+		for _, c := range v {
+			a = append(a, debugStr(c))
+		}
+		return strings.Join(a, "\n")
+
+	case *ast.CommentGroup:
+		str := "["
+		if v.Doc {
+			str += "d"
+		}
+		if v.Line {
+			str += "l"
+		}
+		str += strconv.Itoa(int(v.Position))
+		a := []string{}
+		for _, c := range v.List {
+			a = append(a, c.Text)
+		}
+		return str + strings.Join(a, " ") + "] "
+
+	case *ast.IndexExpr:
+		out := debugStr(v.X)
+		out += "["
+		out += debugStr(v.Index)
+		out += "]"
+		return out
+
+	case *ast.SliceExpr:
+		out := debugStr(v.X)
+		out += "["
+		out += debugStr(v.Low)
+		out += ":"
+		out += debugStr(v.High)
+		out += "]"
+		return out
+
+	case *ast.ImportSpec:
+		out := ""
+		if v.Name != nil {
+			out += debugStr(v.Name)
+			out += " "
+		}
+		out += debugStr(v.Path)
+		return out
+
+	case []ast.Decl:
+		if len(v) == 0 {
+			return ""
+		}
+		out := ""
+		for _, d := range v {
+			out += debugStr(d)
+			out += sep
+		}
+		return out[:len(out)-len(sep)]
+
+	case []ast.Clause:
+		if len(v) == 0 {
+			return ""
+		}
+		out := ""
+		for _, c := range v {
+			out += debugStr(c)
+			out += " "
+		}
+		return out
+
+	case []ast.Expr:
+		if len(v) == 0 {
+			return ""
+		}
+		out := ""
+		for _, d := range v {
+			out += debugStr(d)
+			out += sep
+		}
+		return out[:len(out)-len(sep)]
+
+	case []*ast.ImportSpec:
+		if len(v) == 0 {
+			return ""
+		}
+		out := ""
+		for _, d := range v {
+			out += debugStr(d)
+			out += sep
+		}
+		return out[:len(out)-len(sep)]
+
+	default:
+		if v == nil {
+			return ""
+		}
+		return fmt.Sprintf("<%T>", x)
+	}
+}
+
+const sep = ", "
diff --git a/cue/parser/resolve.go b/cue/parser/resolve.go
new file mode 100644
index 0000000..32eb7e5
--- /dev/null
+++ b/cue/parser/resolve.go
@@ -0,0 +1,209 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file implements scopes and the objects they contain.
+
+package parser
+
+import (
+	"bytes"
+	"fmt"
+
+	"cuelang.org/go/cue/ast"
+	"cuelang.org/go/cue/token"
+)
+
+// resolve resolves all identifiers in a file. Unresolved identifiers are
+// recorded in Unresolved.
+func resolve(f *ast.File, errFn func(pos token.Pos, msg string)) {
+	walk(&scope{errFn: errFn}, f)
+}
+
+// A scope maintains the set of named language entities declared
+// in the scope and a link to the immediately surrounding (outer)
+// scope.
+//
+type scope struct {
+	file  *ast.File
+	outer *scope
+	node  ast.Node
+	index map[string]ast.Node
+
+	errFn func(p token.Pos, msg string)
+}
+
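+// newScope returns a scope for node, nested in outer, populated with the
+// names declared by decls.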
+func newScope(f *ast.File, outer *scope, node ast.Node, decls []ast.Decl) *scope {
+	const n = 4 // initial scope capacity
+	s := &scope{
+		file:  f,
+		outer: outer,
+		node:  node,
+		index: make(map[string]ast.Node, n),
+		errFn: outer.errFn,
+	}
+	for _, d := range decls {
+		switch x := d.(type) {
+		case *ast.Field:
+			if name, ok := ast.LabelName(x.Label); ok {
+				s.insert(name, x.Value)
+			}
+		case *ast.Alias:
+			name := x.Ident.Name
+			s.insert(name, x)
+			// Handle imports
+		}
+	}
+	return s
+}
+
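+// insert records n under name in s. Duplicate regular fields are allowed, but
+// an alias may neither share its name with another alias nor with a field.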
+func (s *scope) insert(name string, n ast.Node) {
+	if _, existing := s.lookup(name); existing != nil {
+		_, isAlias1 := n.(*ast.Alias)
+		_, isAlias2 := existing.(*ast.Alias)
+		if isAlias1 != isAlias2 {
+			s.errFn(n.Pos(), "cannot have alias and non-alias with the same name")
+			return
+		} else if isAlias1 || isAlias2 {
+			s.errFn(n.Pos(), "cannot have two aliases with the same name in the same scope")
+			return
+		}
+	}
+	s.index[name] = n
+}
+
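+// lookup finds name in s or one of its enclosing scopes. It returns the node
+// owning the scope in which the name was declared and the declaration itself;
+// both are nil if the name is not found.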
+func (s *scope) lookup(name string) (obj, node ast.Node) {
+	last := s
+	for s != nil {
+		if n, ok := s.index[name]; ok {
+			if last.node == n {
+				return nil, n
+			}
+			return s.node, n
+		}
+		s, last = s.outer, s
+	}
+	return nil, nil
+}
+
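+// After implements visitor; there is nothing to do after visiting a node.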
+func (s *scope) After(n ast.Node) {}
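+
+// Before implements visitor. It opens scopes for constructs that declare
+// names, resolves identifiers against the scope chain, and reports whether
+// walk should descend into n's children.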
+func (s *scope) Before(n ast.Node) (w visitor) {
+	switch x := n.(type) {
+	case *ast.File:
+		s := newScope(x, s, x, x.Decls)
+		// Support imports.
+		for _, d := range x.Decls {
+			walk(s, d)
+		}
+		return nil
+
+	case *ast.StructLit:
+		return newScope(s.file, s, x, x.Elts)
+
+	case *ast.ComprehensionDecl:
+		s = scopeClauses(s, x.Clauses)
+
+	case *ast.ListComprehension:
+		s = scopeClauses(s, x.Clauses)
+
+	case *ast.Field:
+		switch label := x.Label.(type) {
+		case *ast.Interpolation:
+			walk(s, label)
+		case *ast.ExprLabel:
+			walk(s, x.Label)
+		case *ast.TemplateLabel:
+			s := newScope(s.file, s, x, nil)
+			name, _ := ast.LabelName(label)
+			s.insert(name, x.Label) // Field used for entire lambda.
+			walk(s, x.Value)
+			return nil
+		}
+		// Disallow referring to the current LHS name (this applies recursively)
+		if x.Value != nil {
+			walk(s, x.Value)
+		}
+		return nil
+
+	case *ast.Alias:
+		// Disallow referring to the current LHS name.
+		name := x.Ident.Name
+		saved := s.index[name]
+		delete(s.index, name) // The same name may still appear in another scope
+
+		if x.Expr != nil {
+			walk(s, x.Expr)
+		}
+		s.index[name] = saved
+		return nil
+
+	case *ast.ImportSpec:
+		return nil
+
+	case *ast.SelectorExpr:
+		walk(s, x.X)
+		return nil
+
+	case *ast.LambdaExpr:
+		s = newScope(s.file, s, x, nil)
+		for _, p := range x.Params {
+			name, _ := ast.LabelName(p.Label)
+			s.insert(name, p)
+			if p.Value == nil {
+				// TODO: make this optional
+				p.Value = ast.NewIdent("_")
+				s.insert(name, p)
+			}
+		}
+
+	case *ast.Ident:
+		if obj, node := s.lookup(x.Name); node != nil {
+			x.Node = node
+			x.Scope = obj
+		} else {
+			s.file.Unresolved = append(s.file.Unresolved, x)
+		}
+		return nil
+	}
+	return s
+}
+
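+// scopeClauses opens a nested scope for each for clause, declaring its key
+// and value, and resolves the remaining clauses in the scope at hand.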
+func scopeClauses(s *scope, clauses []ast.Clause) *scope {
+	for _, c := range clauses {
+		if f, ok := c.(*ast.ForClause); ok { // TODO(let): support let clause
+			walk(s, f.Source)
+			s = newScope(s.file, s, f, nil)
+			if f.Key != nil {
+				s.insert(f.Key.Name, f.Key)
+			}
+			s.insert(f.Value.Name, f.Value)
+		} else {
+			walk(s, c)
+		}
+	}
+	return s
+}
+
+// Debugging support
+func (s *scope) String() string {
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "scope %p {", s)
+	if s != nil && len(s.index) > 0 {
+		fmt.Fprintln(&buf)
+		for name := range s.index {
+			fmt.Fprintf(&buf, "\t%v\n", name)
+		}
+	}
+	fmt.Fprintf(&buf, "}\n")
+	return buf.String()
+}
diff --git a/cue/parser/short_test.go b/cue/parser/short_test.go
new file mode 100644
index 0000000..41b38da
--- /dev/null
+++ b/cue/parser/short_test.go
@@ -0,0 +1,50 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file contains test cases for short valid and invalid programs.
+
+package parser
+
+import "testing"
+
+var valids = []string{
+	"\n",
+	`{}`,
+	`{ foo: "fmt", bar: () -> { baz: fmt.Service("Hello, World!") }, }`,
+	`{ <Name>: foo }`,
+	`{ a: 3 }`,
+}
+
+func TestValid(t *testing.T) {
+	for _, src := range valids {
+		t.Run(src, func(t *testing.T) {
+			checkErrors(t, src, src)
+		})
+	}
+}
+
+func TestInvalid(t *testing.T) {
+	invalids := []string{
+		`foo !/* ERROR "expected label or ':', found '!'" */`,
+		// `foo: /* ERROR "expected operand, found '}'" */}`, // TODO: wrong position
+		`{ <Name
+			/* ERROR "expected '>', found newline" */ >: foo }`,
+		// TODO:
+		// `{ </* ERROR "expected identifier, found newline" */
+		// 	Name>: foo }`,
+	}
+	for _, src := range invalids {
+		checkErrors(t, src, src)
+	}
+}
diff --git a/cue/parser/testdata/commas.src b/cue/parser/testdata/commas.src
new file mode 100644
index 0000000..b159a0d
--- /dev/null
+++ b/cue/parser/testdata/commas.src
@@ -0,0 +1,35 @@
+// Copyright 2018 The CUE Authors
+// 
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// 
+//     http://www.apache.org/licenses/LICENSE-2.0
+// 
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Test case for error messages/parser synchronization
+// after missing commas.
+package foo
+
+import "path/to/pkg"
+import name "path/to/pkg"
+import . "path/to/pkg"
+import      /* ERROR "expected 'STRING', found newline" */
+import err  /* ERROR "expected 'STRING', found newline" */
+
+foo: [
+	0 // legal JSON
+]
+
+bar: [
+	0,
+	1,
+	2,
+	3
+]
+
diff --git a/cue/parser/testdata/test.cue b/cue/parser/testdata/test.cue
new file mode 100644
index 0000000..cc048d5
--- /dev/null
+++ b/cue/parser/testdata/test.cue
@@ -0,0 +1,5 @@
+
+import "math"
+
+foo: 1
+bar: "baz"
diff --git a/cue/parser/walk.go b/cue/parser/walk.go
new file mode 100644
index 0000000..39ec801
--- /dev/null
+++ b/cue/parser/walk.go
@@ -0,0 +1,281 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser
+
+import (
+	"fmt"
+
+	"cuelang.org/go/cue/ast"
+	"cuelang.org/go/cue/token"
+)
+
+// TODO: use ast.Walk or adopt that version to allow visitors.
+
+// A visitor's Before method is invoked for each node encountered by walk.
+// If the resulting visitor w is not nil, walk visits each of the children
+// of node with the visitor w, followed by a call of w.After.
+type visitor interface {
+	Before(node ast.Node) (w visitor)
+	After(node ast.Node)
+}
+
+// Helper functions for common node lists. They may be empty.
+
+func walkIdentList(v visitor, list []*ast.Ident) {
+	for _, x := range list {
+		walk(v, x)
+	}
+}
+
+func walkExprList(v visitor, list []ast.Expr) {
+	for _, x := range list {
+		walk(v, x)
+	}
+}
+
+func walkDeclList(v visitor, list []ast.Decl) {
+	for _, x := range list {
+		walk(v, x)
+	}
+}
+
+// walk traverses an AST in depth-first order: It starts by calling
+// v.Before(node); node must not be nil. If the visitor w returned by
+// v.Before(node) is not nil, walk is invoked recursively with visitor
+// w for each of the non-nil children of node, followed by a call of
+// w.After(node).
+//
+func walk(v visitor, node ast.Node) {
+	if v = v.Before(node); v == nil {
+		return
+	}
+
+	// TODO: record the comment groups and interleave with the values like for
+	// parsing and printing?
+	for _, c := range node.Comments() {
+		walk(v, c)
+	}
+
+	// walk children
+	// (the order of the cases matches the order
+	// of the corresponding node types in Go)
+	switch n := node.(type) {
+	// Comments and fields
+	case *ast.Comment:
+		// nothing to do
+
+	case *ast.CommentGroup:
+		for _, c := range n.List {
+			walk(v, c)
+		}
+
+	case *ast.Field:
+		walk(v, n.Label)
+		if n.Value != nil {
+			walk(v, n.Value)
+		}
+
+	case *ast.LambdaExpr:
+		for _, p := range n.Params {
+			walk(v, p)
+		}
+		walk(v, n.Expr)
+
+	case *ast.StructLit:
+		for _, f := range n.Elts {
+			walk(v, f)
+		}
+
+	// Expressions
+	case *ast.BottomLit, *ast.BadExpr, *ast.Ident, *ast.BasicLit:
+		// nothing to do
+
+	case *ast.ExprLabel:
+		walk(v, n.Label)
+
+	case *ast.TemplateLabel:
+		walk(v, n.Ident)
+
+	case *ast.Interpolation:
+		for _, e := range n.Elts {
+			walk(v, e)
+		}
+
+	case *ast.Ellipsis:
+		if n.Elt != nil {
+			walk(v, n.Elt)
+		}
+
+	case *ast.ListLit:
+		walkExprList(v, n.Elts)
+		if n.Type != nil {
+			walk(v, n.Type)
+		}
+
+	case *ast.ParenExpr:
+		walk(v, n.X)
+
+	case *ast.SelectorExpr:
+		walk(v, n.X)
+		walk(v, n.Sel)
+
+	case *ast.IndexExpr:
+		walk(v, n.X)
+		walk(v, n.Index)
+
+	case *ast.SliceExpr:
+		walk(v, n.X)
+		if n.Low != nil {
+			walk(v, n.Low)
+		}
+		if n.High != nil {
+			walk(v, n.High)
+		}
+
+	case *ast.CallExpr:
+		walk(v, n.Fun)
+		walkExprList(v, n.Args)
+
+	case *ast.UnaryExpr:
+		walk(v, n.X)
+
+	case *ast.BinaryExpr:
+		walk(v, n.X)
+		walk(v, n.Y)
+
+	// Declarations
+	case *ast.ImportSpec:
+		if n.Name != nil {
+			walk(v, n.Name)
+		}
+		walk(v, n.Path)
+
+	case *ast.BadDecl:
+		// nothing to do
+
+	case *ast.ImportDecl:
+		for _, s := range n.Specs {
+			walk(v, s)
+		}
+
+	case *ast.EmitDecl:
+		walk(v, n.Expr)
+
+	case *ast.Alias:
+		walk(v, n.Ident)
+		walk(v, n.Expr)
+
+	case *ast.ComprehensionDecl:
+		walk(v, n.Field)
+		for _, c := range n.Clauses {
+			walk(v, c)
+		}
+
+	// Files and packages
+	case *ast.File:
+		if n.Name != nil {
+			walk(v, n.Name)
+		}
+		walkDeclList(v, n.Decls)
+		// don't walk n.Comments - they have been
+		// visited already through the individual
+		// nodes
+
+	case *ast.ListComprehension:
+		walk(v, n.Expr)
+		for _, c := range n.Clauses {
+			walk(v, c)
+		}
+
+	case *ast.ForClause:
+		if n.Key != nil {
+			walk(v, n.Key)
+		}
+		walk(v, n.Value)
+		walk(v, n.Source)
+
+	case *ast.IfClause:
+		walk(v, n.Condition)
+
+	default:
+		panic(fmt.Sprintf("Walk: unexpected node type %T", n))
+	}
+
+	v.After(node)
+}
+
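+// An inspector adapts a pair of before/after callbacks to the visitor
+// interface, tracking comment groups so that they are visited interleaved
+// with the nodes they are attached to.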
+type inspector struct {
+	before func(ast.Node) bool
+	after  func(ast.Node)
+
+	commentStack []commentFrame
+	current      commentFrame
+}
+
+type commentFrame struct {
+	cg  []*ast.CommentGroup
+	pos int8
+}
+
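+// Before invokes the before callback, if any, and, if the walk is to descend
+// into node, pushes a comment frame for it and emits any leading comments.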
+func (f *inspector) Before(node ast.Node) visitor {
+	if f.before == nil || f.before(node) {
+		f.commentStack = append(f.commentStack, f.current)
+		f.current = commentFrame{cg: node.Comments()}
+		f.visitComments(f.current.pos)
+		return f
+	}
+	return nil
+}
+
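+// After emits the remaining comments for node, pops its comment frame, and
+// invokes the after callback, if any.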
+func (f *inspector) After(node ast.Node) {
+	f.visitComments(127)
+	p := len(f.commentStack) - 1
+	f.current = f.commentStack[p]
+	f.commentStack = f.commentStack[:p]
+	f.current.pos++
+	if f.after != nil {
+		f.after(node)
+	}
+}
+
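+// Token advances the comment position past a consumed token.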
+func (f *inspector) Token(t token.Token) {
+	f.current.pos++
+}
+
+func (f *inspector) setPos(i int8) {
+	f.current.pos = i
+}
+
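+// visitComments flushes the comment groups queued on the current frame
+// through the before/after callbacks, skipping groups recorded at position
+// pos.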
+func (f *inspector) visitComments(pos int8) {
+	c := &f.current
+	for ; len(c.cg) > 0; c.cg = c.cg[1:] {
+		cg := c.cg[0]
+		if cg.Position == pos {
+			continue
+		}
+		if f.before == nil || f.before(cg) {
+			for _, c := range cg.List {
+				if f.before == nil || f.before(c) {
+					if f.after != nil {
+						f.after(c)
+					}
+				}
+			}
+			if f.after != nil {
+				f.after(cg)
+			}
+		}
+	}
+}
diff --git a/internal/internal.go b/internal/internal.go
new file mode 100644
index 0000000..23545ac
--- /dev/null
+++ b/internal/internal.go
@@ -0,0 +1,20 @@
+// Copyright 2018 The CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal // import "cuelang.org/go/internal"
+
+// TODO: refactor packages as to make this package unnecessary.
+
+// DebugStr prints a syntax node.
+var DebugStr func(x interface{}) string