cue: remove lambda support
Lambdas are a common source of problems in configuration
languages because they are not orthogonal to structs:
structs can already be used to simulate them.

A better way to provide lambda-like functionality would be
a per-struct emit value. This generalizes the existing emit
value into a more foundational mechanism while avoiding the
issues that come with lambdas.
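
For illustration, a minimal sketch of how a struct can stand in
for a lambda in CUE (the field names are made up for this example
and are not part of the change):

    // "Parameters" are ordinary fields constrained to a type; the
    // "body" is a regular field computed from them.
    add: {
        x: int
        y: int
        result: x + y
    }
    // "Calling" is unification with concrete arguments, followed by
    // selecting the result field.
    sum: (add & {x: 1, y: 2}).result // sum: 3
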
Change-Id: I2082da5b3c0387e25ecf5e525e2db610df8eef2c
diff --git a/cmd/cue/cmd/testdata/loaderr/loaderr.out b/cmd/cue/cmd/testdata/loaderr/loaderr.out
index 2921045..68b2374 100644
--- a/cmd/cue/cmd/testdata/loaderr/loaderr.out
+++ b/cmd/cue/cmd/testdata/loaderr/loaderr.out
@@ -1,3 +1,2 @@
-cannot find package "non-existing":
-
+cannot find package "non-existing"
terminating because of errors
diff --git a/cue/ast.go b/cue/ast.go
index dcb8d1c..2591bbd 100644
--- a/cue/ast.go
+++ b/cue/ast.go
@@ -291,23 +291,6 @@
case *ast.Alias:
// parsed verbatim at reference.
- case *ast.LambdaExpr:
- sig := &params{}
- lambda := &lambdaExpr{newExpr(n), sig, nil}
- v.setScope(n, lambda)
-
- for _, p := range n.Params {
- f, _ := v.nodeLabel(p.Label)
- if p.Value != nil {
- sig.add(f, v.walk(p.Value))
- } else {
- src := &ast.Ident{NamePos: p.Pos(), Name: "_"}
- sig.add(f, &top{baseValue: newExpr(src)})
- }
- }
- lambda.value = v.walk(n.Expr)
- return lambda
-
case *ast.ListComprehension:
yielder := &yield{baseValue: newExpr(n.Expr)}
lc := &listComprehension{
diff --git a/cue/ast/ast.go b/cue/ast/ast.go
index 64ac5fb..91c9dcb 100644
--- a/cue/ast/ast.go
+++ b/cue/ast/ast.go
@@ -62,7 +62,6 @@
func (*Interpolation) exprNode() {}
func (*StructLit) exprNode() {}
func (*ListLit) exprNode() {}
-func (*LambdaExpr) exprNode() {}
// func (*StructComprehension) exprNode() {}
func (*ListComprehension) exprNode() {}
@@ -271,8 +270,7 @@
comments
Label Label // must have at least one element.
- // No colon: Value must be an StructLit with one field or a
- // LambdaExpr.
+ // No colon: Value must be an StructLit with one field.
Colon token.Pos
Value Expr // the value associated with this field.
}
@@ -313,24 +311,6 @@
// An expression is represented by a tree consisting of one
// or more of the following concrete expression nodes.
-// A LambdaExpr defines a function expression.
-//
-// Lambdas are only used internally under controlled conditions. Although
-// the implementation of lambdas is fully functional, enabling them will
-// cause the language to be Turing-complete (if not otherwise limited).
-// Also, lambdas would provide yet another way to create structure, and one
-// that is known to not work well for declarative configuration languages.
-type LambdaExpr struct {
- comments
- Lparen token.Pos // position of "("
- Params []*Field // parameters with possible initializers
- Rparen token.Pos // position of ")"
- Expr Expr
-}
-
-func (t *LambdaExpr) Pos() token.Pos { return t.Lparen }
-func (t *LambdaExpr) End() token.Pos { return t.Rparen }
-
// A BadExpr node is a placeholder for expressions containing
// syntax errors for which no correct expression nodes can be
// created. This is different from an ErrorExpr which represents
diff --git a/cue/ast/walk.go b/cue/ast/walk.go
index b505d75..9869e5d 100644
--- a/cue/ast/walk.go
+++ b/cue/ast/walk.go
@@ -93,12 +93,6 @@
walk(v, n.Value)
}
- case *LambdaExpr:
- for _, p := range n.Params {
- walk(v, p)
- }
- walk(v, n.Expr)
-
case *StructLit:
for _, f := range n.Elts {
walk(v, f)
diff --git a/cue/format/format.go b/cue/format/format.go
index e581df5..7127328 100644
--- a/cue/format/format.go
+++ b/cue/format/format.go
@@ -85,8 +85,7 @@
cfg.fset = token.NewFileSet()
}
- f, err := parser.ParseFile(cfg.fset, "", b,
- parser.ParseComments, parser.ParseLambdas)
+ f, err := parser.ParseFile(cfg.fset, "", b, parser.ParseComments)
if err != nil {
return nil, fmt.Errorf("parse: %s", err)
}
diff --git a/cue/format/format_test.go b/cue/format/format_test.go
index c77130f..5b4f729 100644
--- a/cue/format/format_test.go
+++ b/cue/format/format_test.go
@@ -71,8 +71,7 @@
}
// make sure formatted output is syntactically correct
- if _, err := parser.ParseFile(fset, "", res,
- parser.ParseLambdas, parser.AllErrors); err != nil {
+ if _, err := parser.ParseFile(fset, "", res, parser.AllErrors); err != nil {
return nil, fmt.Errorf("re-parse: %s\n%s", err, res)
}
@@ -307,7 +306,7 @@
// parse pretty printed original
// (//line comments must be interpreted even w/o syntax.ParseComments set)
f2, err := parser.ParseFile(fset, "", buf.Bytes(),
- parser.AllErrors, parser.ParseLambdas, parser.ParseComments)
+ parser.AllErrors, parser.ParseComments)
if err != nil {
t.Fatalf("%s\n%s", err, buf.Bytes())
}
diff --git a/cue/format/node.go b/cue/format/node.go
index 537e3e9..a51ad57 100644
--- a/cue/format/node.go
+++ b/cue/format/node.go
@@ -425,26 +425,6 @@
f.walkClauseList(x.Clauses)
f.print(unindent, f.wsOverride(blank), x.Rbrack, token.RBRACK)
- case *ast.LambdaExpr:
- f.print(x.Lparen, token.LPAREN, indent, noblank)
-
- f.before(nil)
- for _, x := range x.Params {
- f.label(x.Label)
- if x.Colon.IsValid() {
- f.print(x.Colon, token.COLON, blank)
- f.expr(x.Value)
- }
- f.print(comma, blank)
- }
- f.print(trailcomma, noblank)
- f.after(nil)
-
- f.print(trailcomma, noblank, unindent)
- f.print(x.Rparen, token.RPAREN, blank)
- f.print(token.LAMBDA, blank)
- f.expr(x.Expr)
-
default:
panic(fmt.Sprintf("unimplemented type %T", x))
}
diff --git a/cue/format/testdata/expressions.golden b/cue/format/testdata/expressions.golden
index 2300559..ed5f3bf 100644
--- a/cue/format/testdata/expressions.golden
+++ b/cue/format/testdata/expressions.golden
@@ -143,19 +143,6 @@
4,
]
- e: (a, b) -> a + b
- e: (a,
- b) -> a + b
- e: (a, b) -> {
- a: b
- }
- e: (a,
- b,
- ) ->
- {
- a: b
- }
-
e: e.f(1, 2)
e: (3 + 4)
diff --git a/cue/format/testdata/expressions.input b/cue/format/testdata/expressions.input
index d330b86..60f513a 100644
--- a/cue/format/testdata/expressions.input
+++ b/cue/format/testdata/expressions.input
@@ -143,19 +143,6 @@
4,
]
- e: (a, b) -> a + b
- e: (a,
- b) -> a + b
- e: (a, b) -> {
- a: b
- }
- e: (a,
- b
- ) ->
- {
- a: b
- }
-
e: e.f(1, 2)
e: ((3 + 4))
diff --git a/cue/parser/error_test.go b/cue/parser/error_test.go
index 376d343..5bfff87 100644
--- a/cue/parser/error_test.go
+++ b/cue/parser/error_test.go
@@ -168,7 +168,7 @@
}
fset := token.NewFileSet()
- _, err = ParseFile(fset, filename, src, DeclarationErrors, AllErrors, ParseLambdas)
+ _, err = ParseFile(fset, filename, src, DeclarationErrors, AllErrors)
found, ok := err.(errors.List)
if err != nil && !ok {
t.Error(err)
diff --git a/cue/parser/interface.go b/cue/parser/interface.go
index 3c62eb6..a66fcbf 100644
--- a/cue/parser/interface.go
+++ b/cue/parser/interface.go
@@ -76,15 +76,6 @@
p.mode |= parseCommentsMode
}
- // ParseLambdas enables parsing of Lambdas. By default these are disabled.
- //
- // NOTE: this option is for internal use only and can be made unavailable at
- // any time.
- ParseLambdas Option = parseLambdas
- parseLambdas = func(p *parser) {
- p.mode |= parseLambdasMode
- }
-
// Trace causes parsing to print a trace of parsed productions.
Trace Option = traceOpt
traceOpt = func(p *parser) {
@@ -119,7 +110,6 @@
packageClauseOnlyMode mode = 1 << iota // stop parsing after package clause
importsOnlyMode // stop parsing after import declarations
parseCommentsMode // parse comments and add them to AST
- parseLambdasMode
partialMode
traceMode // print a trace of parsed productions
declarationErrorsMode // report declaration errors
diff --git a/cue/parser/parser.go b/cue/parser/parser.go
index 52a7fcb..f6ed64b 100644
--- a/cue/parser/parser.go
+++ b/cue/parser/parser.go
@@ -542,40 +542,12 @@
defer func() { c.closeNode(p, expr) }()
lparen := p.pos
p.next()
- if p.tok == token.RPAREN && p.mode&parseLambdasMode != 0 {
- c.pos = 2
- rparen := p.expect(token.RPAREN)
- p.expect(token.LAMBDA)
- return &ast.LambdaExpr{
- Lparen: lparen,
- Rparen: rparen,
- Expr: p.parseRHS(),
- }
- }
p.exprLev++
p.openList()
x := p.parseRHS() // types may be parenthesized: (some type)
- var params []*ast.Field
- ident, ok := x.(*ast.Ident)
- if ok && (p.tok == token.COLON || p.tok == token.COMMA) && p.mode&parseLambdasMode != 0 {
- params = p.parseParams(ident, token.RPAREN)
- }
p.closeList()
p.exprLev--
rparen := p.expect(token.RPAREN)
- if p.tok == token.LAMBDA || params != nil && p.mode&parseLambdasMode != 0 {
- p.expect(token.LAMBDA)
- if params == nil {
- m := &ast.Field{Label: ident}
- params = append(params, m)
- }
- return &ast.LambdaExpr{
- Lparen: lparen,
- Params: params,
- Rparen: rparen,
- Expr: p.parseRHS(),
- }
- }
return &ast.ParenExpr{
Lparen: lparen,
X: x,
@@ -758,21 +730,6 @@
p.next()
}
return &ast.Alias{Ident: ident, Equal: pos, Expr: ref}
-
- case token.LPAREN:
- var value ast.Expr
- if p.mode&parseLambdasMode != 0 {
- c.pos = 2
- // TODO: Only allow LambdaExpr after non-quoted identifier.
- value = p.parseOperand()
- if _, ok := unparen(value).(*ast.LambdaExpr); !ok {
- p.error(value.Pos(), "expected lambda expression")
- }
- }
- if p.atComma("struct literal", token.RBRACE) { // TODO: may be EOF
- p.next()
- }
- return &ast.Field{Label: ident, Value: value}
}
}
@@ -1078,7 +1035,6 @@
case *ast.Interpolation:
case *ast.StructLit:
case *ast.ListLit:
- case *ast.LambdaExpr:
case *ast.ListComprehension:
case *ast.ParenExpr:
panic("unreachable")
diff --git a/cue/parser/parser_test.go b/cue/parser/parser_test.go
index ab4859c..e0c5a69 100644
--- a/cue/parser/parser_test.go
+++ b/cue/parser/parser_test.go
@@ -161,13 +161,6 @@
}`,
`{a: b[2], b: c[1:2], c: "asdf", d: c["a"]}`,
}, {
- "lambdas",
- `{
- a(P, Q, r: R) -> { p: P, q: Q }
- b: a(4002, "s")
- }`,
- `{a: (P: _,Q: _,r: R,) -> {p: P, q: Q}, b: a(4002, "s")}`, // c(C): {d(D): {}}}`,
- }, {
"calls",
`{
a: b(a.b, c.d)
@@ -292,25 +285,8 @@
a: 2 + // 2 +
3 + // 3 +
4 // 4
- l1( // sig
- ) -> // arrow
- 4 // expr
- l2(a // la
- ) -> // arrow
- a // l2
- l3(
- // param a
- a : // la
-
- // int
- int // lint
- ) -> // larrow
- a + 1
`,
- "<[l4// 4] a: <[l2// 3 +] <[l2// 2 +] 2+3>+4>>, " +
- "<[l4// expr] l1: <[l1// sig] [l4// arrow] () -> 4>>, " +
- "<[l4// l2] l2: <[l4// arrow] (<[l1// la] a: _>,) -> a>>, " +
- "l3: <[l4// larrow] (<[l1// la] [l3// lint] <[d0// param a] a>: <[d0// int] int>>,) -> a+1>",
+ "<[l4// 4] a: <[l2// 3 +] <[l2// 2 +] 2+3>+4>>",
}, {
"composit comments",
`a : {
@@ -360,7 +336,7 @@
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
fset := token.NewFileSet()
- mode := []Option{AllErrors, ParseLambdas}
+ mode := []Option{AllErrors}
if strings.Contains(tc.desc, "comments") {
mode = append(mode, ParseComments)
}
diff --git a/cue/parser/print.go b/cue/parser/print.go
index b02efcc..bf9ca8d 100644
--- a/cue/parser/print.go
+++ b/cue/parser/print.go
@@ -143,16 +143,6 @@
}
return out
- case *ast.LambdaExpr:
- out := "("
- for _, m := range v.Params {
- out += debugStr(m)
- out += ","
- }
- out += ") -> "
- out += debugStr(v.Expr)
- return out
-
case *ast.Ident:
return v.Name
diff --git a/cue/parser/resolve.go b/cue/parser/resolve.go
index 1ce01b2..98195d9 100644
--- a/cue/parser/resolve.go
+++ b/cue/parser/resolve.go
@@ -152,18 +152,6 @@
walk(s, x.X)
return nil
- case *ast.LambdaExpr:
- s = newScope(s.file, s, x, nil)
- for _, p := range x.Params {
- name, _ := ast.LabelName(p.Label)
- s.insert(name, p)
- if p.Value == nil {
- // TODO: make this optional
- p.Value = ast.NewIdent("_")
- s.insert(name, p)
- }
- }
-
case *ast.Ident:
if obj, node := s.lookup(x.Name); node != nil {
x.Node = node
diff --git a/cue/parser/short_test.go b/cue/parser/short_test.go
index 41b38da..6916135 100644
--- a/cue/parser/short_test.go
+++ b/cue/parser/short_test.go
@@ -21,7 +21,6 @@
var valids = []string{
"\n",
`{}`,
- `{ foo: "fmt", bar: () -> { baz: fmt.Service("Hello, World!") }, }`,
`{ <Name>: foo }`,
`{ a: 3 }`,
}
diff --git a/cue/parser/walk.go b/cue/parser/walk.go
index 1765ee8..29adad7 100644
--- a/cue/parser/walk.go
+++ b/cue/parser/walk.go
@@ -87,12 +87,6 @@
walk(v, n.Value)
}
- case *ast.LambdaExpr:
- for _, p := range n.Params {
- walk(v, p)
- }
- walk(v, n.Expr)
-
case *ast.StructLit:
for _, f := range n.Elts {
walk(v, f)
diff --git a/cue/resolve_test.go b/cue/resolve_test.go
index 6f9f12d..4888b8f 100644
--- a/cue/resolve_test.go
+++ b/cue/resolve_test.go
@@ -46,7 +46,7 @@
fset := token.NewFileSet()
x := newIndex(fset).NewInstance(nil)
- f, err := parser.ParseFile(fset, "test", body, parser.ParseLambdas)
+ f, err := parser.ParseFile(fset, "test", body)
ctx := x.newContext()
switch errs := err.(type) {
@@ -283,16 +283,6 @@
`,
out: `<0>{o1: (1 | 2 | 3), o2: 1, o3: 2, o4: (1 | 2 | 3), o5: (1! | 2! | 3!), o6: (1! | 2! | 3!), o7: (2 | 3), o8: (2! | 3!), o9: (2 | 3), o10: (3! | 2!), i1: "c"}`,
}, {
- desc: "lambda",
- in: `
- o1(A:1, B:2) -> { a: A, b: B }
- oe() -> { a: 1, b: 2 }
- l1: (A:1, B:2) -> { a: A, b: B }
- c1: ((A:int, B:int) -> {a:A, b:B})(1, 2)
- `,
- // TODO(P1): don't let values refer to themselves.
- out: "<0>{o1: <1>(A: 1, B: 2)-><2>{a: <1>.A, b: <1>.B}, oe: <3>()-><4>{a: 1, b: 2}, l1: <5>(A: 1, B: 2)-><6>{a: <5>.A, b: <5>.B}, c1: <7>{a: 1, b: 2}}",
- }, {
desc: "types",
in: `
i: int
@@ -449,26 +439,6 @@
// `,
// out: ``,
}, {
- desc: "call",
- in: `
- a: { a: (P, Q) -> {p:P, q:Q} }
- b: a // reference different nodes
- c: a.a(1, 2)
- `,
- out: "<0>{a: <1>{a: <2>(P: _, Q: _)-><3>{p: <2>.P, q: <2>.Q}}, b: <4>{a: <2>(P: _, Q: _)-><3>{p: <2>.P, q: <2>.Q}}, c: <5>{p: 1, q: 2}}",
- }, {
- desc: "call of lambda",
- in: `
- a(P, Q) -> {p:P, q:Q}
- a(P, Q) -> {p:P, q:Q}
- ai(P, Q) -> {p:Q, q:P}
- b: a(1,2)
- c: (a | b)(1)
- d: ([] | (a) -> 3)(2)
- e1: a(1)
- `,
- out: "<1>{a: <2>(P: _, Q: _)->(<3>{p: <2>.P, q: <2>.Q} & <4>{p: <2>.P, q: <2>.Q}), ai: <5>(P: _, Q: _)-><6>{p: <5>.Q, q: <5>.P}, b: <7>{p: 1, q: 2}, c: _|_((<8>.a | <8>.b) (1):number of arguments does not match (2 vs 1)), d: _|_(([] | <0>(a: _)->3):cannot call non-function [] (type list)), e1: _|_(<8>.a (1):number of arguments does not match (2 vs 1))}",
- }, {
desc: "reference across tuples and back",
// Tests that it is okay to partially evaluate structs.
in: `
diff --git a/cue/scanner/scanner.go b/cue/scanner/scanner.go
index fbace9d..89493e5 100644
--- a/cue/scanner/scanner.go
+++ b/cue/scanner/scanner.go
@@ -751,12 +751,7 @@
case '+':
tok = token.ADD // Consider ++ for list concatenate.
case '-':
- if s.ch == '>' {
- s.next()
- tok = token.LAMBDA
- } else {
- tok = token.SUB
- }
+ tok = token.SUB
case '*':
tok = token.MUL
case '/':
diff --git a/cue/scanner/scanner_test.go b/cue/scanner/scanner_test.go
index d793e94..2faf296 100644
--- a/cue/scanner/scanner_test.go
+++ b/cue/scanner/scanner_test.go
@@ -123,7 +123,6 @@
{token.LAND, "&&", operator},
{token.LOR, "||", operator},
- {token.LAMBDA, "->", operator},
{token.EQL, "==", operator},
{token.LSS, "<", operator},
diff --git a/cue/subsume_test.go b/cue/subsume_test.go
index 33ceb3d..b0d9382 100644
--- a/cue/subsume_test.go
+++ b/cue/subsume_test.go
@@ -137,8 +137,7 @@
65: {subsumes: true, in: `a: {}, b: {a: 1}`},
66: {subsumes: true, in: `a: {a:1}, b: {a:1, b:1}`},
67: {subsumes: true, in: `a: {s: { a:1} }, b: { s: { a:1, b:2 }}`},
- // TODO: allow subsumption of unevaluated values?
- 68: {subsumes: true, in: `a: {}, b: c(), c: () -> {}`},
+ 68: {subsumes: true, in: `a: {}, b: {}`},
// TODO: allow subsumption of unevaluated values?
// ref not yet evaluated and not structurally equivalent
69: {subsumes: true, in: `a: {}, b: {} & c, c: {}`},
@@ -147,21 +146,6 @@
71: {subsumes: false, in: `a: {a:1, b:1}, b: {a:1}`},
72: {subsumes: false, in: `a: {s: { a:1} }, b: { s: {}}`},
- // Lambda
- 73: {subsumes: true, in: `a: (x: _) -> {}, b: (x: _) -> {}`},
- 74: {subsumes: true, in: `a: (x: int) -> {}, b: (x: int) -> {}`},
- 75: {subsumes: true, in: `a: (x: {}) -> {}, b: (x: {}) -> {}`},
- 76: {subsumes: false, in: `a: (x: _) -> {}, b: (x: _, y:_) -> {}`},
- 77: {subsumes: true, in: `a: (x: _) -> {}, b: (x: 1) -> { a: 1 }`},
- 78: {subsumes: false, in: `a: (x: 1) -> {}, b: (x: _) -> {}`},
- 79: {subsumes: false, in: `a: (x: _) -> {}, b: () -> {}`},
-
- 80: {subsumes: true, in: `a: (x: _) -> {}, b: (y: _) -> {}`},
- 81: {subsumes: true, in: `a: (x) -> {}, b: (y) -> {}`},
-
- 82: {subsumes: true, in: `a: (x: {a:1}) -> {f:2}, b: (x: {a:1, b:1}) -> {f:2, g:3}`},
- 83: {subsumes: false, in: `a: (x: {a:1, b:1}) -> {f:2}, b: (x: {a:1}) -> {f:2, g:3}`},
-
// Disjunction TODO: for now these two are false: unifying may result in
// an ambiguity that we are currently not handling, so safer to not
// unify.
@@ -203,71 +187,29 @@
// true because both evaluate to bool
103: {subsumes: true, in: `a: !bool, b: bool`},
- 104: {subsumes: true, in: `
- a: () -> 2
- b: () -> 2`},
- 105: {subsumes: true, in: `
- a: () -> number
- b: () -> 2`},
- 106: {subsumes: true, in: `
- a: (a: number) -> 2
- b: (a: number) -> 2`},
- 107: {subsumes: true, in: `
- a: (a: number) -> 2
- b: (a: 2) -> 2`},
- 108: {subsumes: false, in: `
- a: (a: 2) -> 2
- b: (a: 2, b: 2) -> 2`},
- 109: {subsumes: false, in: `
- a: (a: number) -> 2,
- b: (a: number, b: number) -> 2`},
- 110: {subsumes: false, in: `
- a: () -> 3
- b: () -> number`},
- 111: {subsumes: false, in: `
- a: (a: 3) -> 2
- b: (a: number) -> 2`},
- 112: {subsumes: false, in: `
- a: (a: 3, b: 3) -> 2
- b: (a: 3) -> 2`},
-
// Call
113: {subsumes: true, in: `
- a: (() -> 2)(),
- b: (() -> 2)()`,
+ a: fn(),
+ b: fn()`,
},
// TODO: allow subsumption of unevaluated values?
114: {subsumes: true, in: `
- a: (() -> 2)(),
- b: ((a) -> 2)(1)`,
+ a: len(),
+ b: len(1)`,
},
115: {subsumes: true, in: `
- a: ((a: number) -> [2])(2)
- b: ((a: number) -> [2])(2)`,
+ a: fn(2)
+ b: fn(2)`,
},
// TODO: allow subsumption of unevaluated values?
116: {subsumes: true, in: `
- a: ((a: number) -> [2])(number)
- b: ((a: number) -> [2])(2)`,
+ a: fn(number)
+ b: fn(2)`,
},
// TODO: allow subsumption of unevaluated values?
117: {subsumes: true, in: `
- a: ((a: number) -> [2])(2)
- b: ((a: number) -> [2])(number)`,
- },
- 118: {subsumes: true, in: `
- a: ((a) -> number)(2)
- b: ((a) -> 2)(2)`,
- },
- 119: {subsumes: false, in: `
- a: ((a) -> 2)(2)
- b: ((a) -> number)(2)`,
- },
- // purely structural:
- // TODO: allow subsumption of unevaluated values?
- 120: {subsumes: true, in: `
- a: ((a) -> int)(2)
- b: int`,
+ a: fn(2)
+ b: fn(number)`,
},
// TODO: allow subsumption of unevaluated values?
@@ -319,6 +261,9 @@
re := regexp.MustCompile(`a: (.*).*b: ([^\n]*)`)
for i, tc := range testCases {
+ if tc.in == "" {
+ continue
+ }
m := re.FindStringSubmatch(strings.Join(strings.Split(tc.in, "\n"), ""))
const cutset = "\n ,"
key := strings.Trim(m[1], cutset) + " ⊑ " + strings.Trim(m[2], cutset)
diff --git a/cue/token/token.go b/cue/token/token.go
index ef50c34..edc9050 100644
--- a/cue/token/token.go
+++ b/cue/token/token.go
@@ -61,13 +61,12 @@
LAND // &&
LOR // ||
- BIND // =
- EQL // ==
- LSS // <
- GTR // >
- NOT // !
- ARROW // <-
- LAMBDA // ->
+ BIND // =
+ EQL // ==
+ LSS // <
+ GTR // >
+ NOT // !
+ ARROW // <-
NEQ // !=
LEQ // <=
@@ -133,13 +132,12 @@
LAND: "&&",
LOR: "||",
- BIND: "=",
- EQL: "==",
- LSS: "<",
- GTR: ">",
- NOT: "!",
- ARROW: "<-",
- LAMBDA: "->",
+ BIND: "=",
+ EQL: "==",
+ LSS: "<",
+ GTR: ">",
+ NOT: "!",
+ ARROW: "<-",
NEQ: "!=",
LEQ: "<=",
diff --git a/cue/types_test.go b/cue/types_test.go
index dfb77c0..7eeae0b 100644
--- a/cue/types_test.go
+++ b/cue/types_test.go
@@ -964,9 +964,6 @@
a: {}`,
err: "undefined field",
}, {
- value: `((a)->a)`,
- err: "cannot convert value",
- }, {
value: `true`,
json: `true`,
}, {