internal/core/eval: implement core evaluator

Does not yet implement imports and builtins.

- adds implementations for adt types
- adds eval package with higher-level evaluation
- some tweaks to the compile package

Change-Id: Ie91bd0bde8a03ed9957f306166042f56aebe19ce
Reviewed-on: https://cue-review.googlesource.com/c/cue/+/6280
Reviewed-by: Marcel van Lohuizen <mpvl@golang.org>
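
The out/eval sections added throughout the txtar files below are golden outputs of the new evaluator: each in.cue input is compiled to its adt representation (rendered as out/compile) and the resulting vertex is then evaluated and rendered in the debug notation shown under out/eval. The following is a minimal sketch of that flow; the package paths are the ones this change touches, but the exact names and signatures used here (runtime.New, compile.Files, eval.NewContext, Vertex.Finalize, debug.NodeString) are assumptions drawn from later revisions of these packages and may differ at this revision.

package main

import (
	"fmt"

	"cuelang.org/go/cue/parser"
	"cuelang.org/go/internal/core/compile"
	"cuelang.org/go/internal/core/debug"
	"cuelang.org/go/internal/core/eval"
	"cuelang.org/go/internal/core/runtime"
)

func main() {
	// Parse a CUE source file, analogous to the in.cue inputs in the
	// txtar test cases below.
	const src = `
a: 1
b: a + 1
`
	file, err := parser.ParseFile("in.cue", src)
	if err != nil {
		panic(err)
	}

	r := runtime.New()

	// Compile the AST into the adt representation; the out/compile
	// sections in the goldens are a debug rendering of this stage.
	// (Assumed signature; the pkgID argument may not exist here.)
	v, errs := compile.Files(nil, r, "main", file)
	if errs != nil {
		panic(errs)
	}

	// Evaluate the root vertex with the new evaluator; the out/eval
	// sections are a debug rendering of the finalized result.
	// (NewContext and Finalize are assumed names from later revisions.)
	ctx := eval.NewContext(r, v)
	v.Finalize(ctx)

	fmt.Println(debug.NodeString(r, v, nil))
}

Running such a sketch prints a tree in the same (type){ value } notation as the out/eval goldens, e.g. a: (int){ 1 } and b: (int){ 2 }.
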
diff --git a/cue/testdata/basicrewrite/000_errors.txtar b/cue/testdata/basicrewrite/000_errors.txtar
index 353eb21..251380a 100644
--- a/cue/testdata/basicrewrite/000_errors.txtar
+++ b/cue/testdata/basicrewrite/000_errors.txtar
@@ -19,9 +19,24 @@
 -- out/compile --
 --- in.cue
 {
-  a: (_|_ & _|_)
-  b: (null & _|_)
-  c: (〈0;b〉.a == _|_)
-  d: (_|_ != 〈0;b〉.a)
-  e: (_|_ == _|_)
+  a: (_|_(from source) & _|_(from source))
+  b: (null & _|_(from source))
+  c: (〈0;b〉.a == _|_(from source))
+  d: (_|_(from source) != 〈0;b〉.a)
+  e: (_|_(from source) == _|_(from source))
+}
+-- out/eval --
+(_|_){
+  // [user]
+  a: (_|_){
+    // [user] from source:
+    //     ./in.cue:1:4
+  }
+  b: (_|_){
+    // [user] from source:
+    //     ./in.cue:2:11
+  }
+  c: (bool){ true }
+  d: (bool){ false }
+  e: (bool){ true }
 }
diff --git a/cue/testdata/basicrewrite/001_regexp.txtar b/cue/testdata/basicrewrite/001_regexp.txtar
index 3ae91fd..86167aa 100644
--- a/cue/testdata/basicrewrite/001_regexp.txtar
+++ b/cue/testdata/basicrewrite/001_regexp.txtar
@@ -60,3 +60,30 @@
   e2: ("foo" !~ true)
   e3: (!="a" & <5)
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  c1: (bool){ true }
+  c2: (bool){ true }
+  c3: (bool){ false }
+  c4: (bool){ true }
+  b1: (string){ "a" }
+  b2: (string){ "foo" }
+  b3: (_|_){
+    // [eval] invalid value *adt.Vertex (out of bound *adt.BoundValue)
+  }
+  b4: (string){ "foo" }
+  s1: (string){ &(!="b", =~"c") }
+  s2: (string){ &(!="b", =~"[a-z]") }
+  e1: (_|_){
+    // [eval] cannot use *adt.Num (type int) as type (string|bytes):
+    //     ./in.cue:18:5
+  }
+  e2: (_|_){
+    // [eval] cannot use *adt.Bool (type bool) as type (string|bytes):
+    //     ./in.cue:19:5
+  }
+  e3: (_|_){
+    // [eval] invalid value *adt.BoundValue (mismatched types number and string)
+  }
+}
diff --git a/cue/testdata/basicrewrite/002_arithmetic.txtar b/cue/testdata/basicrewrite/002_arithmetic.txtar
index c6ad611..ca98463 100644
--- a/cue/testdata/basicrewrite/002_arithmetic.txtar
+++ b/cue/testdata/basicrewrite/002_arithmetic.txtar
@@ -93,3 +93,63 @@
   e7: (2 quo 2.0)
   e8: (1.0 mod 1)
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  i1: (int){ 1 }
+  i2: (int){ 2 }
+  sum: (int){ 1 }
+  div1: (float){ 4.00000000000000000000000 }
+  div2: (float){ 4.00000000000000000000000 }
+  div3: (float){ 1 }
+  divZero: (_|_){
+    // [eval] failed arithmetic: division by zero:
+    //     ./in.cue:8:10
+  }
+  div00: (_|_){
+    // [eval] failed arithmetic: division undefined:
+    //     ./in.cue:9:10
+  }
+  b: (bool){ true }
+  add: (float){ 5.00000000000000000000000 }
+  idiv00: (_|_){
+    // [eval] division by zero:
+    //     ./in.cue:13:9
+  }
+  imod00: (_|_){
+    // [eval] division by zero:
+    //     ./in.cue:14:9
+  }
+  iquo00: (_|_){
+    // [eval] division by zero:
+    //     ./in.cue:15:9
+  }
+  irem00: (_|_){
+    // [eval] division by zero:
+    //     ./in.cue:16:9
+  }
+  v1: (float){ 5.0000000000E+11 }
+  v2: (bool){ true }
+  v3: (float){ 0.666666666666666666666667 }
+  v5: (int){ 0 }
+  e0: (_|_){
+    // [eval] invalid operands *adt.Num and *adt.String to '+' (type int and string):
+    //     ./in.cue:23:5
+  }
+  e5: (_|_){
+    // [eval] invalid operands *adt.Num and *adt.Num to 'div' (type float and int):
+    //     ./in.cue:29:5
+  }
+  e6: (_|_){
+    // [eval] invalid operands *adt.Num and *adt.Num to 'rem' (type int and float):
+    //     ./in.cue:30:5
+  }
+  e7: (_|_){
+    // [eval] invalid operands *adt.Num and *adt.Num to 'quo' (type int and float):
+    //     ./in.cue:31:5
+  }
+  e8: (_|_){
+    // [eval] invalid operands *adt.Num and *adt.Num to 'mod' (type float and int):
+    //     ./in.cue:32:5
+  }
+}
diff --git a/cue/testdata/basicrewrite/003_integer-specific_arithmetic.txtar b/cue/testdata/basicrewrite/003_integer-specific_arithmetic.txtar
index 02c94b1..046f66e 100644
--- a/cue/testdata/basicrewrite/003_integer-specific_arithmetic.txtar
+++ b/cue/testdata/basicrewrite/003_integer-specific_arithmetic.txtar
@@ -85,3 +85,55 @@
   me1: (2.0 mod 1)
   me2: (2 mod 1.0)
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  q1: (int){ 2 }
+  q2: (int){ -2 }
+  q3: (int){ -2 }
+  q4: (int){ 2 }
+  qe1: (_|_){
+    // [eval] invalid operands *adt.Num and *adt.Num to 'quo' (type float and int):
+    //     ./in.cue:5:6
+  }
+  qe2: (_|_){
+    // [eval] invalid operands *adt.Num and *adt.Num to 'quo' (type int and float):
+    //     ./in.cue:6:6
+  }
+  r1: (int){ 1 }
+  r2: (int){ 1 }
+  r3: (int){ -1 }
+  r4: (int){ -1 }
+  re1: (_|_){
+    // [eval] invalid operands *adt.Num and *adt.Num to 'rem' (type float and int):
+    //     ./in.cue:12:6
+  }
+  re2: (_|_){
+    // [eval] invalid operands *adt.Num and *adt.Num to 'rem' (type int and float):
+    //     ./in.cue:13:6
+  }
+  d1: (int){ 2 }
+  d2: (int){ -2 }
+  d3: (int){ -3 }
+  d4: (int){ 3 }
+  de1: (_|_){
+    // [eval] invalid operands *adt.Num and *adt.Num to 'div' (type float and int):
+    //     ./in.cue:19:6
+  }
+  de2: (_|_){
+    // [eval] invalid operands *adt.Num and *adt.Num to 'div' (type int and float):
+    //     ./in.cue:20:6
+  }
+  m1: (int){ 1 }
+  m2: (int){ 1 }
+  m3: (int){ 1 }
+  m4: (int){ 1 }
+  me1: (_|_){
+    // [eval] invalid operands *adt.Num and *adt.Num to 'mod' (type float and int):
+    //     ./in.cue:26:6
+  }
+  me2: (_|_){
+    // [eval] invalid operands *adt.Num and *adt.Num to 'mod' (type int and float):
+    //     ./in.cue:27:6
+  }
+}
diff --git a/cue/testdata/basicrewrite/004_booleans.txtar b/cue/testdata/basicrewrite/004_booleans.txtar
index 7764733..2d969ce 100644
--- a/cue/testdata/basicrewrite/004_booleans.txtar
+++ b/cue/testdata/basicrewrite/004_booleans.txtar
@@ -25,3 +25,12 @@
   e: true
   e: !true
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  t: (bool){ true }
+  f: (bool){ false }
+  e: (_|_){
+    // [eval] incompatible values *adt.Bool and *adt.Bool
+  }
+}
diff --git a/cue/testdata/basicrewrite/005_boolean_arithmetic.txtar b/cue/testdata/basicrewrite/005_boolean_arithmetic.txtar
index a56a1a9..818f601 100644
--- a/cue/testdata/basicrewrite/005_boolean_arithmetic.txtar
+++ b/cue/testdata/basicrewrite/005_boolean_arithmetic.txtar
@@ -28,3 +28,15 @@
   e: (true & true)
   f: (true & false)
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  a: (bool){ true }
+  b: (bool){ true }
+  c: (bool){ false }
+  d: (bool){ true }
+  e: (bool){ true }
+  f: (_|_){
+    // [eval] incompatible values *adt.Bool and *adt.Bool
+  }
+}
diff --git a/cue/testdata/basicrewrite/006_basic_type.txtar b/cue/testdata/basicrewrite/006_basic_type.txtar
index 3e465d5..01e714b 100644
--- a/cue/testdata/basicrewrite/006_basic_type.txtar
+++ b/cue/testdata/basicrewrite/006_basic_type.txtar
@@ -32,3 +32,15 @@
   f: true
   f: bool
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  a: (int){ 1 }
+  b: (int){ 1 }
+  c: (float){ 1.0 }
+  d: (_|_){
+    // [eval] invalid value *adt.BasicType (mismatched types float and int)
+  }
+  e: (string){ "4" }
+  f: (bool){ true }
+}
diff --git a/cue/testdata/basicrewrite/007_strings_and_bytes.txtar b/cue/testdata/basicrewrite/007_strings_and_bytes.txtar
index 3a43fbc..c24c382 100644
--- a/cue/testdata/basicrewrite/007_strings_and_bytes.txtar
+++ b/cue/testdata/basicrewrite/007_strings_and_bytes.txtar
@@ -39,3 +39,21 @@
   e0: ("a" + '')
   e1: ('b' + "c")
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  s0: (string){ "foobar" }
+  s1: (string){ "abcabcabc" }
+  s2: (string){ "abcabc" }
+  b0: (bytes){ 'foobar' }
+  b1: (bytes){ 'abcabcabc' }
+  b2: (bytes){ 'abcabc' }
+  e0: (_|_){
+    // [eval] invalid operands *adt.String and *adt.Bytes to '+' (type string and bytes):
+    //     ./in.cue:10:5
+  }
+  e1: (_|_){
+    // [eval] invalid operands *adt.Bytes and *adt.String to '+' (type bytes and string):
+    //     ./in.cue:11:5
+  }
+}
diff --git a/cue/testdata/basicrewrite/008_escaping.txtar b/cue/testdata/basicrewrite/008_escaping.txtar
index ab9dc40..d6a2ccc 100644
--- a/cue/testdata/basicrewrite/008_escaping.txtar
+++ b/cue/testdata/basicrewrite/008_escaping.txtar
@@ -42,3 +42,8 @@
   a: "foo\nbar"
   b: 〈0;a〉
 }
+-- out/eval --
+(struct){
+  a: (string){ "foo\nbar" }
+  b: (string){ "foo\nbar" }
+}
diff --git a/cue/testdata/basicrewrite/009_reference.txtar b/cue/testdata/basicrewrite/009_reference.txtar
index b5ad55b..590cb84 100644
--- a/cue/testdata/basicrewrite/009_reference.txtar
+++ b/cue/testdata/basicrewrite/009_reference.txtar
@@ -80,3 +80,20 @@
     }
   }
 }
+-- out/eval --
+(struct){
+  a: (int){ 2 }
+  b: (int){ 2 }
+  d: (struct){
+    d: (int){ 3 }
+    e: (int){ 3 }
+  }
+  e: (struct){
+    e: (struct){
+      v: (int){ 1 }
+    }
+    f: (struct){
+      v: (int){ 1 }
+    }
+  }
+}
diff --git a/cue/testdata/basicrewrite/010_lists.txtar b/cue/testdata/basicrewrite/010_lists.txtar
index 577e592..50130fc 100644
--- a/cue/testdata/basicrewrite/010_lists.txtar
+++ b/cue/testdata/basicrewrite/010_lists.txtar
@@ -74,3 +74,47 @@
     ...(>=4 & <=5),
   ])
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  list: (#list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+    2: (int){ 3 }
+  }
+  index: (int){ 2 }
+  unify: (#list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+    2: (int){ 3 }
+  }
+  e: (_|_){
+    // [eval] conflicting types
+  }
+  e2: (_|_){
+    // [eval] invalid list index d (type string):
+    //     ./in.cue:5:12
+  }
+  e3: (_|_){
+    // [eval] invalid negative index *adt.Num:
+    //     ./in.cue:6:8
+  }
+  e4: (_|_){
+    // [eval]
+    0: (int){ 1 }
+    1: (int){ 2 }
+    2: (int){ 4 }
+    3: (_|_){
+      // [eval] invalid value *adt.Num (out of bound *adt.BoundValue)
+    }
+  }
+  e5: (_|_){
+    // [eval]
+    0: (int){ 1 }
+    1: (int){ 2 }
+    2: (int){ 4 }
+    3: (_|_){
+      // [eval] invalid value *adt.Num (out of bound *adt.BoundValue)
+    }
+  }
+}
diff --git a/cue/testdata/basicrewrite/011_list_arithmetic.txtar b/cue/testdata/basicrewrite/011_list_arithmetic.txtar
index 826caaa..c1b52fd 100644
--- a/cue/testdata/basicrewrite/011_list_arithmetic.txtar
+++ b/cue/testdata/basicrewrite/011_list_arithmetic.txtar
@@ -43,3 +43,43 @@
   mul1_2: (〈0;list1〉 * 2)
   e: (〈0;list〉 * -1)
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  list: (#list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+    2: (int){ 3 }
+  }
+  mul0: (#list){
+  }
+  mul1: (#list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+    2: (int){ 3 }
+  }
+  mul2: (#list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+    2: (int){ 3 }
+    3: (int){ 1 }
+    4: (int){ 2 }
+    5: (int){ 3 }
+  }
+  list1: (#list){
+    0: (int){ 1 }
+  }
+  mul1_0: (#list){
+  }
+  mul1_1: (#list){
+    0: (int){ 1 }
+  }
+  mul1_2: (#list){
+    0: (int){ 1 }
+    1: (int){ 1 }
+  }
+  e: (_|_){
+    // [eval] cannot convert negative number to uint64:
+    //     ./in.cue:9:9
+  }
+}
diff --git a/cue/testdata/basicrewrite/012_selecting.txtar b/cue/testdata/basicrewrite/012_selecting.txtar
index 71a0e98..8f3a41e 100644
--- a/cue/testdata/basicrewrite/012_selecting.txtar
+++ b/cue/testdata/basicrewrite/012_selecting.txtar
@@ -58,3 +58,29 @@
     3,
   ].b
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  obj: (struct){
+    a: (int){ 1 }
+    b: (int){ 2 }
+  }
+  index: (int){ 2 }
+  mulidx: (int){ 3 }
+  e: (_|_){
+    // [eval] invalid struct selector 4 (type int):
+    //     ./in.cue:4:16
+  }
+  f: (_|_){
+    // [incomplete] undefined field b:
+    //     ./in.cue:5:16
+  }
+  g: (_|_){
+    // [incomplete] undefined field b:
+    //     ./in.cue:6:16
+  }
+  h: (_|_){
+    // [eval] invalid list index b (type string):
+    //     ./in.cue:7:13
+  }
+}
diff --git a/cue/testdata/basicrewrite/013_obj_unify.txtar b/cue/testdata/basicrewrite/013_obj_unify.txtar
index a34e7f0..072fcb0 100644
--- a/cue/testdata/basicrewrite/013_obj_unify.txtar
+++ b/cue/testdata/basicrewrite/013_obj_unify.txtar
@@ -73,3 +73,27 @@
     a: 3
   }
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  o1: (struct){
+    a: (int){ 1 }
+    b: (int){ 2 }
+  }
+  o2: (struct){
+    a: (int){ 1 }
+    b: (int){ 2 }
+  }
+  o3: (struct){
+    a: (int){ 1 }
+    b: (int){ 2 }
+  }
+  o4: (struct){
+    a: (int){ 1 }
+    b: (int){ 2 }
+  }
+  e: (_|_){
+    // [eval] conflicting values struct and int
+    a: (int){ 3 }
+  }
+}
diff --git a/cue/testdata/basicrewrite/014_disjunctions.txtar b/cue/testdata/basicrewrite/014_disjunctions.txtar
index 887eae2..01e01ae 100644
--- a/cue/testdata/basicrewrite/014_disjunctions.txtar
+++ b/cue/testdata/basicrewrite/014_disjunctions.txtar
@@ -79,3 +79,22 @@
     2,
   ][3]|"c")
 }
+-- out/eval --
+(struct){
+  o1: (int){ |((int){ 1 }, (int){ 2 }, (int){ 3 }) }
+  o2: (int){ 1 }
+  o3: (int){ 2 }
+  o4: (int){ |((int){ 2 }, (int){ 1 }, (int){ 3 }) }
+  o5: (int){ |(*(int){ 2 }, (int){ 1 }, (int){ 3 }) }
+  o6: (int){ |((int){ 1 }, (int){ 2 }, (int){ 3 }) }
+  o7: (int){ |((int){ 2 }, (int){ 3 }) }
+  o8: (int){ |((int){ 2 }, (int){ 3 }) }
+  o9: (int){ |((int){ 2 }, (int){ 3 }) }
+  o10: (int){ |(*(int){ 2 }, (int){ 3 }) }
+  m1: (int){ |(*(int){ 2 }, (int){ 3 }) }
+  m2: (int){ |(*(int){ 2 }, (int){ 3 }) }
+  m3: (int){ |(*(int){ 2 }, (int){ 3 }) }
+  m4: (int){ |(*(int){ 2 }, (int){ 3 }) }
+  m5: (int){ |(*(int){ 2 }, (int){ 3 }) }
+  i1: (string){ "c" }
+}
diff --git a/cue/testdata/basicrewrite/015_types.txtar b/cue/testdata/basicrewrite/015_types.txtar
index a1c6fb1..dcf4145 100644
--- a/cue/testdata/basicrewrite/015_types.txtar
+++ b/cue/testdata/basicrewrite/015_types.txtar
@@ -37,3 +37,29 @@
   p: +true
   m: -false
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  i: (int){ int }
+  j: (int){ 3 }
+  s: (string){ string }
+  t: (string){ "s" }
+  e: (_|_){
+    // [eval] invalid value *adt.BasicType (mismatched types string and int)
+  }
+  e2: (_|_){
+    // [eval] invalid value *adt.BasicType (mismatched types string and int)
+  }
+  b: (_|_){
+    // [eval] value can never become concrete:
+    //     ./in.cue:7:5
+  }
+  p: (_|_){
+    // [eval] invalid operation +*adt.UnaryExpr (+ bool):
+    //     ./in.cue:8:5
+  }
+  m: (_|_){
+    // [eval] invalid operation -*adt.UnaryExpr (- bool):
+    //     ./in.cue:9:5
+  }
+}
diff --git a/cue/testdata/basicrewrite/016_comparison.txtar b/cue/testdata/basicrewrite/016_comparison.txtar
index e927f6b..6b98206 100644
--- a/cue/testdata/basicrewrite/016_comparison.txtar
+++ b/cue/testdata/basicrewrite/016_comparison.txtar
@@ -36,3 +36,18 @@
   seq: (("a" + "b") == "ab")
   err: (2 == "s")
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  lss: (bool){ true }
+  leq: (bool){ true }
+  eql: (bool){ true }
+  neq: (bool){ true }
+  gtr: (bool){ true }
+  geq: (bool){ true }
+  seq: (bool){ true }
+  err: (_|_){
+    // [eval] invalid operands *adt.Num and *adt.String to '==' (type int and string):
+    //     ./in.cue:9:6
+  }
+}
diff --git a/cue/testdata/basicrewrite/017_null.txtar b/cue/testdata/basicrewrite/017_null.txtar
index 966c49e..755b9f6 100644
--- a/cue/testdata/basicrewrite/017_null.txtar
+++ b/cue/testdata/basicrewrite/017_null.txtar
@@ -35,3 +35,17 @@
   ne1: ("s" != null)
   call: null()
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  eql: (bool){ true }
+  neq: (bool){ false }
+  unf: (null){ null }
+  eq1: (bool){ false }
+  eq2: (bool){ false }
+  ne1: (bool){ true }
+  call: (_|_){
+    // [eval] cannot call non-function *adt.Null (type nil):
+    //     ./in.cue:9:7
+  }
+}
diff --git a/cue/testdata/basicrewrite/018_self-reference_cycles.txtar b/cue/testdata/basicrewrite/018_self-reference_cycles.txtar
index 30912f8..1ac0e3d 100644
--- a/cue/testdata/basicrewrite/018_self-reference_cycles.txtar
+++ b/cue/testdata/basicrewrite/018_self-reference_cycles.txtar
@@ -23,3 +23,16 @@
     〈0;c〉[0],
   ]
 }
+-- out/eval --
+(struct){
+  a: (_|_){
+    // [cycle] cycle error
+  }
+  b: (_|_){
+    // [cycle] cycle error
+  }
+  c: (#list){
+    0: (_){ _ }
+    1: (_){ _ }
+  }
+}
diff --git a/cue/testdata/basicrewrite/019_resolved_self-reference_cycles.txtar b/cue/testdata/basicrewrite/019_resolved_self-reference_cycles.txtar
index 951c97d..a36703d 100644
--- a/cue/testdata/basicrewrite/019_resolved_self-reference_cycles.txtar
+++ b/cue/testdata/basicrewrite/019_resolved_self-reference_cycles.txtar
@@ -86,3 +86,27 @@
     c: 3
   })
 }
+-- out/eval --
+(struct){
+  a: (int){ 100 }
+  b: (int){ 200 }
+  c: (#list){
+    0: (int){ 100 }
+    1: (int){ 100 }
+  }
+  s1: (struct){
+    c: (int){ 3 }
+    b: (int){ 2 }
+    a: (int){ 1 }
+  }
+  s2: (struct){
+    a: (int){ 1 }
+    c: (int){ 3 }
+    b: (int){ 2 }
+  }
+  s3: (struct){
+    b: (int){ 2 }
+    a: (int){ 1 }
+    c: (int){ 3 }
+  }
+}
diff --git a/cue/testdata/basicrewrite/020_resolved_self-reference_cycles__Issue_19.txtar b/cue/testdata/basicrewrite/020_resolved_self-reference_cycles__Issue_19.txtar
index b25bad3..c7e18b3 100644
--- a/cue/testdata/basicrewrite/020_resolved_self-reference_cycles__Issue_19.txtar
+++ b/cue/testdata/basicrewrite/020_resolved_self-reference_cycles__Issue_19.txtar
@@ -49,3 +49,11 @@
   z3: (〈0;z1〉 - 3)
   z3: 8
 }
+-- out/eval --
+(struct){
+  x: (int){ 200 }
+  y: (int){ 100 }
+  z1: (int){ 11 }
+  z2: (int){ 10 }
+  z3: (int){ 8 }
+}
diff --git a/cue/testdata/choosedefault/000_pick_first.txtar b/cue/testdata/choosedefault/000_pick_first.txtar
index 47105f2..9bd2a7a 100644
--- a/cue/testdata/choosedefault/000_pick_first.txtar
+++ b/cue/testdata/choosedefault/000_pick_first.txtar
@@ -45,3 +45,14 @@
     })
   }
 }
+-- out/eval --
+(struct){
+  a: ((bool|int|string)){ |(*(int){ 5 }, (string){ "a" }, (bool){ true }) }
+  b: (struct){
+    c: (struct){ |(*(struct){
+        a: (int){ 2 }
+      }, (struct){
+        a: (int){ 3 }
+      }) }
+  }
+}
diff --git a/cue/testdata/choosedefault/001_simple_disambiguation_conflict.txtar b/cue/testdata/choosedefault/001_simple_disambiguation_conflict.txtar
index 66e83d3..2fe2c5c 100644
--- a/cue/testdata/choosedefault/001_simple_disambiguation_conflict.txtar
+++ b/cue/testdata/choosedefault/001_simple_disambiguation_conflict.txtar
@@ -19,3 +19,9 @@
   b: (*"b"|"a")
   c: (〈0;a〉 & 〈0;b〉)
 }
+-- out/eval --
+(struct){
+  a: (string){ |(*(string){ "a" }, (string){ "b" }) }
+  b: (string){ |(*(string){ "b" }, (string){ "a" }) }
+  c: (string){ |((string){ "a" }, (string){ "b" }) }
+}
diff --git a/cue/testdata/choosedefault/002_associativity_of_defaults.txtar b/cue/testdata/choosedefault/002_associativity_of_defaults.txtar
index 96647f7..0b8611b 100644
--- a/cue/testdata/choosedefault/002_associativity_of_defaults.txtar
+++ b/cue/testdata/choosedefault/002_associativity_of_defaults.txtar
@@ -25,3 +25,11 @@
   x: (〈0;a〉 & 〈0;b〉)
   y: (〈0;b〉 & 〈0;c〉)
 }
+-- out/eval --
+(struct){
+  a: (string){ |(*(string){ "a" }, (string){ "b" }, (string){ "c" }) }
+  b: (string){ |(*(string){ "a" }, (string){ "b" }, (string){ "c" }) }
+  c: (string){ |(*(string){ "a" }, *(string){ "b" }, (string){ "c" }) }
+  x: (string){ |(*(string){ "a" }, (string){ "b" }, (string){ "c" }) }
+  y: (string){ |(*(string){ "a" }, (string){ "b" }, (string){ "c" }) }
+}
diff --git a/cue/testdata/compile/erralias.txtar b/cue/testdata/compile/erralias.txtar
index deccc61..985a0c1 100644
--- a/cue/testdata/compile/erralias.txtar
+++ b/cue/testdata/compile/erralias.txtar
@@ -22,11 +22,11 @@
 --- in.cue
 {
   ["foo"]: 3
-  a: _|_
+  a: _|_(illegal reference Y)
   "\(〈0;b〉)": 3
   b: "foo"
   c: {}
   for _, x in 〈0;c〉 {
-    a: _|_
+    a: _|_(reference "E" not found)
   }
 }
diff --git a/cue/testdata/compile/scope.txtar b/cue/testdata/compile/scope.txtar
index 0f75b8d..094d82f 100644
--- a/cue/testdata/compile/scope.txtar
+++ b/cue/testdata/compile/scope.txtar
@@ -65,3 +65,25 @@
   }
   f: 〈0;let B〉
 }
+-- out/eval --
+(struct){
+  e: (struct){
+  }
+  a: (struct){
+    d: (struct){
+    }
+    e: (struct){
+    }
+    c: (struct){
+    }
+  }
+  b: (struct){
+  }
+  s: (string){ "foo" }
+  c: (string){ "foo" }
+  d: (struct){
+  }
+  f: (struct){
+    open: (int){ int }
+  }
+}
diff --git a/cue/testdata/fulleval/015_list_comprehension.txtar b/cue/testdata/comprehensions/015_list_comprehension.txtar
similarity index 74%
rename from cue/testdata/fulleval/015_list_comprehension.txtar
rename to cue/testdata/comprehensions/015_list_comprehension.txtar
index 5fcc1d8..14501a7 100644
--- a/cue/testdata/fulleval/015_list_comprehension.txtar
+++ b/cue/testdata/comprehensions/015_list_comprehension.txtar
@@ -80,3 +80,28 @@
     },
   ]
 }
+-- out/eval --
+(struct){
+  a: (#list){
+    0: (string){ "b" }
+    1: (string){ "c" }
+  }
+  b: (struct){
+    a: (int){ 1 }
+    b: (int){ 2 }
+    c: (int){ 3 }
+    d: (int){ 4 }
+  }
+  c: (#list){
+    0: (int){ 1 }
+    1: (int){ 1 }
+    2: (int){ 1 }
+    3: (int){ 2 }
+    4: (int){ 2 }
+    5: (int){ 3 }
+  }
+  d: (#list){
+    0: (int){ 0 }
+    1: (int){ 1 }
+  }
+}
diff --git a/cue/testdata/fulleval/045_comprehension_and_skipped_field.txtar b/cue/testdata/comprehensions/045_comprehension_and_skipped_field.txtar
similarity index 85%
rename from cue/testdata/fulleval/045_comprehension_and_skipped_field.txtar
rename to cue/testdata/comprehensions/045_comprehension_and_skipped_field.txtar
index c3eabd7..77525ec 100644
--- a/cue/testdata/fulleval/045_comprehension_and_skipped_field.txtar
+++ b/cue/testdata/comprehensions/045_comprehension_and_skipped_field.txtar
@@ -49,3 +49,12 @@
     }
   }
 }
+-- out/eval --
+(struct){
+  x: (struct){
+    v: (struct){
+      "1": (int){ 2 }
+    }
+    _p: (int){ 3 }
+  }
+}
diff --git a/cue/testdata/comprehensions/for.txtar b/cue/testdata/comprehensions/for.txtar
new file mode 100644
index 0000000..3f511b6
--- /dev/null
+++ b/cue/testdata/comprehensions/for.txtar
@@ -0,0 +1,55 @@
+-- in.cue --
+b: { for k, v in a { "\(k)": v+1 } }
+a: { b: 1, c: 2 }
+
+x: { for k, v in y { "\(k)": v } }
+y: {} // check that empty struct after reference works.
+
+k: { for v in e { v } }
+e: int
+-- out/eval --
+(_|_){
+  // [eval]
+  b: (struct){
+    b: (int){ 2 }
+    c: (int){ 3 }
+  }
+  a: (struct){
+    b: (int){ 1 }
+    c: (int){ 2 }
+  }
+  x: (struct){
+  }
+  y: (struct){
+  }
+  k: (_|_){
+    // [eval] invalid operand e (found int, want list or struct):
+    //     ./in.cue:7:15
+  }
+  e: (int){ int }
+}
+-- out/compile --
+--- in.cue
+{
+  b: {
+    for k, v in 〈1;a〉 {
+      "\(〈1;k〉)": (〈1;v〉 + 1)
+    }
+  }
+  a: {
+    b: 1
+    c: 2
+  }
+  x: {
+    for k, v in 〈1;y〉 {
+      "\(〈1;k〉)": 〈1;v〉
+    }
+  }
+  y: {}
+  k: {
+    for _, v in 〈1;e〉 {
+      〈1;v〉
+    }
+  }
+  e: int
+}
diff --git a/cue/testdata/comprehensions/issue287.txtar b/cue/testdata/comprehensions/issue287.txtar
new file mode 100644
index 0000000..243e4b7
--- /dev/null
+++ b/cue/testdata/comprehensions/issue287.txtar
@@ -0,0 +1,35 @@
+-- in.cue --
+if #E["x"] != _|_ {
+	#E: y: true
+}
+if #E["y"] != _|_ {
+	z: true
+}
+#E: [_]: bool
+#E: x: true
+-- out/eval --
+(struct){
+  #E: (#struct){
+    x: (bool){ true }
+    y: (bool){ true }
+  }
+  z: (bool){ true }
+}
+-- out/compile --
+--- in.cue
+{
+  if (〈0;#E〉["x"] != _|_(from source)) {
+    #E: {
+      y: true
+    }
+  }
+  if (〈0;#E〉["y"] != _|_(from source)) {
+    z: true
+  }
+  #E: {
+    [_]: bool
+  }
+  #E: {
+    x: true
+  }
+}
diff --git a/cue/testdata/comprehensions/issue293.txtar b/cue/testdata/comprehensions/issue293.txtar
new file mode 100644
index 0000000..d2b69ed
--- /dev/null
+++ b/cue/testdata/comprehensions/issue293.txtar
@@ -0,0 +1,30 @@
+-- in.cue --
+t: #C
+#C: {
+	if true {
+		p: _
+	}
+}
+t: p: "foo"
+-- out/eval --
+(struct){
+  t: (#struct){
+    p: (string){ "foo" }
+  }
+  #C: (#struct){
+    p: (_){ _ }
+  }
+}
+-- out/compile --
+--- in.cue
+{
+  t: 〈0;#C〉
+  #C: {
+    if true {
+      p: _
+    }
+  }
+  t: {
+    p: "foo"
+  }
+}
diff --git a/cue/testdata/comprehensions/issue436.txtar b/cue/testdata/comprehensions/issue436.txtar
new file mode 100644
index 0000000..c206191
--- /dev/null
+++ b/cue/testdata/comprehensions/issue436.txtar
@@ -0,0 +1,61 @@
+-- in.cue --
+#a: {
+    val: string
+    result: string
+}
+
+a: #a & {
+    val: *"default" | string
+    result: *"not-matched" | string
+    if (val == "match") {
+        result: "matched"
+    }
+}
+
+match: a & {
+    val: "match"
+}
+
+not: a & {
+    val: "other"
+}
+-- out/eval --
+(struct){
+  #a: (#struct){
+    val: (string){ string }
+    result: (string){ string }
+  }
+  a: (#struct){
+    val: (string){ "default" }
+    result: (string){ |(*(string){ "not-matched" }, (string){ string }) }
+  }
+  match: (#struct){
+    val: (string){ "match" }
+    result: (string){ "matched" }
+  }
+  not: (#struct){
+    val: (string){ "other" }
+    result: (string){ |(*(string){ "not-matched" }, (string){ string }) }
+  }
+}
+-- out/compile --
+--- in.cue
+{
+  #a: {
+    val: string
+    result: string
+  }
+  a: (〈0;#a〉 & {
+    val: (*"default"|string)
+    result: (*"not-matched"|string)
+    if (〈0;val〉 == "match") {
+      result: "matched"
+    }
+  })
+  match: (〈0;a〉 & {
+    val: "match"
+  })
+  not: (〈0;a〉 & {
+    val: "other"
+  })
+}
diff --git a/cue/testdata/resolve/015_reference_across_tuples_and_back.txtar b/cue/testdata/cycle/015_reference_across_tuples_and_back.txtar
similarity index 80%
rename from cue/testdata/resolve/015_reference_across_tuples_and_back.txtar
rename to cue/testdata/cycle/015_reference_across_tuples_and_back.txtar
index 7f8a0f1..4550cd4 100644
--- a/cue/testdata/resolve/015_reference_across_tuples_and_back.txtar
+++ b/cue/testdata/cycle/015_reference_across_tuples_and_back.txtar
@@ -46,3 +46,14 @@
     f: 〈1;a〉.c
   }
 }
+-- out/eval --
+(struct){
+  a: (struct){
+    c: (int){ 3 }
+    d: (int){ 3 }
+  }
+  b: (struct){
+    e: (int){ 3 }
+    f: (_){ _ }
+  }
+}
diff --git a/cue/testdata/basicrewrite/021_delayed_constraint_failure.txtar b/cue/testdata/cycle/021_delayed_constraint_failure.txtar
similarity index 74%
rename from cue/testdata/basicrewrite/021_delayed_constraint_failure.txtar
rename to cue/testdata/cycle/021_delayed_constraint_failure.txtar
index e35cec9..47a55f4 100644
--- a/cue/testdata/basicrewrite/021_delayed_constraint_failure.txtar
+++ b/cue/testdata/cycle/021_delayed_constraint_failure.txtar
@@ -24,3 +24,14 @@
   x: 100
   x: (〈0;x〉 + 1)
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  a: (int){ 100 }
+  b: (_|_){
+    // [eval] incompatible values *adt.Num and *adt.Num
+  }
+  x: (_|_){
+    // [eval] incompatible values *adt.Num and *adt.Num
+  }
+}
diff --git a/cue/testdata/fulleval/023_reentrance.txtar b/cue/testdata/cycle/023_reentrance.txtar
similarity index 83%
rename from cue/testdata/fulleval/023_reentrance.txtar
rename to cue/testdata/cycle/023_reentrance.txtar
index e074f6c..1b6596e 100644
--- a/cue/testdata/fulleval/023_reentrance.txtar
+++ b/cue/testdata/cycle/023_reentrance.txtar
@@ -78,3 +78,21 @@
     n: 12
   }).out
 }
+-- out/eval --
+(struct){
+  fibRec: (struct){
+    nn: (int){ int }
+    out: (_|_){
+      // [incomplete] incomplete
+      // undefined field out:
+      //     ./in.cue:3:40
+    }
+  }
+  fib: (_|_){
+    // [incomplete] incomplete
+    n: (int){ int }
+  }
+  fib2: (int){ 1 }
+  fib7: (int){ 13 }
+  fib12: (int){ 144 }
+}
diff --git a/cue/testdata/fulleval/025_cannot_resolve_references_that_would_be_ambiguous.txtar b/cue/testdata/cycle/025_cannot_resolve_references_that_would_be_ambiguous.txtar
similarity index 72%
rename from cue/testdata/fulleval/025_cannot_resolve_references_that_would_be_ambiguous.txtar
rename to cue/testdata/cycle/025_cannot_resolve_references_that_would_be_ambiguous.txtar
index 981e750..94aba3e 100644
--- a/cue/testdata/fulleval/025_cannot_resolve_references_that_would_be_ambiguous.txtar
+++ b/cue/testdata/cycle/025_cannot_resolve_references_that_would_be_ambiguous.txtar
@@ -53,3 +53,25 @@
     b: 2
   }) & 〈0;c1〉)
 }
+-- out/eval --
+(struct){
+  a1: (_|_){
+    // [incomplete]
+  }
+  a2: (_|_){
+    // [incomplete]
+  }
+  a3: (int){ 1 }
+  b1: (_|_){
+    // [incomplete] ambiguous disjunction
+  }
+  b2: (_|_){
+    // [incomplete] ambiguous disjunction
+  }
+  c1: (_|_){
+    // [incomplete] ambiguous disjunction
+  }
+  c2: (_|_){
+    // [incomplete] ambiguous disjunction
+  }
+}
diff --git a/cue/testdata/resolve/049_self-reference_cycles_conflicts_with_strings.txtar b/cue/testdata/cycle/049_self-reference_cycles_conflicts_with_strings.txtar
similarity index 67%
rename from cue/testdata/resolve/049_self-reference_cycles_conflicts_with_strings.txtar
rename to cue/testdata/cycle/049_self-reference_cycles_conflicts_with_strings.txtar
index 5505a1b..a6402eb 100644
--- a/cue/testdata/resolve/049_self-reference_cycles_conflicts_with_strings.txtar
+++ b/cue/testdata/cycle/049_self-reference_cycles_conflicts_with_strings.txtar
@@ -26,3 +26,16 @@
     x: "hey"
   }
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  a: (_|_){
+    // [eval]
+    x: (_|_){
+      // [eval] incompatible values *adt.String and *adt.String
+    }
+    y: (_|_){
+      // [eval] incompatible values *adt.String and *adt.String
+    }
+  }
+}
diff --git a/cue/testdata/resolve/050_resolved_self-reference_cycles_with_disjunctions.txtar b/cue/testdata/cycle/050_resolved_self-reference_cycles_with_disjunctions.txtar
similarity index 63%
rename from cue/testdata/resolve/050_resolved_self-reference_cycles_with_disjunctions.txtar
rename to cue/testdata/cycle/050_resolved_self-reference_cycles_with_disjunctions.txtar
index fcb31da..ff1b338 100644
--- a/cue/testdata/resolve/050_resolved_self-reference_cycles_with_disjunctions.txtar
+++ b/cue/testdata/cycle/050_resolved_self-reference_cycles_with_disjunctions.txtar
@@ -43,3 +43,27 @@
     z: 3
   })
 }
+-- out/eval --
+(struct){
+  a: (struct){ |((struct){
+      x: (int){ 1 }
+      z: (int){ 2 }
+      y: (int){ 3 }
+    }, (struct){
+      y: (int){ 1 }
+    }) }
+  b: (struct){ |((struct){
+      x: (int){ 2 }
+    }, (struct){
+      z: (int){ 2 }
+      y: (int){ 3 }
+      x: (int){ 1 }
+    }) }
+  c: (struct){ |((struct){
+      y: (int){ 3 }
+      x: (int){ 1 }
+      z: (int){ 2 }
+    }, (struct){
+      z: (int){ 3 }
+    }) }
+}
diff --git a/cue/testdata/resolve/051_resolved_self-reference_cycles_with_disjunction.txtar b/cue/testdata/cycle/051_resolved_self-reference_cycles_with_disjunction.txtar
similarity index 79%
rename from cue/testdata/resolve/051_resolved_self-reference_cycles_with_disjunction.txtar
rename to cue/testdata/cycle/051_resolved_self-reference_cycles_with_disjunction.txtar
index 411f100..6021da7 100644
--- a/cue/testdata/resolve/051_resolved_self-reference_cycles_with_disjunction.txtar
+++ b/cue/testdata/cycle/051_resolved_self-reference_cycles_with_disjunction.txtar
@@ -35,7 +35,7 @@
 xc4: xc2 + 1
 xc5: xc2 + 2
 
-// The above is resolved by setting xd1 explicitly.
+// The below is resolved by setting xd1 explicitly.
 xd1: xd2 & 8 | xd4 & 9 | xd5 & 9
 xd2: xd3 + 2
 xd3: 6 & xd1-2
@@ -43,7 +43,7 @@
 xd5: xd2 + 2
 xd1: 8
 
-// The above is resolved by setting xd1 explicitly to the wrong
+// The below is resolved by setting xd1 explicitly to the wrong
 // value, resulting in an error.
 xe1: xe2 & 8 | xe4 & 9 | xe5 & 9
 xe2: xe3 + 2
@@ -58,7 +58,7 @@
 xf3: 6 & xf1-2 | xf4 & 9
 xf4: xf2 + 2
 
-z1: z2+1 | z3+5
+z1: z2+1 | z3+5 // +3 for fun
 z2: z3 + 2
 z3: z1 - 3
 z3: 8
@@ -157,3 +157,51 @@
   z3: (〈0;z1〉 - 3)
   z3: 8
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  xa1: (int){ 8 }
+  xa2: (int){ 8 }
+  xa3: (int){ 6 }
+  xa4: (int){ 10 }
+  xb1: (int){ 8 }
+  xb2: (int){ 8 }
+  xb3: (int){ 6 }
+  xb4: (int){ 10 }
+  xc1: (int){ |((int){ 8 }, (int){ 9 }) }
+  xc2: (int){ 8 }
+  xc3: (int){ 6 }
+  xc4: (int){ 9 }
+  xc5: (int){ 10 }
+  xd1: (int){ 8 }
+  xd2: (int){ 8 }
+  xd3: (int){ 6 }
+  xd4: (int){ 9 }
+  xd5: (int){ 10 }
+  xe1: (_|_){
+    // [incomplete] empty disjunction
+  }
+  xe2: (_|_){
+    // [eval] incompatible values *adt.Num and *adt.Num:
+    //     ./in.cue:44:6
+  }
+  xe3: (_|_){
+    // [eval] incompatible values *adt.Num and *adt.Num:
+    //     ./in.cue:44:6
+  }
+  xe4: (_|_){
+    // [eval] incompatible values *adt.Num and *adt.Num:
+    //     ./in.cue:44:6
+  }
+  xe5: (_|_){
+    // [eval] incompatible values *adt.Num and *adt.Num:
+    //     ./in.cue:44:6
+  }
+  xf1: (int){ 8 }
+  xf2: (int){ 8 }
+  xf3: (int){ 6 }
+  xf4: (int){ 10 }
+  z1: (int){ |((int){ 11 }, (int){ 13 }) }
+  z2: (int){ 10 }
+  z3: (int){ 8 }
+}
diff --git a/cue/testdata/resolve/052_resolved_self-reference_cycles_with_disjunction_with_defaults.txtar b/cue/testdata/cycle/052_resolved_self-reference_cycles_with_disjunction_with_defaults.txtar
similarity index 80%
rename from cue/testdata/resolve/052_resolved_self-reference_cycles_with_disjunction_with_defaults.txtar
rename to cue/testdata/cycle/052_resolved_self-reference_cycles_with_disjunction_with_defaults.txtar
index 92537aa..c69587d 100644
--- a/cue/testdata/resolve/052_resolved_self-reference_cycles_with_disjunction_with_defaults.txtar
+++ b/cue/testdata/cycle/052_resolved_self-reference_cycles_with_disjunction_with_defaults.txtar
@@ -127,3 +127,47 @@
   z3: (〈0;z1〉 - 3)
   z3: 8
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  xa1: (int){ 8 }
+  xa2: (int){ 8 }
+  xa3: (int){ 6 }
+  xa4: (int){ 10 }
+  xb1: (int){ 8 }
+  xb2: (int){ 8 }
+  xb3: (int){ 6 }
+  xb4: (int){ 10 }
+  xc1: (int){ |(*(int){ 8 }, (int){ 9 }) }
+  xc2: (int){ 8 }
+  xc3: (int){ 6 }
+  xc4: (int){ 9 }
+  xc5: (int){ 10 }
+  xd1: (int){ |(*(int){ 8 }, (int){ 9 }) }
+  xd2: (int){ 8 }
+  xd3: (int){ 6 }
+  xd4: (int){ 9 }
+  xd5: (int){ 10 }
+  xe1: (_|_){
+    // [incomplete] empty disjunction
+  }
+  xe2: (_|_){
+    // [eval] incompatible values *adt.Num and *adt.Num:
+    //     ./in.cue:37:6
+  }
+  xe3: (_|_){
+    // [eval] incompatible values *adt.Num and *adt.Num:
+    //     ./in.cue:37:6
+  }
+  xe4: (_|_){
+    // [eval] incompatible values *adt.Num and *adt.Num:
+    //     ./in.cue:37:6
+  }
+  xe5: (_|_){
+    // [eval] incompatible values *adt.Num and *adt.Num:
+    //     ./in.cue:37:6
+  }
+  z1: (int){ |(*(int){ 11 }, (int){ 13 }) }
+  z2: (int){ 10 }
+  z3: (int){ 8 }
+}
diff --git a/cue/testdata/cycle/cycle_with_bounds.txtar b/cue/testdata/cycle/cycle_with_bounds.txtar
new file mode 100644
index 0000000..38d6eff
--- /dev/null
+++ b/cue/testdata/cycle/cycle_with_bounds.txtar
@@ -0,0 +1,27 @@
+-- in.cue --
+#Value: int
+
+foo: #Value
+foo: != bar
+bar: #Value
+bar: != foo
+
+bar: 0
+foo: 1
+-- out/compile --
+--- in.cue
+{
+  #Value: int
+  foo: 〈0;#Value〉
+  foo: !=〈0;bar〉
+  bar: 〈0;#Value〉
+  bar: !=〈0;foo〉
+  bar: 0
+  foo: 1
+}
+-- out/eval --
+(struct){
+  #Value: (int){ int }
+  foo: (int){ 1 }
+  bar: (int){ 0 }
+}
diff --git a/cue/testdata/cycle/issue241.txtar b/cue/testdata/cycle/issue241.txtar
new file mode 100644
index 0000000..833dab2
--- /dev/null
+++ b/cue/testdata/cycle/issue241.txtar
@@ -0,0 +1,79 @@
+-- in.cue --
+#Value: 0 | 1
+
+foo: #Value
+foo: != bar
+bar: #Value
+bar: != foo
+
+#Value: 0 | 1
+
+Foo: #Value
+Foo: != Bar
+Bar: #Value
+Bar: != Foo
+Bar: 0
+
+cell: a:  0 | 1
+cell: a:  != cell.b
+cell: b:  0 | 1
+cell: b:  != cell.a
+cell: a:  0
+cell: b:  _
+
+a: cell.a
+b: cell.b
+
+-- out/eval --
+(struct){
+  #Value: (int){ |((int){ 0 }, (int){ 1 }) }
+  foo: (_|_){
+    // [incomplete] incomplete cause disjunction
+  }
+  bar: (_|_){
+    // [incomplete] incomplete cause disjunction
+  }
+  Foo: (int){ 1 }
+  Bar: (int){ 0 }
+  cell: (struct){
+    a: (int){ 0 }
+    b: (int){ 1 }
+  }
+  a: (int){ 0 }
+  b: (int){ 1 }
+}
+-- out/compile --
+--- in.cue
+{
+  #Value: (0|1)
+  foo: 〈0;#Value〉
+  foo: !=〈0;bar〉
+  bar: 〈0;#Value〉
+  bar: !=〈0;foo〉
+  #Value: (0|1)
+  Foo: 〈0;#Value〉
+  Foo: !=〈0;Bar〉
+  Bar: 〈0;#Value〉
+  Bar: !=〈0;Foo〉
+  Bar: 0
+  cell: {
+    a: (0|1)
+  }
+  cell: {
+    a: !=〈1;cell〉.b
+  }
+  cell: {
+    b: (0|1)
+  }
+  cell: {
+    b: !=〈1;cell〉.a
+  }
+  cell: {
+    a: 0
+  }
+  cell: {
+    b: _
+  }
+  a: 〈0;cell〉.a
+  b: 〈0;cell〉.b
+}
diff --git a/cue/testdata/cycle/issue242.txtar b/cue/testdata/cycle/issue242.txtar
new file mode 100644
index 0000000..b195f0d
--- /dev/null
+++ b/cue/testdata/cycle/issue242.txtar
@@ -0,0 +1,321 @@
+-- in.cue --
+size:: 2
+#CellValue: 0 | 1 | 2 | 3
+cell: "0": "0": #CellValue
+cell: "0": "0": != cell["0"]["1"]
+cell: "0": "0": != cell["1"]["0"]
+cell: "0": "0": != cell["1"]["1"]
+
+cell: "0": "1": #CellValue
+cell: "0": "1": != cell["0"]["0"]
+cell: "0": "1": != cell["1"]["0"]
+cell: "0": "1": != cell["1"]["1"]
+
+cell: "1": "0": #CellValue
+cell: "1": "0": != cell["0"]["0"]
+cell: "1": "0": != cell["0"]["1"]
+cell: "1": "0": != cell["1"]["1"]
+
+cell: "1": "1": #CellValue
+cell: "1": "1": != cell["0"]["0"]
+cell: "1": "1": != cell["0"]["1"]
+cell: "1": "1": != cell["1"]["0"]
+
+cell: "0": "0": 0
+cell: "0": "1": 1
+cell: "1": "0": 2
+cell: "1": "1": 3
+
+cell2: a:  0 | 1
+cell2: a:  != cell2.b
+
+cell2: b:  0 | 1
+cell2: b:  != cell2.a
+
+cell2: a:  _
+cell2: b:  1
+
+cell3: a:  0 | 1
+cell3: a:  != cell3.b
+
+cell3: b:  0 | 1
+cell3: b:  != cell3.a
+
+cell3: a:  0
+cell3: b:  _
+
+cell4: a:  0 | 1
+cell4: a:  != cell4.b
+
+cell4: b:  0 | 1
+cell4: b:  != cell4.a
+
+cell4: b:  _
+cell4: a:  0
+
+cell5: b:  0 | 1
+cell5: b:  != cell5.a
+
+cell5: a:  0 | 1
+cell5: a:  != cell5.b
+
+cell5: b:  _
+cell5: a:  0
+
+a: #CellValue
+a: != b
+a: != c
+a: != d
+
+b: #CellValue
+b: != a
+b: != c
+b: != d
+
+c: #CellValue
+c: != a
+c: != b
+c: != d
+
+d: #CellValue
+d: != a
+d: != b
+d: != c
+
+a: 0
+b: 1
+c: 2
+d: 3
+-- out/eval --
+(struct){
+  size: (int){ 2 }
+  #CellValue: (int){ |((int){ 0 }, (int){ 1 }, (int){ 2 }, (int){ 3 }) }
+  cell: (struct){
+    "0": (struct){
+      "0": (int){ 0 }
+      "1": (int){ 1 }
+    }
+    "1": (struct){
+      "0": (int){ 2 }
+      "1": (int){ 3 }
+    }
+  }
+  cell2: (struct){
+    a: (int){ 0 }
+    b: (int){ 1 }
+  }
+  cell3: (struct){
+    a: (int){ 0 }
+    b: (int){ 1 }
+  }
+  cell4: (struct){
+    a: (int){ 0 }
+    b: (int){ 1 }
+  }
+  cell5: (struct){
+    b: (int){ 1 }
+    a: (int){ 0 }
+  }
+  a: (int){ 0 }
+  b: (int){ 1 }
+  c: (int){ 2 }
+  d: (int){ 3 }
+}
+-- out/compile --
+--- in.cue
+{
+  size:: 2
+  #CellValue: (0|1|2|3)
+  cell: {
+    "0": {
+      "0": 〈2;#CellValue〉
+    }
+  }
+  cell: {
+    "0": {
+      "0": !=〈2;cell〉["0"]["1"]
+    }
+  }
+  cell: {
+    "0": {
+      "0": !=〈2;cell〉["1"]["0"]
+    }
+  }
+  cell: {
+    "0": {
+      "0": !=〈2;cell〉["1"]["1"]
+    }
+  }
+  cell: {
+    "0": {
+      "1": 〈2;#CellValue〉
+    }
+  }
+  cell: {
+    "0": {
+      "1": !=〈2;cell〉["0"]["0"]
+    }
+  }
+  cell: {
+    "0": {
+      "1": !=〈2;cell〉["1"]["0"]
+    }
+  }
+  cell: {
+    "0": {
+      "1": !=〈2;cell〉["1"]["1"]
+    }
+  }
+  cell: {
+    "1": {
+      "0": 〈2;#CellValue〉
+    }
+  }
+  cell: {
+    "1": {
+      "0": !=〈2;cell〉["0"]["0"]
+    }
+  }
+  cell: {
+    "1": {
+      "0": !=〈2;cell〉["0"]["1"]
+    }
+  }
+  cell: {
+    "1": {
+      "0": !=〈2;cell〉["1"]["1"]
+    }
+  }
+  cell: {
+    "1": {
+      "1": 〈2;#CellValue〉
+    }
+  }
+  cell: {
+    "1": {
+      "1": !=〈2;cell〉["0"]["0"]
+    }
+  }
+  cell: {
+    "1": {
+      "1": !=〈2;cell〉["0"]["1"]
+    }
+  }
+  cell: {
+    "1": {
+      "1": !=〈2;cell〉["1"]["0"]
+    }
+  }
+  cell: {
+    "0": {
+      "0": 0
+    }
+  }
+  cell: {
+    "0": {
+      "1": 1
+    }
+  }
+  cell: {
+    "1": {
+      "0": 2
+    }
+  }
+  cell: {
+    "1": {
+      "1": 3
+    }
+  }
+  cell2: {
+    a: (0|1)
+  }
+  cell2: {
+    a: !=〈1;cell2〉.b
+  }
+  cell2: {
+    b: (0|1)
+  }
+  cell2: {
+    b: !=〈1;cell2〉.a
+  }
+  cell2: {
+    a: _
+  }
+  cell2: {
+    b: 1
+  }
+  cell3: {
+    a: (0|1)
+  }
+  cell3: {
+    a: !=〈1;cell3〉.b
+  }
+  cell3: {
+    b: (0|1)
+  }
+  cell3: {
+    b: !=〈1;cell3〉.a
+  }
+  cell3: {
+    a: 0
+  }
+  cell3: {
+    b: _
+  }
+  cell4: {
+    a: (0|1)
+  }
+  cell4: {
+    a: !=〈1;cell4〉.b
+  }
+  cell4: {
+    b: (0|1)
+  }
+  cell4: {
+    b: !=〈1;cell4〉.a
+  }
+  cell4: {
+    b: _
+  }
+  cell4: {
+    a: 0
+  }
+  cell5: {
+    b: (0|1)
+  }
+  cell5: {
+    b: !=〈1;cell5〉.a
+  }
+  cell5: {
+    a: (0|1)
+  }
+  cell5: {
+    a: !=〈1;cell5〉.b
+  }
+  cell5: {
+    b: _
+  }
+  cell5: {
+    a: 0
+  }
+  a: 〈0;#CellValue〉
+  a: !=〈0;b〉
+  a: !=〈0;c〉
+  a: !=〈0;d〉
+  b: 〈0;#CellValue〉
+  b: !=〈0;a〉
+  b: !=〈0;c〉
+  b: !=〈0;d〉
+  c: 〈0;#CellValue〉
+  c: !=〈0;a〉
+  c: !=〈0;b〉
+  c: !=〈0;d〉
+  d: 〈0;#CellValue〉
+  d: !=〈0;a〉
+  d: !=〈0;b〉
+  d: !=〈0;c〉
+  a: 0
+  b: 1
+  c: 2
+  d: 3
+}
diff --git a/cue/testdata/cycle/issue429.txtar b/cue/testdata/cycle/issue429.txtar
new file mode 100644
index 0000000..fb9aaff
--- /dev/null
+++ b/cue/testdata/cycle/issue429.txtar
@@ -0,0 +1,72 @@
+-- in.cue --
+range1: {
+	min: *1 | int
+	range: >min
+	range: 8
+}
+range2: {
+	min: *1 | int
+	max: int & >min
+}
+rg: range2 & {
+//	min: 1
+	max: 8
+}
+#Size : {
+  res: uint | * 0
+  num: > res | *(1 + res)
+  max: > num | *num
+}
+
+a: #Size & {
+  num: 5
+}
+-- out/eval --
+(struct){
+  range1: (struct){
+    min: (int){ |(*(int){ 1 }, (int){ int }) }
+    range: (int){ 8 }
+  }
+  range2: (struct){
+    min: (int){ |(*(int){ 1 }, (int){ int }) }
+    max: (int){ &(>1, int) }
+  }
+  rg: (struct){
+    min: (int){ |(*(int){ 1 }, (int){ int }) }
+    max: (int){ 8 }
+  }
+  #Size: (#struct){
+    res: (int){ |(*(int){ 0 }, (int){ &(>=0, int) }) }
+    num: (number){ |(*(int){ 1 }, (number){ >0 }) }
+    max: (number){ |(*(int){ 1 }, (number){ >0 }, (number){ >1 }) }
+  }
+  a: (#struct){
+    res: (int){ |(*(int){ 0 }, (int){ &(>=0, int) }) }
+    num: (int){ 5 }
+    max: (number){ |((int){ 5 }, (number){ >5 }) }
+  }
+}
+-- out/compile --
+--- in.cue
+{
+  range1: {
+    min: (*1|int)
+    range: >〈0;min〉
+    range: 8
+  }
+  range2: {
+    min: (*1|int)
+    max: (int & >〈0;min〉)
+  }
+  rg: (〈0;range2〉 & {
+    max: 8
+  })
+  #Size: {
+    res: (&(int, >=0)|*0)
+    num: (>〈0;res〉|*(1 + 〈0;res〉))
+    max: (>〈0;num〉|*〈0;num〉)
+  }
+  a: (〈0;#Size〉 & {
+    num: 5
+  })
+}
diff --git a/cue/testdata/cycle/with_defaults.txtar b/cue/testdata/cycle/with_defaults.txtar
new file mode 100644
index 0000000..74f31e5
--- /dev/null
+++ b/cue/testdata/cycle/with_defaults.txtar
@@ -0,0 +1,45 @@
+-- in.cue --
+range1: {
+	min: *1 | int
+	range: >min
+	range: 8
+}
+range2: {
+	min: *1 | int
+	max: int & >min
+}
+rg: range2 & {
+//	min: 1
+	max: 8
+}
+-- out/eval --
+(struct){
+  range1: (struct){
+    min: (int){ |(*(int){ 1 }, (int){ int }) }
+    range: (int){ 8 }
+  }
+  range2: (struct){
+    min: (int){ |(*(int){ 1 }, (int){ int }) }
+    max: (int){ &(>1, int) }
+  }
+  rg: (struct){
+    min: (int){ |(*(int){ 1 }, (int){ int }) }
+    max: (int){ 8 }
+  }
+}
+-- out/compile --
+--- in.cue
+{
+  range1: {
+    min: (*1|int)
+    range: >〈0;min〉
+    range: 8
+  }
+  range2: {
+    min: (*1|int)
+    max: (int & >〈0;min〉)
+  }
+  rg: (〈0;range2〉 & {
+    max: 8
+  })
+}
diff --git a/cue/testdata/resolve/026_combined_definitions.txtar b/cue/testdata/definitions/026_combined_definitions.txtar
similarity index 64%
rename from cue/testdata/resolve/026_combined_definitions.txtar
rename to cue/testdata/definitions/026_combined_definitions.txtar
index 20c0b4b..87f2eaf 100644
--- a/cue/testdata/resolve/026_combined_definitions.txtar
+++ b/cue/testdata/definitions/026_combined_definitions.txtar
@@ -116,3 +116,51 @@
     a: int
   }
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  #D1: (#struct){
+    env: (#struct){
+      a: (string){ "A" }
+      b: (string){ "B" }
+    }
+    #def: (#struct){
+      a: (string){ "A" }
+      b: (string){ "B" }
+    }
+  }
+  d1: (_|_){
+    // [eval]
+    env: (_|_){
+      // [eval] field `c` not allowed
+      a: (string){ "A" }
+      b: (string){ "B" }
+      c: (string){ "C" }
+    }
+    #def: (#struct){
+      a: (string){ "A" }
+      b: (string){ "B" }
+    }
+  }
+  #D2: (#struct){
+    a: (int){ int }
+    b: (int){ int }
+  }
+  #D3: (#struct){
+    env: (#struct){
+      a: (string){ "A" }
+      b: (string){ "B" }
+    }
+  }
+  #D4: (_|_){
+    // [eval]
+    env: (_|_){
+      // [eval] field `b` not allowed
+      a: (int){ int }
+      b: (int){ int }
+    }
+  }
+  #DC: (#struct){
+    a: (int){ int }
+  }
+}
diff --git a/cue/testdata/resolve/028_recursive_closing_starting_at_non-definition.txtar b/cue/testdata/definitions/028_recursive_closing_starting_at_non-definition.txtar
similarity index 70%
rename from cue/testdata/resolve/028_recursive_closing_starting_at_non-definition.txtar
rename to cue/testdata/definitions/028_recursive_closing_starting_at_non-definition.txtar
index 381b092..11d24e5 100644
--- a/cue/testdata/resolve/028_recursive_closing_starting_at_non-definition.txtar
+++ b/cue/testdata/definitions/028_recursive_closing_starting_at_non-definition.txtar
@@ -71,3 +71,27 @@
     }
   })
 }
+-- out/eval --
+(struct){
+  z: (struct){
+    a: (struct){
+      #B: (#struct){
+        c: (#struct){
+          d: (int){ 1 }
+          f: (int){ 1 }
+        }
+      }
+    }
+  }
+  A: (struct){
+    a: (struct){
+      #B: (#struct){
+        c: (#struct){
+          d: (int){ 1 }
+          f: (int){ 1 }
+          e: (int){ 2 }
+        }
+      }
+    }
+  }
+}
diff --git a/cue/testdata/resolve/032_definitions_with_embedding.txtar b/cue/testdata/definitions/032_definitions_with_embedding.txtar
similarity index 68%
rename from cue/testdata/resolve/032_definitions_with_embedding.txtar
rename to cue/testdata/definitions/032_definitions_with_embedding.txtar
index bd354a0..1286a02 100644
--- a/cue/testdata/resolve/032_definitions_with_embedding.txtar
+++ b/cue/testdata/definitions/032_definitions_with_embedding.txtar
@@ -79,3 +79,36 @@
     }
   })
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  #E: (#struct){
+    a: (#struct){
+      b: (int){ int }
+    }
+  }
+  #S: (#struct){
+    a: (#struct){
+      b: (int){ int }
+      c: (int){ int }
+    }
+    b: (int){ 3 }
+  }
+  #e1: (_|_){
+    // [eval]
+    a: (_|_){
+      // [eval] field `d` not allowed
+      b: (int){ int }
+      c: (int){ int }
+      d: (int){ 4 }
+    }
+    b: (int){ 3 }
+  }
+  #v1: (#struct){
+    a: (#struct){
+      b: (int){ int }
+      c: (int){ 4 }
+    }
+    b: (int){ 3 }
+  }
+}
diff --git "a/cue/testdata/fulleval/033_Issue_\043153.txtar" "b/cue/testdata/definitions/033_Issue_\043153.txtar"
similarity index 70%
rename from "cue/testdata/fulleval/033_Issue_\043153.txtar"
rename to "cue/testdata/definitions/033_Issue_\043153.txtar"
index 8d208ba..a3e84a32 100644
--- "a/cue/testdata/fulleval/033_Issue_\043153.txtar"
+++ "b/cue/testdata/definitions/033_Issue_\043153.txtar"
@@ -60,3 +60,25 @@
     ]
   })
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  listOfCloseds: (_|_){
+    // [eval]
+    0: (_|_){
+      // [eval] field `b` not allowed
+      a: (int){ |(*(int){ 0 }, (int){ int }) }
+      b: (int){ 2 }
+    }
+  }
+  Foo: (struct){
+    listOfCloseds: (list){
+    }
+  }
+  #Closed: (#struct){
+    a: (int){ |(*(int){ 0 }, (int){ int }) }
+  }
+  Junk: (struct){
+    b: (int){ 2 }
+  }
+}
diff --git a/cue/testdata/resolve/036_closing_with_failed_optional.txtar b/cue/testdata/definitions/036_closing_with_failed_optional.txtar
similarity index 82%
rename from cue/testdata/resolve/036_closing_with_failed_optional.txtar
rename to cue/testdata/definitions/036_closing_with_failed_optional.txtar
index ec4d97a..a5dd8bb 100644
--- a/cue/testdata/resolve/036_closing_with_failed_optional.txtar
+++ b/cue/testdata/definitions/036_closing_with_failed_optional.txtar
@@ -97,3 +97,27 @@
     a: int
   }
 }
+-- out/eval --
+(struct){
+  #k1: (#struct){
+    a: (int){ int }
+  }
+  #k2: (#struct){
+    a: (int){ int }
+  }
+  o1: (struct){
+  }
+  #o2: (#struct){
+  }
+  #d1: (struct){ |((#struct){
+      b: (int){ 4 }
+    }, (#struct){
+      c: (int){ 5 }
+    }) }
+  v1: (#struct){
+    b: (int){ 4 }
+  }
+  #A: (#struct){
+    a: (int){ int }
+  }
+}
diff --git a/cue/testdata/fulleval/036_optionals_in_open_structs.txtar b/cue/testdata/definitions/036_optionals_in_open_structs.txtar
similarity index 87%
rename from cue/testdata/fulleval/036_optionals_in_open_structs.txtar
rename to cue/testdata/definitions/036_optionals_in_open_structs.txtar
index 584ab4c..64719ca 100644
--- a/cue/testdata/fulleval/036_optionals_in_open_structs.txtar
+++ b/cue/testdata/definitions/036_optionals_in_open_structs.txtar
@@ -55,3 +55,15 @@
     aaa: 3
   })
 }
+-- out/eval --
+(struct){
+  A: (struct){
+  }
+  B: (struct){
+  }
+  #C: (#struct){
+  }
+  c: (#struct){
+    aaa: (int){ 3 }
+  }
+}
diff --git a/cue/testdata/resolve/037_closing_with_comprehensions.txtar b/cue/testdata/definitions/037_closing_with_comprehensions.txtar
similarity index 70%
rename from cue/testdata/resolve/037_closing_with_comprehensions.txtar
rename to cue/testdata/definitions/037_closing_with_comprehensions.txtar
index 9c30294..285a79e 100644
--- a/cue/testdata/resolve/037_closing_with_comprehensions.txtar
+++ b/cue/testdata/definitions/037_closing_with_comprehensions.txtar
@@ -102,3 +102,33 @@
     }
   })
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  #A: (#struct){
+    f1: (int){ int }
+    f2: (int){ int }
+  }
+  #B: (#struct){
+    f1: (int){ int }
+  }
+  #C: (#struct){
+    f1: (int){ int }
+  }
+  #D: (_|_){
+    // [eval] cannot mix bulk optional fields with dynamic fields, embeddings, or comprehensions within the same struct
+    f1: (int){ int }
+  }
+  #E: (_|_){
+    // [eval] field `f3` not allowed
+    f1: (int){ int }
+    f2: (int){ int }
+    f3: (int){ int }
+  }
+  a: (_|_){
+    // [eval] field `f3` not allowed
+    f1: (int){ int }
+    f2: (int){ int }
+    f3: (int){ int }
+  }
+}
diff --git a/cue/testdata/fulleval/037_conjunction_of_optional_sets.txtar b/cue/testdata/definitions/037_conjunction_of_optional_sets.txtar
similarity index 80%
rename from cue/testdata/fulleval/037_conjunction_of_optional_sets.txtar
rename to cue/testdata/definitions/037_conjunction_of_optional_sets.txtar
index ceaed37..d55c6e5 100644
--- a/cue/testdata/fulleval/037_conjunction_of_optional_sets.txtar
+++ b/cue/testdata/definitions/037_conjunction_of_optional_sets.txtar
@@ -50,3 +50,23 @@
     aaa: 3
   })
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  #A: (#struct){
+  }
+  #B: (#struct){
+  }
+  #C: (#struct){
+  }
+  c: (_|_){
+    // [eval] field `aaa` not allowed
+    aaa: (int){ 3 }
+  }
+  #D: (#struct){
+  }
+  d: (_|_){
+    // [eval] field `aaa` not allowed
+    aaa: (int){ 3 }
+  }
+}
diff --git a/cue/testdata/fulleval/038_continue_recursive_closing_for_optionals.txtar b/cue/testdata/definitions/038_continue_recursive_closing_for_optionals.txtar
similarity index 74%
rename from cue/testdata/fulleval/038_continue_recursive_closing_for_optionals.txtar
rename to cue/testdata/definitions/038_continue_recursive_closing_for_optionals.txtar
index b0bf47e..fbe88f7 100644
--- a/cue/testdata/fulleval/038_continue_recursive_closing_for_optionals.txtar
+++ b/cue/testdata/definitions/038_continue_recursive_closing_for_optionals.txtar
@@ -36,3 +36,17 @@
     }
   })
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  #S: (#struct){
+  }
+  a: (_|_){
+    // [eval]
+    v: (_|_){
+      // [eval] field `b` not allowed
+      b: (int){ int }
+      a: (int){ int }
+    }
+  }
+}
diff --git a/cue/testdata/fulleval/039_augment_closed_optionals.txtar b/cue/testdata/definitions/039_augment_closed_optionals.txtar
similarity index 87%
rename from cue/testdata/fulleval/039_augment_closed_optionals.txtar
rename to cue/testdata/definitions/039_augment_closed_optionals.txtar
index daabec7..2b99ed1 100644
--- a/cue/testdata/fulleval/039_augment_closed_optionals.txtar
+++ b/cue/testdata/definitions/039_augment_closed_optionals.txtar
@@ -82,3 +82,20 @@
     aaa: 4
   })
 }
+-- out/eval --
+(struct){
+  #A: (#struct){
+  }
+  #B: (#struct){
+  }
+  #C: (#struct){
+  }
+  c: (#struct){
+    QQ: (int){ 3 }
+  }
+  #D: (#struct){
+  }
+  d: (#struct){
+    aaa: (int){ 4 }
+  }
+}
diff --git a/cue/testdata/definitions/dynamic.txtar b/cue/testdata/definitions/dynamic.txtar
new file mode 100644
index 0000000..02561ba
--- /dev/null
+++ b/cue/testdata/definitions/dynamic.txtar
@@ -0,0 +1,21 @@
+-- in.cue --
+#A: {
+	a:      "foo"
+	"\(a)": 3
+}
+
+-- out/eval --
+(struct){
+  #A: (#struct){
+    a: (string){ "foo" }
+    foo: (int){ 3 }
+  }
+}
+-- out/compile --
+--- in.cue
+{
+  #A: {
+    a: "foo"
+    "\(〈0;a〉)": 3
+  }
+}
diff --git a/cue/testdata/definitions/issue317.txtar b/cue/testdata/definitions/issue317.txtar
new file mode 100644
index 0000000..12c2c82
--- /dev/null
+++ b/cue/testdata/definitions/issue317.txtar
@@ -0,0 +1,210 @@
+# Allow otherField
+
+-- in.cue --
+#T: {
+	#Schema: [_]: #D
+	concrete: #Schema
+}
+#D: {
+	fieldWithDefault: *0 | int
+	...
+}
+s: #T & {
+	#Schema: {
+		foo: otherField: string
+	}
+	concrete: foo: otherField: "hello"
+}
+
+#Container: {
+	Env: [Name=string]: {
+			name:  string
+			value: string
+	}
+}
+#Deployment: {
+        #Containers: [Name=string]: #Container
+        containers: [ for c in #Containers { c } ] // Problem is here.
+}
+Something: {
+        #Deployment
+        #Containers: {
+                // Does not allow entries other than value. Either add ... or embed
+                "a thing": Env: foobar: value: "foo"
+        }
+}
+x: Something & #Deployment
+
+-- out/eval --
+(struct){
+  #T: (#struct){
+    #Schema: (#struct){
+    }
+    concrete: (#struct){
+    }
+  }
+  #D: (#struct){
+    fieldWithDefault: (int){ |(*(int){ 0 }, (int){ int }) }
+  }
+  s: (#struct){
+    #Schema: (#struct){
+      foo: (#struct){
+        otherField: (string){ string }
+        fieldWithDefault: (int){ |(*(int){ 0 }, (int){ int }) }
+      }
+    }
+    concrete: (#struct){
+      foo: (#struct){
+        otherField: (string){ "hello" }
+        fieldWithDefault: (int){ |(*(int){ 0 }, (int){ int }) }
+      }
+    }
+  }
+  #Container: (#struct){
+    Env: (#struct){
+    }
+  }
+  #Deployment: (#struct){
+    #Containers: (#struct){
+    }
+    containers: (#list){
+    }
+  }
+  Something: (#struct){
+    #Containers: (#struct){
+      "a thing": (#struct){
+        Env: (#struct){
+          foobar: (#struct){
+            value: (string){ "foo" }
+            name: (string){ string }
+          }
+        }
+      }
+    }
+    containers: (#list){
+      0: (#struct){
+        Env: (#struct){
+          foobar: (#struct){
+            value: (string){ "foo" }
+            name: (string){ string }
+          }
+        }
+      }
+    }
+  }
+  x: (#struct){
+    #Containers: (#struct){
+      "a thing": (#struct){
+        Env: (#struct){
+          foobar: (#struct){
+            value: (string){ "foo" }
+            name: (string){ string }
+          }
+        }
+      }
+    }
+    containers: (#list){
+      0: (#struct){
+        Env: (#struct){
+          foobar: (#struct){
+            value: (string){ "foo" }
+            name: (string){ string }
+          }
+        }
+      }
+    }
+  }
+}
+-- out/export --
+#T: {
+	#Schema: [_]: #D
+	concrete: #Schema
+}
+#D: {
+	fieldWithDefault: *0 | int
+	...
+}
+s: #T & {
+	#Schema: {
+		foo: otherField: string
+	}
+	concrete: foo: otherField: "hello"
+}
+
+#Container: {
+	Env: [Name=string]: {
+		name:  string
+		value: string
+	}
+}
+#Deployment: {
+	#Containers: [Name=string]: #Container
+
+	containers: [ for c in #Containers {c}] // Problem is here.
+}
+Something: {
+	#Deployment
+	#Containers: {
+		// Does not allow entries other than value. Either add ... or embed
+		"a thing": Env: foobar: value: "foo"
+	}
+}
+x: Something & #Deployment
+
+-- out/compile --
+--- in.cue
+{
+  #T: {
+    #Schema: {
+      [_]: 〈2;#D〉
+    }
+    concrete: 〈0;#Schema〉
+  }
+  #D: {
+    fieldWithDefault: (*0|int)
+    ...
+  }
+  s: (〈0;#T〉 & {
+    #Schema: {
+      foo: {
+        otherField: string
+      }
+    }
+    concrete: {
+      foo: {
+        otherField: "hello"
+      }
+    }
+  })
+  #Container: {
+    Env: {
+      [string]: {
+        name: string
+        value: string
+      }
+    }
+  }
+  #Deployment: {
+    #Containers: {
+      [string]: 〈2;#Container〉
+    }
+    containers: [
+      for _, c in 〈0;#Containers〉 {
+        〈1;c〉
+      },
+    ]
+  }
+  Something: {
+    〈1;#Deployment〉
+    #Containers: {
+      "a thing": {
+        Env: {
+          foobar: {
+            value: "foo"
+          }
+        }
+      }
+    }
+  }
+  x: (〈0;Something〉 & 〈0;#Deployment〉)
+}
diff --git a/cue/testdata/definitions/issue359.txtar b/cue/testdata/definitions/issue359.txtar
new file mode 100644
index 0000000..691c772
--- /dev/null
+++ b/cue/testdata/definitions/issue359.txtar
@@ -0,0 +1,154 @@
+-- in.cue --
+#simple: {
+  peso: *1|int
+  edad: *2|int
+}
+
+// Second struct is not considered closed, as expected
+good: #simple & {
+  peso: 4
+}
+
+
+#complex: {
+  things: [string]: #simple
+}
+
+// Still, no closedness issue in the second struct
+#many: #complex & {
+  things: hola: peso: 2
+  things: sol: peso: 3
+}
+
+// Inner struct in second struct IS considered closed: why?
+bad: #many  & {
+  things: hola: peso: 2
+}
+
+// non-definition equivalent
+many: #complex & {
+  things: hola: peso: 2
+  things: sol: peso: 3
+}
+
+// Now inner struct in second struct is NOT considered closed
+notbad: many  & {
+  things: hola: peso: 2
+}
+-- out/eval --
+(struct){
+  #simple: (#struct){
+    peso: (int){ |(*(int){ 1 }, (int){ int }) }
+    edad: (int){ |(*(int){ 2 }, (int){ int }) }
+  }
+  good: (#struct){
+    peso: (int){ 4 }
+    edad: (int){ |(*(int){ 2 }, (int){ int }) }
+  }
+  #complex: (#struct){
+    things: (#struct){
+    }
+  }
+  #many: (#struct){
+    things: (#struct){
+      hola: (#struct){
+        peso: (int){ 2 }
+        edad: (int){ |(*(int){ 2 }, (int){ int }) }
+      }
+      sol: (#struct){
+        peso: (int){ 3 }
+        edad: (int){ |(*(int){ 2 }, (int){ int }) }
+      }
+    }
+  }
+  bad: (#struct){
+    things: (#struct){
+      hola: (#struct){
+        peso: (int){ 2 }
+        edad: (int){ |(*(int){ 2 }, (int){ int }) }
+      }
+      sol: (#struct){
+        peso: (int){ 3 }
+        edad: (int){ |(*(int){ 2 }, (int){ int }) }
+      }
+    }
+  }
+  many: (#struct){
+    things: (#struct){
+      hola: (#struct){
+        peso: (int){ 2 }
+        edad: (int){ |(*(int){ 2 }, (int){ int }) }
+      }
+      sol: (#struct){
+        peso: (int){ 3 }
+        edad: (int){ |(*(int){ 2 }, (int){ int }) }
+      }
+    }
+  }
+  notbad: (#struct){
+    things: (#struct){
+      hola: (#struct){
+        peso: (int){ 2 }
+        edad: (int){ |(*(int){ 2 }, (int){ int }) }
+      }
+      sol: (#struct){
+        peso: (int){ 3 }
+        edad: (int){ |(*(int){ 2 }, (int){ int }) }
+      }
+    }
+  }
+}
+-- out/compile --
+--- in.cue
+{
+  #simple: {
+    peso: (*1|int)
+    edad: (*2|int)
+  }
+  good: (〈0;#simple〉 & {
+    peso: 4
+  })
+  #complex: {
+    things: {
+      [string]: 〈2;#simple〉
+    }
+  }
+  #many: (〈0;#complex〉 & {
+    things: {
+      hola: {
+        peso: 2
+      }
+    }
+    things: {
+      sol: {
+        peso: 3
+      }
+    }
+  })
+  bad: (〈0;#many〉 & {
+    things: {
+      hola: {
+        peso: 2
+      }
+    }
+  })
+  many: (〈0;#complex〉 & {
+    things: {
+      hola: {
+        peso: 2
+      }
+    }
+    things: {
+      sol: {
+        peso: 3
+      }
+    }
+  })
+  notbad: (〈0;many〉 & {
+    things: {
+      hola: {
+        peso: 2
+      }
+    }
+  })
+}
diff --git a/cue/testdata/definitions/issue367.txtar b/cue/testdata/definitions/issue367.txtar
new file mode 100644
index 0000000..8e64842
--- /dev/null
+++ b/cue/testdata/definitions/issue367.txtar
@@ -0,0 +1,55 @@
+TODO: l4 should be allowed
+
+-- in.cue --
+#def1: l1: ["l2"]: {l3: int, l4: 26}
+#special1: #def1     & {l1: l2: l3: <100}
+instance:  #special1 & {l1: l2: l3: 34}
+-- out/eval --
+(struct){
+  #def1: (#struct){
+    l1: (#struct){
+    }
+  }
+  #special1: (#struct){
+    l1: (#struct){
+      l2: (#struct){
+        l3: (int){ &(<100, int) }
+        l4: (int){ 26 }
+      }
+    }
+  }
+  instance: (#struct){
+    l1: (#struct){
+      l2: (#struct){
+        l3: (int){ 34 }
+        l4: (int){ 26 }
+      }
+    }
+  }
+}
+-- out/compile --
+--- in.cue
+{
+  #def1: {
+    l1: {
+      ["l2"]: {
+        l3: int
+        l4: 26
+      }
+    }
+  }
+  #special1: (〈0;#def1〉 & {
+    l1: {
+      l2: {
+        l3: <100
+      }
+    }
+  })
+  instance: (〈0;#special1〉 & {
+    l1: {
+      l2: {
+        l3: 34
+      }
+    }
+  })
+}
diff --git a/cue/testdata/definitions/issue419.txtar b/cue/testdata/definitions/issue419.txtar
new file mode 100644
index 0000000..a620500
--- /dev/null
+++ b/cue/testdata/definitions/issue419.txtar
@@ -0,0 +1,54 @@
+-- in.cue --
+#A: {
+	a: string
+}
+
+#B: {
+	_b: string
+}
+
+#X: #A | #B
+
+l: [...#X]
+
+l: [
+	{_b: "bar"}
+]
+-- out/eval --
+(struct){
+  #A: (#struct){
+    a: (string){ string }
+  }
+  #B: (#struct){
+    _b: (string){ string }
+  }
+  #X: (struct){ |((#struct){
+      a: (string){ string }
+    }, (#struct){
+      _b: (string){ string }
+    }) }
+  l: (#list){
+    0: (#struct){
+      _b: (string){ "bar" }
+    }
+  }
+}
+-- out/compile --
+--- in.cue
+{
+  #A: {
+    a: string
+  }
+  #B: {
+    _b: string
+  }
+  #X: (〈0;#A〉|〈0;#B〉)
+  l: [
+    ...〈0;#X〉,
+  ]
+  l: [
+    {
+      _b: "bar"
+    },
+  ]
+}
diff --git a/cue/testdata/definitions/list.txtar b/cue/testdata/definitions/list.txtar
new file mode 100644
index 0000000..7b99534
--- /dev/null
+++ b/cue/testdata/definitions/list.txtar
@@ -0,0 +1,43 @@
+-- in.cue --
+c: #R & {
+    w: [{}, {b: int}]
+}
+#R: {
+    w: [{}, ...]
+}
+-- out/eval --
+(struct){
+  c: (#struct){
+    w: (#list){
+      0: (#struct){
+      }
+      1: (#struct){
+        b: (int){ int }
+      }
+    }
+  }
+  #R: (#struct){
+    w: (list){
+      0: (#struct){
+      }
+    }
+  }
+}
+-- out/compile --
+--- in.cue
+{
+  c: (〈0;#R〉 & {
+    w: [
+      {},
+      {
+        b: int
+      },
+    ]
+  })
+  #R: {
+    w: [
+      {},
+      ...,
+    ]
+  }
+}
diff --git a/cue/testdata/disjunctions/019_ips.txtar b/cue/testdata/disjunctions/019_ips.txtar
new file mode 100644
index 0000000..9d24edc
--- /dev/null
+++ b/cue/testdata/disjunctions/019_ips.txtar
@@ -0,0 +1,93 @@
+# DO NOT EDIT; generated by go run testdata/gen.go
+#
+#name: ips
+#evalFull
+-- in.cue --
+IP: 4 * [ uint8]
+
+Private:
+	*[ 192, 168, uint8, uint8] |
+	[ 10, uint8, uint8, uint8] |
+	[ 172, >=16 & <=32, uint8, uint8]
+
+Inst: Private & [ _, 10, ...]
+
+MyIP: Inst & [_, _, 10, 10]
+-- out/def --
+IP: [uint8, uint8, uint8, uint8]
+Private: *[192, 168, uint8, uint8] | [10, uint8, uint8, uint8] | [172, >=16 & <=32, uint8, uint8]
+Inst: [10, 10, uint8, uint8]
+MyIP: [10, 10, 10, 10]
+-- out/legacy-debug --
+<0>{IP: [(int & >=0 & int & <=255),(int & >=0 & int & <=255),(int & >=0 & int & <=255),(int & >=0 & int & <=255)], Private: [192,168,(int & >=0 & int & <=255),(int & >=0 & int & <=255)], Inst: [10,10,(int & >=0 & int & <=255),(int & >=0 & int & <=255)], MyIP: [10,10,10,10]}
+-- out/compile --
+--- in.cue
+{
+  IP: (4 * [
+    &(int, >=0, <=255),
+  ])
+  Private: (*[
+    192,
+    168,
+    &(int, >=0, <=255),
+    &(int, >=0, <=255),
+  ]|[
+    10,
+    &(int, >=0, <=255),
+    &(int, >=0, <=255),
+    &(int, >=0, <=255),
+  ]|[
+    172,
+    (>=16 & <=32),
+    &(int, >=0, <=255),
+    &(int, >=0, <=255),
+  ])
+  Inst: (〈0;Private〉 & [
+    _,
+    10,
+    ...,
+  ])
+  MyIP: (〈0;Inst〉 & [
+    _,
+    _,
+    10,
+    10,
+  ])
+}
+-- out/eval --
+(struct){
+  IP: (#list){
+    0: (int){ &(>=0, <=255, int) }
+    1: (int){ &(>=0, <=255, int) }
+    2: (int){ &(>=0, <=255, int) }
+    3: (int){ &(>=0, <=255, int) }
+  }
+  Private: (list){ |(*(#list){
+      0: (int){ 192 }
+      1: (int){ 168 }
+      2: (int){ &(>=0, <=255, int) }
+      3: (int){ &(>=0, <=255, int) }
+    }, (#list){
+      0: (int){ 10 }
+      1: (int){ &(>=0, <=255, int) }
+      2: (int){ &(>=0, <=255, int) }
+      3: (int){ &(>=0, <=255, int) }
+    }, (#list){
+      0: (int){ 172 }
+      1: (number){ &(>=16, <=32) }
+      2: (int){ &(>=0, <=255, int) }
+      3: (int){ &(>=0, <=255, int) }
+    }) }
+  Inst: (#list){
+    0: (int){ 10 }
+    1: (int){ 10 }
+    2: (int){ &(>=0, <=255, int) }
+    3: (int){ &(>=0, <=255, int) }
+  }
+  MyIP: (#list){
+    0: (int){ 10 }
+    1: (int){ 10 }
+    2: (int){ 10 }
+    3: (int){ 10 }
+  }
+}
diff --git a/cue/testdata/disjunctions/defembed.txtar b/cue/testdata/disjunctions/defembed.txtar
new file mode 100644
index 0000000..5dcaf0d
--- /dev/null
+++ b/cue/testdata/disjunctions/defembed.txtar
@@ -0,0 +1,101 @@
+Interaction between defaults, embedding and disjunctions.
+
+// TODO: at the moment, using a default will select it.
+-- in.cue --
+x: {
+    // All of these resolve to *2 | 3
+    m1: (*1 | (*2 | 3)) & (>=2 & <=3)
+    m2: (*1 | (*2 | 3)) & (2 | 3)
+    m3: (*1 | *(*2 | 3)) & (2 | 3)
+}
+y1: x & {
+    {m4: x.m1+x.m2+x.m3}
+}
+y2: {
+    x
+    {m4: y2.m1+y2.m2+y2.m3}
+}
+Y=y3: {
+    x
+    {m4: Y.m1+Y.m2+Y.m3}
+}
+y4: x & {
+    {m4: y4.m1+y4.m2+y4.m3}
+}
+
+// Second disjunct in embedding is not possible because of the previous
+// declaration of `b`, so it should be resolved to {a: 1}.
+b: (*"a" | "b") | "c"
+{a: b} | {b: int}
+
+-- out/eval --
+(struct){
+  x: (struct){
+    m1: (int){ |(*(int){ 2 }, (int){ 3 }) }
+    m2: (int){ |(*(int){ 2 }, (int){ 3 }) }
+    m3: (int){ |(*(int){ 2 }, (int){ 3 }) }
+  }
+  y1: (struct){
+    m1: (int){ |(*(int){ 2 }, (int){ 3 }) }
+    m2: (int){ |(*(int){ 2 }, (int){ 3 }) }
+    m3: (int){ |(*(int){ 2 }, (int){ 3 }) }
+    m4: (int){ 6 }
+  }
+  y2: (struct){
+    m1: (int){ |(*(int){ 2 }, (int){ 3 }) }
+    m2: (int){ |(*(int){ 2 }, (int){ 3 }) }
+    m3: (int){ |(*(int){ 2 }, (int){ 3 }) }
+    m4: (int){ 6 }
+  }
+  y3: (struct){
+    m1: (int){ |(*(int){ 2 }, (int){ 3 }) }
+    m2: (int){ |(*(int){ 2 }, (int){ 3 }) }
+    m3: (int){ |(*(int){ 2 }, (int){ 3 }) }
+    m4: (int){ 6 }
+  }
+  y4: (struct){
+    m1: (int){ |(*(int){ 2 }, (int){ 3 }) }
+    m2: (int){ |(*(int){ 2 }, (int){ 3 }) }
+    m3: (int){ |(*(int){ 2 }, (int){ 3 }) }
+    m4: (int){ 6 }
+  }
+  b: (string){ |(*(string){ "a" }, (string){ "b" }, (string){ "c" }) }
+  a: (string){ |(*(string){ "a" }, (string){ "b" }, (string){ "c" }) }
+}
+-- out/compile --
+--- in.cue
+{
+  x: {
+    m1: ((*1|(*2|3)) & (>=2 & <=3))
+    m2: ((*1|(*2|3)) & (2|3))
+    m3: ((*1|*(*2|3)) & (2|3))
+  }
+  y1: (〈0;x〉 & {
+    {
+      m4: ((〈2;x〉.m1 + 〈2;x〉.m2) + 〈2;x〉.m3)
+    }
+  })
+  y2: {
+    〈1;x〉
+    {
+      m4: ((〈2;y2〉.m1 + 〈2;y2〉.m2) + 〈2;y2〉.m3)
+    }
+  }
+  y3: {
+    〈1;x〉
+    {
+      m4: ((〈2;y3〉.m1 + 〈2;y3〉.m2) + 〈2;y3〉.m3)
+    }
+  }
+  y4: (〈0;x〉 & {
+    {
+      m4: ((〈2;y4〉.m1 + 〈2;y4〉.m2) + 〈2;y4〉.m3)
+    }
+  })
+  b: ((*"a"|"b")|"c")
+  ({
+    a: 〈1;b〉
+  }|{
+    b: int
+  })
+}
diff --git a/cue/testdata/eval/bounds.txtar b/cue/testdata/eval/bounds.txtar
new file mode 100644
index 0000000..8ca60f3
--- /dev/null
+++ b/cue/testdata/eval/bounds.txtar
@@ -0,0 +1,37 @@
+-- in.cue --
+x0: 5
+x1: b5 & 30
+
+b0: <x0 & >0
+b1: b0 & int
+b2: int & <5.5
+b3: <10 & <=5
+b4: >=20 & >20
+b5: >=21 & >20
+b6: int & >5 & <= 6
+
+-- out/eval --
+(struct){
+  x0: (int){ 5 }
+  x1: (int){ 30 }
+  b0: (number){ &(>0, <5) }
+  b1: (int){ &(>0, <5, int) }
+  b2: (int){ &(<5.5, int) }
+  b3: (number){ <=5 }
+  b4: (number){ >20 }
+  b5: (number){ >=21 }
+  b6: (int){ 6 }
+}
+-- out/compile --
+--- in.cue
+{
+  x0: 5
+  x1: (〈0;b5〉 & 30)
+  b0: (<〈0;x0〉 & >0)
+  b1: (〈0;b0〉 & int)
+  b2: (int & <5.5)
+  b3: (<10 & <=5)
+  b4: (>=20 & >20)
+  b5: (>=21 & >20)
+  b6: ((int & >5) & <=6)
+}
diff --git a/cue/testdata/eval/bulk.txtar b/cue/testdata/eval/bulk.txtar
new file mode 100644
index 0000000..c32e0e0
--- /dev/null
+++ b/cue/testdata/eval/bulk.txtar
@@ -0,0 +1,50 @@
+-- in.cue --
+a: {
+	foo: a: 1
+	[X = =~"foo"]: {
+		b: 1
+		name: X
+	}
+}
+d: a & {
+	"foobar": {
+		c: 2
+	}
+}
+
+-- out/eval --
+(struct){
+  a: (struct){
+    foo: (struct){
+      a: (int){ 1 }
+    }
+  }
+  d: (struct){
+    foo: (struct){
+      a: (int){ 1 }
+    }
+    foobar: (struct){
+      c: (int){ 2 }
+      b: (int){ 1 }
+      name: (string){ "foobar" }
+    }
+  }
+}
+-- out/compile --
+--- in.cue
+{
+  a: {
+    foo: {
+      a: 1
+    }
+    [=~"foo"]: {
+      b: 1
+      name: 〈1;-〉
+    }
+  }
+  d: (〈0;a〉 & {
+    foobar: {
+      c: 2
+    }
+  })
+}
diff --git a/cue/testdata/eval/closed_disjunction.txtar b/cue/testdata/eval/closed_disjunction.txtar
new file mode 100644
index 0000000..8ecf601
--- /dev/null
+++ b/cue/testdata/eval/closed_disjunction.txtar
@@ -0,0 +1,52 @@
+-- in.cue --
+#A: {
+    b?: int
+    *{} | {c?: int} | {d?: int} | {e?: int}
+}
+
+a: #A & {
+    b: 3
+    c: 3
+}
+
+b: #A & {
+    c: 3
+    d: 4
+}
+-- out/eval --
+(_|_){
+  // [eval]
+  #A: (#struct){
+  }
+  a: (#struct){
+    b: (int){ 3 }
+    c: (int){ 3 }
+  }
+  b: (_|_){
+    // [eval] field `d` not allowed
+    c: (int){ 3 }
+    d: (int){ 4 }
+  }
+}
+-- out/compile --
+--- in.cue
+{
+  #A: {
+    b?: int
+    (*{}|{
+      c?: int
+    }|{
+      d?: int
+    }|{
+      e?: int
+    })
+  }
+  a: (〈0;#A〉 & {
+    b: 3
+    c: 3
+  })
+  b: (〈0;#A〉 & {
+    c: 3
+    d: 4
+  })
+}
diff --git a/cue/testdata/eval/closedness.txtar b/cue/testdata/eval/closedness.txtar
new file mode 100644
index 0000000..417429e
--- /dev/null
+++ b/cue/testdata/eval/closedness.txtar
@@ -0,0 +1,63 @@
+-- in.cue --
+#E: {
+    c: int
+}
+#A: {
+    b: int
+    q: {
+        #E
+        d: int
+    }
+}
+a: #A & {
+    b: 3
+    q: {
+        c: 2
+        e: 43
+    }
+}
+-- out/eval --
+(_|_){
+  // [eval]
+  #E: (#struct){
+    c: (int){ int }
+  }
+  #A: (#struct){
+    b: (int){ int }
+    q: (#struct){
+      c: (int){ int }
+      d: (int){ int }
+    }
+  }
+  a: (_|_){
+    // [eval]
+    b: (int){ 3 }
+    q: (_|_){
+      // [eval] field `e` not allowed
+      c: (int){ 2 }
+      d: (int){ int }
+      e: (int){ 43 }
+    }
+  }
+}
+-- out/compile --
+--- in.cue
+{
+  #E: {
+    c: int
+  }
+  #A: {
+    b: int
+    q: {
+      〈2;#E〉
+      d: int
+    }
+  }
+  a: (〈0;#A〉 & {
+    b: 3
+    q: {
+      c: 2
+      e: 43
+    }
+  })
+}
diff --git a/cue/testdata/eval/comprehensions.txtar b/cue/testdata/eval/comprehensions.txtar
new file mode 100644
index 0000000..09152da
--- /dev/null
+++ b/cue/testdata/eval/comprehensions.txtar
@@ -0,0 +1,51 @@
+-- in.cue --
+a: { x: 10, y: 100, z: 50 }
+b: {
+	for k, v in a if v <= 50 {
+		"\(k)": v
+	}
+	x: int
+	if x > 3 {
+		k: 20
+	}
+	k: int
+	if k > 0 {
+		l: 40
+	}
+}
+-- out/eval --
+(struct){
+  a: (struct){
+    x: (int){ 10 }
+    y: (int){ 100 }
+    z: (int){ 50 }
+  }
+  b: (struct){
+    x: (int){ 10 }
+    k: (int){ 20 }
+    z: (int){ 50 }
+    l: (int){ 40 }
+  }
+}
+-- out/compile --
+--- in.cue
+{
+  a: {
+    x: 10
+    y: 100
+    z: 50
+  }
+  b: {
+    for k, v in 〈1;a〉 if (〈0;v〉 <= 50) {
+      "\(〈1;k〉)": 〈1;v〉
+    }
+    x: int
+    if (〈0;x〉 > 3) {
+      k: 20
+    }
+    k: int
+    if (〈0;k〉 > 0) {
+      l: 40
+    }
+  }
+}
diff --git a/cue/testdata/eval/cycles_ref.txtar b/cue/testdata/eval/cycles_ref.txtar
new file mode 100644
index 0000000..a1d1bbb
--- /dev/null
+++ b/cue/testdata/eval/cycles_ref.txtar
@@ -0,0 +1,42 @@
+-- in.cue --
+a: b + 100
+b: a - 100
+a: 200
+
+
+c: d & { b: 2 }
+d: e
+e: { a: 1 }
+e: c
+-- out/eval --
+(struct){
+  a: (int){ 200 }
+  b: (int){ 100 }
+  c: (struct){
+    a: (int){ 1 }
+    b: (int){ 2 }
+  }
+  d: (struct){
+    a: (int){ 1 }
+    b: (int){ 2 }
+  }
+  e: (struct){
+    a: (int){ 1 }
+    b: (int){ 2 }
+  }
+}
+-- out/compile --
+--- in.cue
+{
+  a: (〈0;b〉 + 100)
+  b: (〈0;a〉 - 100)
+  a: 200
+  c: (〈0;d〉 & {
+    b: 2
+  })
+  d: 〈0;e〉
+  e: {
+    a: 1
+  }
+  e: 〈0;c〉
+}
diff --git a/cue/testdata/eval/disjunctions.txtar b/cue/testdata/eval/disjunctions.txtar
new file mode 100644
index 0000000..426940f
--- /dev/null
+++ b/cue/testdata/eval/disjunctions.txtar
@@ -0,0 +1,76 @@
+-- in.cue --
+a: *1 | int
+aa: *1 | *2 | int
+
+b: {
+	name: "int"
+	val: int
+} | {
+	name: "str"
+	val: string
+}
+
+d: b & { val: 3 }
+c: b & { name: "int", val: 3 }
+e: b & { val: "foo" }
+f: b & { name: "str", val: 3 }
+
+-- out/eval --
+(_|_){
+  // [eval]
+  a: (int){ |(*(int){ 1 }, (int){ int }) }
+  aa: (int){ |(*(int){ 1 }, *(int){ 2 }, (int){ int }) }
+  b: (struct){ |((struct){
+      name: (string){ "int" }
+      val: (int){ int }
+    }, (struct){
+      name: (string){ "str" }
+      val: (string){ string }
+    }) }
+  d: (struct){
+    val: (int){ 3 }
+    name: (string){ "int" }
+  }
+  c: (struct){
+    name: (string){ "int" }
+    val: (int){ 3 }
+  }
+  e: (struct){
+    val: (string){ "foo" }
+    name: (string){ "str" }
+  }
+  f: (_|_){
+    // [eval]
+    name: (string){ "str" }
+    val: (_|_){
+      // [eval] invalid value *adt.BasicType (mismatched types string and int)
+    }
+  }
+}
+-- out/compile --
+--- in.cue
+{
+  a: (*1|int)
+  aa: (*1|*2|int)
+  b: ({
+    name: "int"
+    val: int
+  }|{
+    name: "str"
+    val: string
+  })
+  d: (〈0;b〉 & {
+    val: 3
+  })
+  c: (〈0;b〉 & {
+    name: "int"
+    val: 3
+  })
+  e: (〈0;b〉 & {
+    val: "foo"
+  })
+  f: (〈0;b〉 & {
+    name: "str"
+    val: 3
+  })
+}
diff --git a/cue/testdata/eval/dynamic_field.txtar b/cue/testdata/eval/dynamic_field.txtar
new file mode 100644
index 0000000..d42d461
--- /dev/null
+++ b/cue/testdata/eval/dynamic_field.txtar
@@ -0,0 +1,47 @@
+-- in.cue --
+a: "foo"
+"\(a)": b: c: d: e
+e: 2
+
+b: "bar"
+X="\(b)": {
+	a: 1
+}
+c: X
+-- out/eval --
+(struct){
+  a: (string){ "foo" }
+  e: (int){ 2 }
+  b: (string){ "bar" }
+  c: (struct){
+    a: (int){ 1 }
+  }
+  foo: (struct){
+    b: (struct){
+      c: (struct){
+        d: (int){ 2 }
+      }
+    }
+  }
+  bar: (struct){
+    a: (int){ 1 }
+  }
+}
+-- out/compile --
+--- in.cue
+{
+  a: "foo"
+  "\(〈0;a〉)": {
+    b: {
+      c: {
+        d: 〈3;e〉
+      }
+    }
+  }
+  e: 2
+  b: "bar"
+  "\(〈0;b〉)": {
+    a: 1
+  }
+  c: 〈0;("\(〈0;b〉)")〉
+}
diff --git a/cue/testdata/eval/expressions.txtar b/cue/testdata/eval/expressions.txtar
new file mode 100644
index 0000000..0b7f376
--- /dev/null
+++ b/cue/testdata/eval/expressions.txtar
@@ -0,0 +1,21 @@
+-- in.cue --
+a: 1
+b: 1 + 2
+c: 3 - 1
+d: 5 rem 3
+
+-- out/eval --
+(struct){
+  a: (int){ 1 }
+  b: (int){ 3 }
+  c: (int){ 2 }
+  d: (int){ 2 }
+}
+-- out/compile --
+--- in.cue
+{
+  a: 1
+  b: (1 + 2)
+  c: (3 - 1)
+  d: (5 rem 3)
+}
diff --git a/cue/testdata/eval/incomplete.txtar b/cue/testdata/eval/incomplete.txtar
new file mode 100644
index 0000000..62f637e
--- /dev/null
+++ b/cue/testdata/eval/incomplete.txtar
@@ -0,0 +1,60 @@
+These should all be incomplete errors.
+
+-- in.cue --
+s: string
+
+e1: s + s
+e2: >"bar" & s // TODO
+e3: >s & "foo" // TODO
+
+e4: >e1 & s
+e5: <e5 & s
+
+E: {
+  a: c-b
+  b: c-a
+  c: a+b & >=5
+}
+-- out/eval --
+(struct){
+  s: (string){ string }
+  e1: (_|_){
+    // [incomplete] non-concrete value *adt.BasicType in operand to +:
+    //     ./in.cue:3:5
+  }
+  e2: (string){ >"bar" }
+  e3: (string){ "foo" }
+  e4: (_|_){
+    // [incomplete] non-concrete value *adt.BasicType in operand to +:
+    //     ./in.cue:3:5
+  }
+  e5: (_|_){
+    // [cycle] cycle error
+  }
+  E: (struct){
+    a: (_|_){
+      // [cycle] cycle error
+    }
+    b: (_|_){
+      // [cycle] cycle error
+    }
+    c: (_|_){
+      // [cycle] cycle error
+    }
+  }
+}
+-- out/compile --
+--- in.cue
+{
+  s: string
+  e1: (〈0;s〉 + 〈0;s〉)
+  e2: (>"bar" & 〈0;s〉)
+  e3: (>〈0;s〉 & "foo")
+  e4: (>〈0;e1〉 & 〈0;s〉)
+  e5: (<〈0;e5〉 & 〈0;s〉)
+  E: {
+    a: (〈0;c〉 - 〈0;b〉)
+    b: (〈0;c〉 - 〈0;a〉)
+    c: ((〈0;a〉 + 〈0;b〉) & >=5)
+  }
+}
diff --git a/cue/testdata/eval/let.txtar b/cue/testdata/eval/let.txtar
new file mode 100644
index 0000000..e7e9c38
--- /dev/null
+++ b/cue/testdata/eval/let.txtar
@@ -0,0 +1,41 @@
+-- in.cue --
+let A = 3 * 3
+let B = A + A
+let C = B + B
+let D = C + C
+let E = D + D
+let F = E + E
+b: {
+    let G = F + F
+    let H = G + G
+    let I = H + H
+    let J = I + I
+    let K = J + J
+    let L = K + K
+    let M = L + L
+    let N = M + M
+    let O = N + N
+    let P = O + O
+    let Q = P + P
+    let R = Q + Q
+    let S = R + R
+    let T = S + S
+
+    a: T
+    b: F
+}
+-- out/eval --
+(struct){
+  b: (struct){
+    a: (int){ 4718592 }
+    b: (int){ 288 }
+  }
+}
+-- out/compile --
+--- in.cue
+{
+  b: {
+    a: 〈0;let T〉
+    b: 〈1;let F〉
+  }
+}
diff --git a/cue/testdata/eval/lists.txtar b/cue/testdata/eval/lists.txtar
new file mode 100644
index 0000000..93df5d4
--- /dev/null
+++ b/cue/testdata/eval/lists.txtar
@@ -0,0 +1,56 @@
+-- in.cue --
+a: [...int]
+a: [1, 2, 3, 5]
+a: [1, 2, 3, d]
+
+b: a[3]
+d: 5
+
+c: [for x in [[1, 2]][0] { x + d }]
+
+-- out/eval --
+(struct){
+  a: (#list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+    2: (int){ 3 }
+    3: (int){ 5 }
+  }
+  b: (int){ 5 }
+  d: (int){ 5 }
+  c: (#list){
+    0: (int){ 6 }
+    1: (int){ 7 }
+  }
+}
+-- out/compile --
+--- in.cue
+{
+  a: [
+    ...int,
+  ]
+  a: [
+    1,
+    2,
+    3,
+    5,
+  ]
+  a: [
+    1,
+    2,
+    3,
+    〈0;d〉,
+  ]
+  b: 〈0;a〉[3]
+  d: 5
+  c: [
+    for _, x in [
+      [
+        1,
+        2,
+      ],
+    ][0] {
+      (〈1;x〉 + 〈2;d〉)
+    },
+  ]
+}
diff --git a/cue/testdata/eval/merge.txtar b/cue/testdata/eval/merge.txtar
new file mode 100644
index 0000000..7bb1f4b
--- /dev/null
+++ b/cue/testdata/eval/merge.txtar
@@ -0,0 +1,96 @@
+TODO: image field is not pre-evaluated (not a huge deal)
+TODO: allow dynamic fields
+
+-- in.cue --
+key: "app01"
+manifests: [
+    {
+    deployment: #map: [string]: { name: string, image: string, desc: string }
+    deployment: #map: "\(key)": name: "app01"
+    },
+    {
+        deployment: #map: app01: image: "image01"
+        // TODO: allow
+        // deployment: #map: "\("app03")": image: "image01"
+    }
+]
+// unify
+results: _
+for _, manifest in manifests {
+    results: manifest
+}
+-- out/eval --
+(struct){
+  key: (string){ "app01" }
+  manifests: (#list){
+    0: (struct){
+      deployment: (struct){
+        #map: (#struct){
+          app01: (#struct){
+            name: (string){ "app01" }
+            image: (string){ string }
+            desc: (string){ string }
+          }
+        }
+      }
+    }
+    1: (struct){
+      deployment: (struct){
+        #map: (#struct){
+          app01: (#struct){
+            image: (string){ "image01" }
+          }
+        }
+      }
+    }
+  }
+  results: (struct){
+    deployment: (struct){
+      #map: (#struct){
+        app01: (#struct){
+          image: (string){ "image01" }
+          name: (string){ "app01" }
+          desc: (string){ string }
+        }
+      }
+    }
+  }
+}
+-- out/compile --
+--- in.cue
+{
+  key: "app01"
+  manifests: [
+    {
+      deployment: {
+        #map: {
+          [string]: {
+            name: string
+            image: string
+            desc: string
+          }
+        }
+      }
+      deployment: {
+        #map: {
+          "\(〈3;key〉)": {
+            name: "app01"
+          }
+        }
+      }
+    },
+    {
+      deployment: {
+        #map: {
+          app01: {
+            image: "image01"
+          }
+        }
+      }
+    },
+  ]
+  results: _
+  for _, manifest in 〈0;manifests〉 {
+    results: 〈1;manifest〉
+  }
+}
diff --git a/cue/testdata/eval/resolve_basic.txtar b/cue/testdata/eval/resolve_basic.txtar
new file mode 100644
index 0000000..5b5f01b
--- /dev/null
+++ b/cue/testdata/eval/resolve_basic.txtar
@@ -0,0 +1,40 @@
+-- in.cue --
+	a: 1
+	b: a + 1
+	d: {
+		x: _
+		y: b + x
+	}
+	e: d & {
+		x: 5
+	}
+
+-- out/eval --
+(struct){
+  a: (int){ 1 }
+  b: (int){ 2 }
+  d: (struct){
+    x: (_){ _ }
+    y: (_|_){
+      // [incomplete] non-concrete value *adt.BasicType in operand to +:
+      //     ./in.cue:5:6
+    }
+  }
+  e: (struct){
+    x: (int){ 5 }
+    y: (int){ 7 }
+  }
+}
+-- out/compile --
+--- in.cue
+{
+  a: 1
+  b: (〈0;a〉 + 1)
+  d: {
+    x: _
+    y: (〈1;b〉 + 〈0;x〉)
+  }
+  e: (〈0;d〉 & {
+    x: 5
+  })
+}
diff --git a/cue/testdata/eval/resolve_env.txtar b/cue/testdata/eval/resolve_env.txtar
new file mode 100644
index 0000000..45c8bb3
--- /dev/null
+++ b/cue/testdata/eval/resolve_env.txtar
@@ -0,0 +1,65 @@
+-- in.cue --
+a: {
+  d: int
+  b: {
+    c: d
+  }
+}
+x: {
+  d: 2
+  b: a.b.c // should be int, not 2
+}
+a1: y: 5
+a1: a2: a3: a4: a5: a1.y
+b: a1.a2.a3.a4.a5
+-- out/eval --
+(struct){
+  a: (struct){
+    d: (int){ int }
+    b: (struct){
+      c: (int){ int }
+    }
+  }
+  x: (struct){
+    d: (int){ 2 }
+    b: (int){ int }
+  }
+  a1: (struct){
+    y: (int){ 5 }
+    a2: (struct){
+      a3: (struct){
+        a4: (struct){
+          a5: (int){ 5 }
+        }
+      }
+    }
+  }
+  b: (int){ 5 }
+}
+-- out/compile --
+--- in.cue
+{
+  a: {
+    d: int
+    b: {
+      c: 〈1;d〉
+    }
+  }
+  x: {
+    d: 2
+    b: 〈1;a〉.b.c
+  }
+  a1: {
+    y: 5
+  }
+  a1: {
+    a2: {
+      a3: {
+        a4: {
+          a5: 〈4;a1〉.y
+        }
+      }
+    }
+  }
+  b: 〈0;a1〉.a2.a3.a4.a5
+}
diff --git a/cue/testdata/eval/selectors.txtar b/cue/testdata/eval/selectors.txtar
new file mode 100644
index 0000000..5b5f01b
--- /dev/null
+++ b/cue/testdata/eval/selectors.txtar
@@ -0,0 +1,40 @@
+-- in.cue --
+	a: 1
+	b: a + 1
+	d: {
+		x: _
+		y: b + x
+	}
+	e: d & {
+		x: 5
+	}
+
+-- out/eval --
+(struct){
+  a: (int){ 1 }
+  b: (int){ 2 }
+  d: (struct){
+    x: (_){ _ }
+    y: (_|_){
+      // [incomplete] non-concrete value *adt.BasicType in operand to +:
+      //     ./in.cue:5:6
+    }
+  }
+  e: (struct){
+    x: (int){ 5 }
+    y: (int){ 7 }
+  }
+}
+-- out/compile --
+--- in.cue
+{
+  a: 1
+  b: (〈0;a〉 + 1)
+  d: {
+    x: _
+    y: (〈1;b〉 + 〈0;x〉)
+  }
+  e: (〈0;d〉 & {
+    x: 5
+  })
+}
diff --git a/cue/testdata/eval/unify.txtar b/cue/testdata/eval/unify.txtar
new file mode 100644
index 0000000..2ac87ed
--- /dev/null
+++ b/cue/testdata/eval/unify.txtar
@@ -0,0 +1,54 @@
+-- in.cue --
+a: d: {
+    #base
+    #info: {...}
+    Y: #info.X
+}
+
+#base: {
+    #info: {...}
+}
+
+a: [Name=string]: {#info: {
+    X: "foo"
+}}
+-- out/eval --
+(struct){
+  a: (struct){
+    d: (#struct){
+      #info: (#struct){
+        X: (string){ "foo" }
+      }
+      Y: (string){ "foo" }
+    }
+  }
+  #base: (#struct){
+    #info: (#struct){
+    }
+  }
+}
+-- out/compile --
+--- in.cue
+{
+  a: {
+    d: {
+      〈2;#base〉
+      #info: {
+        ...
+      }
+      Y: 〈0;#info〉.X
+    }
+  }
+  #base: {
+    #info: {
+      ...
+    }
+  }
+  a: {
+    [string]: {
+      #info: {
+        X: "foo"
+      }
+    }
+  }
+}
diff --git a/cue/testdata/export/000.txtar b/cue/testdata/export/000.txtar
index 477870c..87e1b5a 100644
--- a/cue/testdata/export/000.txtar
+++ b/cue/testdata/export/000.txtar
@@ -17,3 +17,5 @@
 {
   "hello"
 }
+-- out/eval --
+(string){ "hello" }
diff --git a/cue/testdata/export/001.txtar b/cue/testdata/export/001.txtar
index 4153b68..53f8e21 100644
--- a/cue/testdata/export/001.txtar
+++ b/cue/testdata/export/001.txtar
@@ -17,3 +17,5 @@
 {
   'hello'
 }
+-- out/eval --
+(bytes){ 'hello' }
diff --git a/cue/testdata/export/002.txtar b/cue/testdata/export/002.txtar
index b878ce5..9097c1c 100644
--- a/cue/testdata/export/002.txtar
+++ b/cue/testdata/export/002.txtar
@@ -28,3 +28,5 @@
 {
   'hello\nworld'
 }
+-- out/eval --
+(bytes){ 'hello\nworld' }
diff --git a/cue/testdata/export/003.txtar b/cue/testdata/export/003.txtar
index 876a8bf..ca8e6b5 100644
--- a/cue/testdata/export/003.txtar
+++ b/cue/testdata/export/003.txtar
@@ -28,3 +28,5 @@
 {
   "hello\nworld"
 }
+-- out/eval --
+(string){ "hello\nworld" }
diff --git a/cue/testdata/export/004.txtar b/cue/testdata/export/004.txtar
index 359e551..d201ad7 100644
--- a/cue/testdata/export/004.txtar
+++ b/cue/testdata/export/004.txtar
@@ -22,3 +22,10 @@
     _bar: int
   }
 }
+-- out/eval --
+(struct){
+  $type: (int){ 3 }
+  "_": (int){ int }
+  "_foo": (int){ int }
+  _bar: (int){ int }
+}
diff --git a/cue/testdata/export/005.txtar b/cue/testdata/export/005.txtar
index 89885a8..fb4dc48 100644
--- a/cue/testdata/export/005.txtar
+++ b/cue/testdata/export/005.txtar
@@ -21,3 +21,12 @@
     f: string
   }
 }
+-- out/eval --
+(struct){
+  a: (int){ 1 }
+  b: (int){ 3 }
+  c: (null){ null }
+  d: (bool){ true }
+  e: (_){ _ }
+  f: (string){ string }
+}
diff --git a/cue/testdata/export/006.txtar b/cue/testdata/export/006.txtar
index fb9eaf1..5396597 100644
--- a/cue/testdata/export/006.txtar
+++ b/cue/testdata/export/006.txtar
@@ -25,3 +25,23 @@
     e: 〈0;a〉.t[2:3]
   }
 }
+-- out/eval --
+(struct){
+  a: (struct){
+    b: (float){ 2.0 }
+    s: (string){ "abc" }
+  }
+  b: (float){ 2.0 }
+  c: (_|_){
+    // [incomplete] undefined field c:
+    //     ./in.cue:1:38
+  }
+  d: (_|_){
+    // [incomplete] undefined field d:
+    //     ./in.cue:1:46
+  }
+  e: (_|_){
+    // [incomplete] undefined field t:
+    //     ./in.cue:1:57
+  }
+}
diff --git a/cue/testdata/export/007.txtar b/cue/testdata/export/007.txtar
index e3dc540..b4ad798 100644
--- a/cue/testdata/export/007.txtar
+++ b/cue/testdata/export/007.txtar
@@ -25,3 +25,24 @@
     e: 〈0;#a〉.t[2:3]
   }
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  #a: (#struct){
+    b: (float){ 2.0 }
+    s: (string){ "abc" }
+  }
+  b: (float){ 2.0 }
+  c: (_|_){
+    // [eval] undefined field c:
+    //     ./in.cue:1:41
+  }
+  d: (_|_){
+    // [eval] undefined field d:
+    //     ./in.cue:1:50
+  }
+  e: (_|_){
+    // [eval] undefined field t:
+    //     ./in.cue:1:62
+  }
+}
diff --git a/cue/testdata/export/008.txtar b/cue/testdata/export/008.txtar
index e490970..7001856 100644
--- a/cue/testdata/export/008.txtar
+++ b/cue/testdata/export/008.txtar
@@ -14,3 +14,13 @@
     ]
   }
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  a: (_|_){
+    // [eval]
+    0: (_|_){
+      // [eval] incompatible values *adt.Num and *adt.Num
+    }
+  }
+}
diff --git a/cue/testdata/export/009.txtar b/cue/testdata/export/009.txtar
index f3971d9..a6f04b7 100644
--- a/cue/testdata/export/009.txtar
+++ b/cue/testdata/export/009.txtar
@@ -72,3 +72,33 @@
     ]
   }
 }
+-- out/eval --
+(struct){
+  a: (#list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+    2: (int){ int }
+    3: (int){ int }
+    4: (int){ int }
+  }
+  b: (list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+  }
+  c: (list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+  }
+  d: (list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+  }
+  e: (list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+  }
+  f: (list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+  }
+}
diff --git a/cue/testdata/export/010.txtar b/cue/testdata/export/010.txtar
index 6b4f65d..be462a4 100644
--- a/cue/testdata/export/010.txtar
+++ b/cue/testdata/export/010.txtar
@@ -73,3 +73,33 @@
     ]
   }
 }
+-- out/eval --
+(struct){
+  a: (#list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+    2: (int){ int }
+    3: (int){ int }
+    4: (int){ int }
+  }
+  b: (list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+  }
+  c: (list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+  }
+  d: (list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+  }
+  e: (list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+  }
+  f: (list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+  }
+}
diff --git a/cue/testdata/export/011.txtar b/cue/testdata/export/011.txtar
index 3b62924..a998183 100644
--- a/cue/testdata/export/011.txtar
+++ b/cue/testdata/export/011.txtar
@@ -33,3 +33,14 @@
     d: 〈0;a〉["b"]
   }
 }
+-- out/eval --
+(struct){
+  a: (struct){
+    b: (#list){
+    }
+  }
+  c: (#list){
+  }
+  d: (#list){
+  }
+}
diff --git a/cue/testdata/export/012.txtar b/cue/testdata/export/012.txtar
index f34909f..6b2570f 100644
--- a/cue/testdata/export/012.txtar
+++ b/cue/testdata/export/012.txtar
@@ -14,3 +14,11 @@
     b: 〈0;a〉[2:3]
   }
 }
+-- out/eval --
+(struct){
+  a: ((int|string)){ |(*(string){ "foo" }, *(string){ "bar" }, *(string){ string }, (int){ int }) }
+  b: (_|_){
+    // [incomplete] non-concrete slice subject *adt.FieldReference:
+    //     ./in.cue:1:41
+  }
+}
diff --git a/cue/testdata/export/013.txtar b/cue/testdata/export/013.txtar
index 4d63d82..ad96a09 100644
--- a/cue/testdata/export/013.txtar
+++ b/cue/testdata/export/013.txtar
@@ -13,3 +13,7 @@
     a: ((>=0 & <=10) & !=1)
   }
 }
+-- out/eval --
+(struct){
+  a: (number){ &(>=0, <=10, !=1) }
+}
diff --git a/cue/testdata/export/014.txtar b/cue/testdata/export/014.txtar
index 330ae1f..378a97e 100644
--- a/cue/testdata/export/014.txtar
+++ b/cue/testdata/export/014.txtar
@@ -14,3 +14,7 @@
     a: ((>=0 & <=10) & !=1)
   }
 }
+-- out/eval --
+(struct){
+  a: (number){ &(>=0, <=10, !=1) }
+}
diff --git a/cue/testdata/export/015.txtar b/cue/testdata/export/015.txtar
index 18e54f1..0029871 100644
--- a/cue/testdata/export/015.txtar
+++ b/cue/testdata/export/015.txtar
@@ -20,3 +20,10 @@
     ]
   }
 }
+-- out/eval --
+(struct){
+  a: (int){ |((int){ 1 }, (int){ 2 }) }
+  b: (#list){
+    0: (int){ |((int){ 1 }, (int){ 2 }) }
+  }
+}
diff --git a/cue/testdata/export/016.txtar b/cue/testdata/export/016.txtar
index 2d5c8a4..dea2289 100644
--- a/cue/testdata/export/016.txtar
+++ b/cue/testdata/export/016.txtar
@@ -37,17 +37,33 @@
 {
   {
     u16: ((int & >=0) & <=65535)
-    u32: (>=0 & <=4294967295)
-    u64: (>=0 & <=18446744073709551615)
-    u128: (>=0 & <=340282366920938463463374607431768211455)
-    u8: (>=0 & <=255)
-    ua: ((>=0 & <=65535) & >0)
+    u32: &(int, >=0, <=4294967295)
+    u64: &(int, >=0, <=18446744073709551615)
+    u128: &(int, >=0, <=340282366920938463463374607431768211455)
+    u8: &(int, >=0, <=255)
+    ua: (&(int, >=0, <=65535) & >0)
     us: ((>=0 & <10000) & int)
     i16: ((>=-32768 & int) & <=32767)
-    i32: ((>=-2147483648 & <=2147483647) & >0)
-    i64: (>=-9223372036854775808 & <=9223372036854775807)
-    i128: (>=-170141183460469231731687303715884105728 & <=170141183460469231731687303715884105727)
-    f64: (>=-1.797693134862315708145274237317043567981E+308 & <=1.797693134862315708145274237317043567981E+308)
-    fi: ((>=-1.797693134862315708145274237317043567981E+308 & <=1.797693134862315708145274237317043567981E+308) & int)
+    i32: (&(int, >=-2147483648, <=2147483647) & >0)
+    i64: &(int, >=-9223372036854775808, <=9223372036854775807)
+    i128: &(int, >=-170141183460469231731687303715884105728, <=170141183460469231731687303715884105727)
+    f64: &(>=-1.797693134862315708145274237317043567981E+308, <=1.797693134862315708145274237317043567981E+308)
+    fi: (&(>=-1.797693134862315708145274237317043567981E+308, <=1.797693134862315708145274237317043567981E+308) & int)
   }
 }
+-- out/eval --
+(struct){
+  u16: (int){ &(>=0, <=65535, int) }
+  u32: (int){ &(>=0, <=4294967295, int) }
+  u64: (int){ &(>=0, <=18446744073709551615, int) }
+  u128: (int){ &(>=0, <=340282366920938463463374607431768211455, int) }
+  u8: (int){ &(>=0, <=255, int) }
+  ua: (int){ &(>0, <=65535, int) }
+  us: (int){ &(>=0, <10000, int) }
+  i16: (int){ &(>=-32768, <=32767, int) }
+  i32: (int){ &(>0, <=2147483647, int) }
+  i64: (int){ &(>=-9223372036854775808, <=9223372036854775807, int) }
+  i128: (int){ &(>=-170141183460469231731687303715884105728, <=170141183460469231731687303715884105727, int) }
+  f64: (number){ &(>=-1.797693134862315708145274237317043567981E+308, <=1.797693134862315708145274237317043567981E+308) }
+  fi: (int){ &(>=-1.797693134862315708145274237317043567981E+308, <=1.797693134862315708145274237317043567981E+308, int) }
+}
diff --git a/cue/testdata/export/017.txtar b/cue/testdata/export/017.txtar
index 8ef3f0e..4c1db35 100644
--- a/cue/testdata/export/017.txtar
+++ b/cue/testdata/export/017.txtar
@@ -36,3 +36,13 @@
     }
   }
 }
+-- out/eval --
+(struct){
+  a: (#list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+  }
+  b: (struct){
+    "1": (int){ 2 }
+  }
+}
diff --git a/cue/testdata/export/018.txtar b/cue/testdata/export/018.txtar
index fb48a4d..afc6548 100644
--- a/cue/testdata/export/018.txtar
+++ b/cue/testdata/export/018.txtar
@@ -33,3 +33,14 @@
     ]
   }
 }
+-- out/eval --
+(struct){
+  a: (#list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+  }
+  b: (#list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+  }
+}
diff --git a/cue/testdata/export/019.txtar b/cue/testdata/export/019.txtar
index 68c5e65..3f0ab87 100644
--- a/cue/testdata/export/019.txtar
+++ b/cue/testdata/export/019.txtar
@@ -14,3 +14,12 @@
     b: "Count: \(〈0;a〉) times"
   }
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  a: (number){ &(>=0, <=10) }
+  b: (_|_){
+    // [eval] invalid interpolation: cannot use *adt.Conjunction (type number) as type string:
+    //     ./in.cue:1:20
+  }
+}
diff --git a/cue/testdata/export/021.txtar b/cue/testdata/export/021.txtar
index 2aefbcf..6618525 100644
--- a/cue/testdata/export/021.txtar
+++ b/cue/testdata/export/021.txtar
@@ -42,3 +42,18 @@
     }
   }
 }
+-- out/eval --
+(struct){
+  b: (struct){
+    idx: (_|_){
+      // [incomplete] invalid non-ground value *adt.BasicType (must be concrete string)
+    }
+    str: (string){ string }
+    a: (struct){
+      b: (int){ 4 }
+    }
+  }
+  a: (struct){
+    b: (int){ 3 }
+  }
+}
diff --git a/cue/testdata/export/022.txtar b/cue/testdata/export/022.txtar
index 8281305..d60c7ac 100644
--- a/cue/testdata/export/022.txtar
+++ b/cue/testdata/export/022.txtar
@@ -83,3 +83,18 @@
     }
   }
 }
+-- out/eval --
+(struct){
+  job: (struct){
+    list: (struct){
+      command: (string){ "ls" }
+      name: (string){ "list" }
+      replicas: (int){ |(*(int){ 1 }, (int){ &(>=0, int) }) }
+    }
+    nginx: (struct){
+      command: (string){ "nginx" }
+      replicas: (int){ 2 }
+      name: (string){ "nginx" }
+    }
+  }
+}
diff --git a/cue/testdata/export/023.txtar b/cue/testdata/export/023.txtar
index 3d05af8..2612cd7 100644
--- a/cue/testdata/export/023.txtar
+++ b/cue/testdata/export/023.txtar
@@ -79,3 +79,26 @@
     }
   }
 }
+-- out/eval --
+(struct){
+  #emb: (#struct){
+    a: (int){ 1 }
+    sub: (#struct){
+      f: (int){ 3 }
+    }
+  }
+  #def: (#struct){
+    a: (int){ 1 }
+    sub: (#struct){
+      f: (int){ 3 }
+    }
+    b: (int){ 2 }
+  }
+  #f: (#struct){
+    a: (int){ 10 }
+  }
+  #e: (#struct){
+    a: (int){ 10 }
+    b: (int){ &(<100, int) }
+  }
+}
diff --git a/cue/testdata/export/024.txtar b/cue/testdata/export/024.txtar
index 90f2748..cc381c9 100644
--- a/cue/testdata/export/024.txtar
+++ b/cue/testdata/export/024.txtar
@@ -58,3 +58,40 @@
     val2: 〈0;#def2〉
   }
 }
+-- out/eval --
+(struct){
+  reg: (struct){
+    foo: (int){ 1 }
+    bar: (struct){
+      baz: (int){ 3 }
+    }
+  }
+  #def: (#struct){
+    a: (int){ 1 }
+    sub: (#struct){
+      foo: (int){ 1 }
+      bar: (#struct){
+        baz: (int){ 3 }
+      }
+    }
+  }
+  val: (#struct){
+    a: (int){ 1 }
+    sub: (#struct){
+      foo: (int){ 1 }
+      bar: (#struct){
+        baz: (int){ 3 }
+      }
+    }
+  }
+  #def2: (#struct){
+    a: (#struct){
+      b: (int){ int }
+    }
+  }
+  val2: (#struct){
+    a: (#struct){
+      b: (int){ int }
+    }
+  }
+}
diff --git a/cue/testdata/export/025.txtar b/cue/testdata/export/025.txtar
index 5b4d7da..b63886f 100644
--- a/cue/testdata/export/025.txtar
+++ b/cue/testdata/export/025.txtar
@@ -40,3 +40,11 @@
     c: (*1|2)
   }
 }
+-- out/eval --
+(struct){
+  b: (_|_){
+    // [incomplete] invalid non-ground value *adt.BasicType (must be concrete int)
+  }
+  a: (int){ int }
+  c: (int){ |(*(int){ 1 }, (int){ 2 }) }
+}
diff --git a/cue/testdata/export/026.txtar b/cue/testdata/export/026.txtar
index 709bc1d..84e3611 100644
--- a/cue/testdata/export/026.txtar
+++ b/cue/testdata/export/026.txtar
@@ -28,3 +28,6 @@
     }
   }
 }
+-- out/eval --
+(struct){
+}
diff --git a/cue/testdata/export/029.txtar b/cue/testdata/export/029.txtar
index 10d6edd..facd344 100644
--- a/cue/testdata/export/029.txtar
+++ b/cue/testdata/export/029.txtar
@@ -49,3 +49,24 @@
     })
   }
 }
+-- out/eval --
+(struct){
+  #And: (#struct){
+    #: (#struct){
+      "Fn::And": (list){
+      }
+    }
+  }
+  #Ands: (#struct){
+    #: (#struct){
+      "Fn::And": (#list){
+        0: ((int|struct)){ |((int){ 3 }, (#struct){
+            #: (#struct){
+              "Fn::And": (list){
+              }
+            }
+          }) }
+      }
+    }
+  }
+}
diff --git a/cue/testdata/export/031.txtar b/cue/testdata/export/031.txtar
index eb3fb49..3cc9ae7 100644
--- a/cue/testdata/export/031.txtar
+++ b/cue/testdata/export/031.txtar
@@ -24,3 +24,14 @@
     ]))
   }
 }
+-- out/eval --
+(struct){
+  A: (#list){
+    0: (int){ &(>=0, int) }
+  }
+  B: (list){ |((#list){
+      0: (int){ 10 }
+    }, (#list){
+      0: (int){ 192 }
+    }) }
+}
diff --git a/cue/testdata/export/032.txtar b/cue/testdata/export/032.txtar
index 4205cc5..5b92dd7 100644
--- a/cue/testdata/export/032.txtar
+++ b/cue/testdata/export/032.txtar
@@ -24,3 +24,7 @@
     foo: 3
   }
 }
+-- out/eval --
+(struct){
+  foo: (int){ 3 }
+}
diff --git a/cue/testdata/fulleval/000_detect_conflicting_value.txtar b/cue/testdata/fulleval/000_detect_conflicting_value.txtar
index 703940b..2e5a97f 100644
--- a/cue/testdata/fulleval/000_detect_conflicting_value.txtar
+++ b/cue/testdata/fulleval/000_detect_conflicting_value.txtar
@@ -15,3 +15,9 @@
   a: 8000.9
   a: (7080|int)
 }
+-- out/eval --
+(struct){
+  a: (_|_){
+    // [incomplete] empty disjunction
+  }
+}
diff --git a/cue/testdata/fulleval/001_conflicts_in_optional_fields_are_okay_.txtar b/cue/testdata/fulleval/001_conflicts_in_optional_fields_are_okay_.txtar
index a2eb226..0585389 100644
--- a/cue/testdata/fulleval/001_conflicts_in_optional_fields_are_okay_.txtar
+++ b/cue/testdata/fulleval/001_conflicts_in_optional_fields_are_okay_.txtar
@@ -34,3 +34,16 @@
     b?: 4
   })
 }
+-- out/eval --
+(struct){
+  d: (struct){ |((struct){
+      a: (int){ 1 }
+    }, (struct){
+      a: (int){ 2 }
+    }) }
+  c: (struct){ |((struct){
+      a: (int){ 1 }
+    }, (struct){
+      a: (int){ 2 }
+    }) }
+}
diff --git a/cue/testdata/fulleval/002_resolve_all_disjunctions.txtar b/cue/testdata/fulleval/002_resolve_all_disjunctions.txtar
index 73896a5..c116336 100644
--- a/cue/testdata/fulleval/002_resolve_all_disjunctions.txtar
+++ b/cue/testdata/fulleval/002_resolve_all_disjunctions.txtar
@@ -81,3 +81,20 @@
     }
   }
 }
+-- out/eval --
+(struct){
+  service: (struct){
+    foo: (struct){
+      name: (string){ |(*(string){ "foo" }, (string){ string }) }
+      port: (int){ |(*(int){ 7080 }, (int){ int }) }
+    }
+    bar: (struct){
+      port: (int){ 8000 }
+      name: (string){ |(*(string){ "bar" }, (string){ string }) }
+    }
+    baz: (struct){
+      name: (string){ "foobar" }
+      port: (int){ |(*(int){ 7080 }, (int){ int }) }
+    }
+  }
+}
diff --git a/cue/testdata/fulleval/003_field_templates.txtar b/cue/testdata/fulleval/003_field_templates.txtar
index e7da736..8b367aa 100644
--- a/cue/testdata/fulleval/003_field_templates.txtar
+++ b/cue/testdata/fulleval/003_field_templates.txtar
@@ -129,3 +129,29 @@
     bar: _
   }
 }
+-- out/eval --
+(struct){
+  a: (struct){
+    k: (int){ 1 }
+  }
+  b: (struct){
+    v: (struct){
+      x: (int){ 0 }
+      y: (int){ |(*(int){ 1 }, (int){ int }) }
+    }
+    w: (struct){
+      y: (int){ 0 }
+      x: (int){ 0 }
+    }
+  }
+  c: (struct){
+    foo: (struct){
+      name: (string){ "foo" }
+      y: (int){ 1 }
+    }
+    bar: (struct){
+      name: (string){ "bar" }
+      y: (int){ 1 }
+    }
+  }
+}
diff --git a/cue/testdata/fulleval/004_field_comprehension.txtar b/cue/testdata/fulleval/004_field_comprehension.txtar
index b73710c..0efe4e9 100644
--- a/cue/testdata/fulleval/004_field_comprehension.txtar
+++ b/cue/testdata/fulleval/004_field_comprehension.txtar
@@ -89,3 +89,20 @@
     }
   }
 }
+-- out/eval --
+(struct){
+  a: (struct){
+    b: (int){ 2 }
+    c: (int){ 3 }
+  }
+  b: (struct){
+    a: (int){ 1 }
+    b: (int){ 2 }
+    c: (int){ 3 }
+    d: (int){ 4 }
+  }
+  c: (struct){
+    b: (int){ 2 }
+    c: (int){ 3 }
+  }
+}
diff --git a/cue/testdata/fulleval/005_conditional_field.txtar b/cue/testdata/fulleval/005_conditional_field.txtar
index f00f3e8..b200d78 100644
--- a/cue/testdata/fulleval/005_conditional_field.txtar
+++ b/cue/testdata/fulleval/005_conditional_field.txtar
@@ -54,3 +54,15 @@
     }
   }
 }
+-- out/eval --
+(struct){
+  b: (bool){ true }
+  c: (struct){
+    a: (int){ 3 }
+  }
+  d: (_|_){
+    // [incomplete] incomplete
+    a: (int){ int }
+  }
+  a: (string){ "foo" }
+}
diff --git a/cue/testdata/fulleval/006_referencing_field_in_field_comprehension.txtar b/cue/testdata/fulleval/006_referencing_field_in_field_comprehension.txtar
index 8e873dd..0d812b9 100644
--- a/cue/testdata/fulleval/006_referencing_field_in_field_comprehension.txtar
+++ b/cue/testdata/fulleval/006_referencing_field_in_field_comprehension.txtar
@@ -56,3 +56,14 @@
     }
   }
 }
+-- out/eval --
+(struct){
+  a: (struct){
+    b: (struct){
+      c: (int){ 4 }
+      d: (int){ 5 }
+    }
+    c: (int){ 4 }
+    d: (int){ 5 }
+  }
+}
diff --git a/cue/testdata/fulleval/007_different_labels_for_templates.txtar b/cue/testdata/fulleval/007_different_labels_for_templates.txtar
index 47f95ed..9d20351 100644
--- a/cue/testdata/fulleval/007_different_labels_for_templates.txtar
+++ b/cue/testdata/fulleval/007_different_labels_for_templates.txtar
@@ -46,3 +46,11 @@
     foo: {}
   }
 }
+-- out/eval --
+(struct){
+  a: (struct){
+    foo: (struct){
+      name: (string){ "foo" }
+    }
+  }
+}
diff --git a/cue/testdata/fulleval/008_nested_templates_in_one_field.txtar b/cue/testdata/fulleval/008_nested_templates_in_one_field.txtar
index b27f3ac..c20d4e0 100644
--- a/cue/testdata/fulleval/008_nested_templates_in_one_field.txtar
+++ b/cue/testdata/fulleval/008_nested_templates_in_one_field.txtar
@@ -141,3 +141,33 @@
     }
   }
 }
+-- out/eval --
+(struct){
+  a: (struct){
+    A: (struct){
+      b: (struct){
+        B: (struct){
+          name: (string){ "A" }
+          kind: (string){ "B" }
+        }
+      }
+    }
+    C: (struct){
+      b: (struct){
+        D: (struct){
+          name: (string){ "C" }
+          kind: (string){ "D" }
+        }
+      }
+    }
+    EE: (struct){
+      b: (struct){
+        FF: (struct){
+          c: (string){ "bar" }
+          name: (string){ "EE" }
+          kind: (string){ "FF" }
+        }
+      }
+    }
+  }
+}
diff --git a/cue/testdata/fulleval/009_template_unification_within_one_struct.txtar b/cue/testdata/fulleval/009_template_unification_within_one_struct.txtar
index fe90f38..4119764 100644
--- a/cue/testdata/fulleval/009_template_unification_within_one_struct.txtar
+++ b/cue/testdata/fulleval/009_template_unification_within_one_struct.txtar
@@ -90,3 +90,21 @@
     }
   }
 }
+-- out/eval --
+(struct){
+  a: (struct){
+    A: (struct){
+      name: (string){ "A" }
+      kind: (string){ "A" }
+    }
+    C: (struct){
+      name: (string){ "C" }
+      kind: (string){ "C" }
+    }
+    E: (struct){
+      c: (string){ "bar" }
+      name: (string){ "E" }
+      kind: (string){ "E" }
+    }
+  }
+}
diff --git a/cue/testdata/fulleval/010_field_comprehensions_with_multiple_keys.txtar b/cue/testdata/fulleval/010_field_comprehensions_with_multiple_keys.txtar
index b07c106..d1797d1 100644
--- a/cue/testdata/fulleval/010_field_comprehensions_with_multiple_keys.txtar
+++ b/cue/testdata/fulleval/010_field_comprehensions_with_multiple_keys.txtar
@@ -168,3 +168,50 @@
     }
   }
 }
+-- out/eval --
+(struct){
+  a: (struct){
+    A: (struct){
+      b: (struct){
+        B: (struct){
+          a: (string){ "A" }
+          b: (string){ "B" }
+        }
+      }
+    }
+    C: (struct){
+      b: (struct){
+        D: (struct){
+          a: (string){ "C" }
+          b: (string){ "D" }
+        }
+      }
+    }
+    E: (struct){
+      b: (struct){
+        F: (struct){
+          a: (string){ "E" }
+          b: (string){ "F" }
+        }
+      }
+    }
+  }
+  A: (struct){
+    B: (struct){
+      a: (string){ "A" }
+      b: (string){ "B" }
+    }
+  }
+  C: (struct){
+    D: (struct){
+      a: (string){ "C" }
+      b: (string){ "D" }
+    }
+  }
+  E: (struct){
+    F: (struct){
+      a: (string){ "E" }
+      b: (string){ "F" }
+    }
+  }
+}
diff --git a/cue/testdata/fulleval/011_field_comprehensions_with_templates.txtar b/cue/testdata/fulleval/011_field_comprehensions_with_templates.txtar
index 425c80d..69eb849 100644
--- a/cue/testdata/fulleval/011_field_comprehensions_with_templates.txtar
+++ b/cue/testdata/fulleval/011_field_comprehensions_with_templates.txtar
@@ -77,3 +77,16 @@
     }
   }
 }
+-- out/eval --
+(struct){
+  num: (int){ 1 }
+  a: (struct){
+    b: (struct){
+      c: (struct){
+        d: (string){ "bar" }
+        name: (string){ "b" }
+        kind: (string){ "c" }
+      }
+    }
+  }
+}
diff --git a/cue/testdata/fulleval/012_disjunctions_of_lists.txtar b/cue/testdata/fulleval/012_disjunctions_of_lists.txtar
index 19719f7..2b1ed96 100644
--- a/cue/testdata/fulleval/012_disjunctions_of_lists.txtar
+++ b/cue/testdata/fulleval/012_disjunctions_of_lists.txtar
@@ -32,3 +32,21 @@
     "d",
   ])
 }
+-- out/eval --
+(struct){
+  l: (list){ |(*(#list){
+      0: (int){ int }
+      1: (int){ int }
+    }, (#list){
+      0: (string){ string }
+      1: (string){ string }
+    }) }
+  l1: (#list){
+    0: (string){ "a" }
+    1: (string){ "b" }
+  }
+  l2: (#list){
+    0: (string){ "c" }
+    1: (string){ "d" }
+  }
+}
diff --git a/cue/testdata/fulleval/013_normalization.txtar b/cue/testdata/fulleval/013_normalization.txtar
index d9da622..ae38c67 100644
--- a/cue/testdata/fulleval/013_normalization.txtar
+++ b/cue/testdata/fulleval/013_normalization.txtar
@@ -1,5 +1,7 @@
-# DO NOT EDIT; generated by go run testdata/gen.go
-#
+TODO: the new evaluator currently does not normalize disjuncts.
+It needs to be determined under which circumstances this is desirable.
+The redundancy that remains from not normalizing can be useful.
+
 #name: normalization
 #evalFull
 -- in.cue --
@@ -19,3 +21,9 @@
   b: (*1|*int)
   c: (*1.0|*float)
 }
+-- out/eval --
+(struct){
+  a: (string){ string }
+  b: (int){ |(*(int){ 1 }, *(int){ int }) }
+  c: (float){ |(*(float){ 1.0 }, *(float){ float }) }
+}
diff --git a/cue/testdata/fulleval/014_default_disambiguation_and_elimination.txtar b/cue/testdata/fulleval/014_default_disambiguation_and_elimination.txtar
index 7e65db7..4fd2f63 100644
--- a/cue/testdata/fulleval/014_default_disambiguation_and_elimination.txtar
+++ b/cue/testdata/fulleval/014_default_disambiguation_and_elimination.txtar
@@ -26,3 +26,11 @@
   d: (〈0;b〉 & 〈0;a〉)
   e: (*1|*1)
 }
+-- out/eval --
+(struct){
+  a: (int){ |(*(int){ 1 }, (int){ int }) }
+  b: (int){ |(*(int){ 3 }, (int){ int }) }
+  c: (int){ |((int){ 1 }, (int){ 3 }, (int){ int }) }
+  d: (int){ |((int){ 3 }, (int){ 1 }, (int){ int }) }
+  e: (int){ 1 }
+}
diff --git a/cue/testdata/fulleval/016_struct_comprehension_with_template.txtar b/cue/testdata/fulleval/016_struct_comprehension_with_template.txtar
index 2073981..41184ee 100644
--- a/cue/testdata/fulleval/016_struct_comprehension_with_template.txtar
+++ b/cue/testdata/fulleval/016_struct_comprehension_with_template.txtar
@@ -135,3 +135,40 @@
     }
   }
 }
+-- out/eval --
+(struct){
+  result: (#list){
+    0: (struct){
+      name: (string){ |(*(string){ "foo" }, (string){ string }) }
+      type: (string){ "service" }
+      port: (int){ |(*(int){ 7080 }, (int){ int }) }
+    }
+    1: (struct){
+      port: (int){ 8000 }
+      name: (string){ |(*(string){ "bar" }, (string){ string }) }
+      type: (string){ "service" }
+    }
+    2: (struct){
+      name: (string){ "foobar" }
+      type: (string){ "service" }
+      port: (int){ |(*(int){ 7080 }, (int){ int }) }
+    }
+  }
+  service: (struct){
+    foo: (struct){
+      name: (string){ |(*(string){ "foo" }, (string){ string }) }
+      type: (string){ "service" }
+      port: (int){ |(*(int){ 7080 }, (int){ int }) }
+    }
+    bar: (struct){
+      port: (int){ 8000 }
+      name: (string){ |(*(string){ "bar" }, (string){ string }) }
+      type: (string){ "service" }
+    }
+    baz: (struct){
+      name: (string){ "foobar" }
+      type: (string){ "service" }
+      port: (int){ |(*(int){ 7080 }, (int){ int }) }
+    }
+  }
+}
diff --git a/cue/testdata/fulleval/017_resolutions_in_struct_comprehension_keys.txtar b/cue/testdata/fulleval/017_resolutions_in_struct_comprehension_keys.txtar
index c44514e..1f089d3 100644
--- a/cue/testdata/fulleval/017_resolutions_in_struct_comprehension_keys.txtar
+++ b/cue/testdata/fulleval/017_resolutions_in_struct_comprehension_keys.txtar
@@ -30,3 +30,9 @@
     }
   }
 }
+-- out/eval --
+(struct){
+  a: (struct){
+    "c.": (string){ "a" }
+  }
+}
diff --git a/cue/testdata/fulleval/018_recursive_evaluation_within_list.txtar b/cue/testdata/fulleval/018_recursive_evaluation_within_list.txtar
index 9e6b75c..4181a81 100644
--- a/cue/testdata/fulleval/018_recursive_evaluation_within_list.txtar
+++ b/cue/testdata/fulleval/018_recursive_evaluation_within_list.txtar
@@ -58,3 +58,37 @@
     c: string
   }
 }
+-- out/eval --
+(struct){
+  l: (#list){
+    0: (struct){
+      d: (string){ "t" }
+      c: (string){ "t" }
+    }
+  }
+  a: (struct){
+    d: (string){ "t" }
+    c: (string){ "t" }
+  }
+  b: (struct){
+    d: (string){ string }
+    c: (string){ string }
+  }
+  l1: (#list){
+    0: (struct){
+      d: (string){ "st" }
+      c: (string){ "t" }
+    }
+  }
+  a1: (struct){
+    d: (string){ "st" }
+    c: (string){ "t" }
+  }
+  b1: (struct){
+    d: (_|_){
+      // [incomplete] non-concrete value *adt.BasicType in operand to +:
+      //     ./in.cue:10:5
+    }
+    c: (string){ string }
+  }
+}
diff --git a/cue/testdata/fulleval/019_ips.txtar b/cue/testdata/fulleval/019_ips.txtar
deleted file mode 100644
index a44d073..0000000
--- a/cue/testdata/fulleval/019_ips.txtar
+++ /dev/null
@@ -1,56 +0,0 @@
-# DO NOT EDIT; generated by go run testdata/gen.go
-#
-#name: ips
-#evalFull
--- in.cue --
-IP: 4 * [ uint8]
-
-Private:
-	*[ 192, 168, uint8, uint8] |
-	[ 10, uint8, uint8, uint8] |
-	[ 172, >=16 & <=32, uint8, uint8]
-
-Inst: Private & [ _, 10, ...]
-
-MyIP: Inst & [_, _, 10, 10]
--- out/def --
-IP: [uint8, uint8, uint8, uint8]
-Private: *[192, 168, uint8, uint8] | [10, uint8, uint8, uint8] | [172, >=16 & <=32, uint8, uint8]
-Inst: [10, 10, uint8, uint8]
-MyIP: [10, 10, 10, 10]
--- out/legacy-debug --
-<0>{IP: [(int & >=0 & int & <=255),(int & >=0 & int & <=255),(int & >=0 & int & <=255),(int & >=0 & int & <=255)], Private: [192,168,(int & >=0 & int & <=255),(int & >=0 & int & <=255)], Inst: [10,10,(int & >=0 & int & <=255),(int & >=0 & int & <=255)], MyIP: [10,10,10,10]}
--- out/compile --
---- in.cue
-{
-  IP: (4 * [
-    (>=0 & <=255),
-  ])
-  Private: (*[
-    192,
-    168,
-    (>=0 & <=255),
-    (>=0 & <=255),
-  ]|[
-    10,
-    (>=0 & <=255),
-    (>=0 & <=255),
-    (>=0 & <=255),
-  ]|[
-    172,
-    (>=16 & <=32),
-    (>=0 & <=255),
-    (>=0 & <=255),
-  ])
-  Inst: (〈0;Private〉 & [
-    _,
-    10,
-    ...,
-  ])
-  MyIP: (〈0;Inst〉 & [
-    _,
-    _,
-    10,
-    10,
-  ])
-}
diff --git a/cue/testdata/fulleval/020_complex_interaction_of_groundness.txtar b/cue/testdata/fulleval/020_complex_interaction_of_groundness.txtar
index 29d64ab..228da48 100644
--- a/cue/testdata/fulleval/020_complex_interaction_of_groundness.txtar
+++ b/cue/testdata/fulleval/020_complex_interaction_of_groundness.txtar
@@ -59,3 +59,23 @@
     }
   }
 }
+-- out/eval --
+(struct){
+  res: (#list){
+    0: (struct){
+      d: (string){ "b" }
+      s: (string){ "ab" }
+    }
+  }
+  a: (struct){
+    b: (struct){
+      c: (struct){
+        d: (string){ string }
+        s: (_|_){
+          // [incomplete] non-concrete value *adt.BasicType in operand to +:
+          //     ./in.cue:4:34
+        }
+      }
+    }
+  }
+}
diff --git a/cue/testdata/fulleval/021_complex_groundness_2.txtar b/cue/testdata/fulleval/021_complex_groundness_2.txtar
index a44907a..fc9cdeb 100644
--- a/cue/testdata/fulleval/021_complex_groundness_2.txtar
+++ b/cue/testdata/fulleval/021_complex_groundness_2.txtar
@@ -1,6 +1,6 @@
 # DO NOT EDIT; generated by go run testdata/gen.go
 #
-#name: complex groundness 2
+#name: complex groundness  2
 #evalFull
 -- in.cue --
 r1: f1 & {y: "c"}
@@ -69,3 +69,38 @@
     }
   }
 }
+-- out/eval --
+(struct){
+  r1: (struct){
+    y: (string){ "c" }
+    res: (struct){
+      d: (string){ "c" }
+      s: (string){ "ac" }
+    }
+  }
+  f1: (struct){
+    y: (string){ string }
+    res: (struct){
+      d: (string){ string }
+      s: (_|_){
+        // [incomplete] non-concrete value *adt.BasicType in operand to +:
+        //     ./in.cue:5:25
+        // non-concrete value *adt.BasicType in operand to +:
+        //     ./in.cue:6:34
+      }
+    }
+  }
+  a: (struct){
+    b: (struct){
+      c: (struct){
+        d: (string){ string }
+        s: (_|_){
+          // [incomplete] non-concrete value *adt.BasicType in operand to +:
+          //     ./in.cue:5:25
+          // non-concrete value *adt.BasicType in operand to +:
+          //     ./in.cue:6:34
+        }
+      }
+    }
+  }
+}
diff --git a/cue/testdata/fulleval/022_references_from_template_to_concrete.txtar b/cue/testdata/fulleval/022_references_from_template_to_concrete.txtar
index 552769c..dc6a8be 100644
--- a/cue/testdata/fulleval/022_references_from_template_to_concrete.txtar
+++ b/cue/testdata/fulleval/022_references_from_template_to_concrete.txtar
@@ -85,3 +85,26 @@
     }
   }
 }
+-- out/eval --
+(struct){
+  res: (#list){
+    0: (struct){
+      x: (struct){
+        b: (struct){
+          str: (string){ "DDDD" }
+        }
+        a: (string){ "XDDDD" }
+        c: (string){ "X" }
+      }
+    }
+  }
+  t: (struct){
+    x: (struct){
+      b: (struct){
+        str: (string){ "DDDD" }
+      }
+      a: (string){ "XDDDD" }
+      c: (string){ "X" }
+    }
+  }
+}
diff --git "a/cue/testdata/fulleval/024_Issue_\04323.txtar" "b/cue/testdata/fulleval/024_Issue_\04323.txtar"
index cbabfdf..effdf5d 100644
--- "a/cue/testdata/fulleval/024_Issue_\04323.txtar"
+++ "b/cue/testdata/fulleval/024_Issue_\04323.txtar"
@@ -26,3 +26,18 @@
     a: 3
   })
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  x: (struct){ |((struct){
+      a: (int){ 1 }
+    }, (struct){
+      a: (int){ 2 }
+    }) }
+  y: (_|_){
+    // [eval]
+    a: (_|_){
+      // [eval] incompatible values *adt.Num and *adt.Num
+    }
+  }
+}
diff --git a/cue/testdata/fulleval/026_dont_convert_incomplete_errors_to_non-incomplete.txtar b/cue/testdata/fulleval/026_dont_convert_incomplete_errors_to_non-incomplete.txtar
index 72b82ef..348ad9a 100644
--- a/cue/testdata/fulleval/026_dont_convert_incomplete_errors_to_non-incomplete.txtar
+++ b/cue/testdata/fulleval/026_dont_convert_incomplete_errors_to_non-incomplete.txtar
@@ -44,3 +44,58 @@
 s3:  strings.ContainsAny(str, "dd")
 -- out/legacy-debug --
 <0>{n1: <1>{min: <<2>.max, max: ><2>.min}, n2: -<3>.num, num: <4, n3: +<3>.num, n4: (<3>.num + <3>.num), n5: (<3>.num - <3>.num), n6: (<3>.num * <3>.num), n7: (<3>.num / <3>.num), b1: !<3>.is, is: bool, s1: ""+<3>.str+"", str: string, s2: strings.ContainsAny ("dd"), s3: <4>.ContainsAny (<3>.str,"dd")}
+-- out/eval --
+(_|_){
+  // [eval]
+  n1: (struct){
+    min: (_|_){
+      // [cycle] cycle error
+    }
+    max: (_|_){
+      // [cycle] cycle error
+    }
+  }
+  n2: (_|_){
+    // [incomplete] operand *adt.UnaryExpr of '-' not concrete (was number):
+    //     ./in.cue:4:6
+  }
+  n3: (_|_){
+    // [incomplete] operand *adt.UnaryExpr of '+' not concrete (was number):
+    //     ./in.cue:5:6
+  }
+  n4: (_|_){
+    // [incomplete] non-concrete value *adt.BoundValue in operand to +:
+    //     ./in.cue:6:5
+  }
+  n5: (_|_){
+    // [incomplete] non-concrete value *adt.BoundValue in operand to -:
+    //     ./in.cue:7:5
+  }
+  n6: (_|_){
+    // [incomplete] non-concrete value *adt.BoundValue in operand to *:
+    //     ./in.cue:8:5
+  }
+  n7: (_|_){
+    // [incomplete] non-concrete value *adt.BoundValue in operand to /:
+    //     ./in.cue:9:5
+  }
+  b1: (_|_){
+    // [incomplete] operand *adt.UnaryExpr of '!' not concrete (was bool):
+    //     ./in.cue:11:6
+  }
+  s1: (_|_){
+    // [incomplete] invalid interpolation: incomplete string value '*adt.BasicType':
+    //     ./in.cue:13:5
+  }
+  s2: (_|_){
+    // [eval] cannot call non-function *adt.SelectorExpr (type nil):
+    //     ./in.cue:14:5
+  }
+  s3: (_|_){
+    // [eval] cannot call non-function *adt.SelectorExpr (type nil):
+    //     ./in.cue:15:5
+  }
+  str: (string){ string }
+  num: (number){ <4 }
+  is: (bool){ bool }
+}
diff --git a/cue/testdata/fulleval/028_slice_rewrite_bug.txtar b/cue/testdata/fulleval/028_slice_rewrite_bug.txtar
index d97ef30..84bd424 100644
--- a/cue/testdata/fulleval/028_slice_rewrite_bug.txtar
+++ b/cue/testdata/fulleval/028_slice_rewrite_bug.txtar
@@ -55,3 +55,20 @@
     ]
   })
 }
+-- out/eval --
+(struct){
+  fn: (struct){
+    arg: (#list){
+      0: (int){ 1 }
+    }
+    out: (#list){
+    }
+  }
+  fn1: (struct){
+    arg: (#list){
+      0: (int){ 1 }
+    }
+    out: (#list){
+    }
+  }
+}
diff --git "a/cue/testdata/fulleval/029_Issue_\04394.txtar" "b/cue/testdata/fulleval/029_Issue_\04394.txtar"
index 268d3d1..e15758e 100644
--- "a/cue/testdata/fulleval/029_Issue_\04394.txtar"
+++ "b/cue/testdata/fulleval/029_Issue_\04394.txtar"
@@ -83,3 +83,42 @@
     _hidden: 〈1;foo〉["_hidden"]
   }
 }
+-- out/eval --
+(struct){
+  foo: (struct){
+    txt: (int){ 2 }
+    #def: (int){ 3 }
+    regular: (int){ 4 }
+    _hidden: (int){ 5 }
+  }
+  comp: (struct){
+    txt: (int){ 2 }
+    regular: (int){ 4 }
+  }
+  select: (struct){
+    opt: (_|_){
+      // [incomplete] undefined field opt:
+      //     ./in.cue:10:15
+    }
+    txt: (int){ 2 }
+    #def: (int){ 3 }
+    regular: (int){ 4 }
+    _hidden: (int){ 5 }
+  }
+  index: (struct){
+    opt: (_|_){
+      // [incomplete] undefined field opt:
+      //     ./in.cue:17:15
+    }
+    txt: (int){ 2 }
+    #def: (_|_){
+      // [incomplete] undefined field "#def":
+      //     ./in.cue:19:15
+    }
+    regular: (int){ 4 }
+    _hidden: (_|_){
+      // [incomplete] undefined field "_hidden":
+      //     ./in.cue:21:15
+    }
+  }
+}
diff --git a/cue/testdata/fulleval/030_retain_references_with_interleaved_embedding.txtar b/cue/testdata/fulleval/030_retain_references_with_interleaved_embedding.txtar
index c6aa3d4..fb56b9b 100644
--- a/cue/testdata/fulleval/030_retain_references_with_interleaved_embedding.txtar
+++ b/cue/testdata/fulleval/030_retain_references_with_interleaved_embedding.txtar
@@ -75,3 +75,18 @@
     }
   }
 }
+-- out/eval --
+(struct){
+  a: (struct){
+    d: (#struct){
+      #info: (#struct){
+        X: (string){ "foo" }
+      }
+      Y: (string){ "foo" }
+    }
+  }
+  #base: (#struct){
+    #info: (#struct){
+    }
+  }
+}
diff --git a/cue/testdata/fulleval/031_comparison_against_bottom.txtar b/cue/testdata/fulleval/031_comparison_against_bottom.txtar
index 2dc0bc4..8ed9d73 100644
--- a/cue/testdata/fulleval/031_comparison_against_bottom.txtar
+++ b/cue/testdata/fulleval/031_comparison_against_bottom.txtar
@@ -32,30 +32,53 @@
 -- out/compile --
 --- in.cue
 {
-  a: (_|_ == _|_)
+  a: (_|_(from source) == _|_(from source))
   b: ((〈0;err〉 == 1) & 2)
-  c: (〈0;err〉 == _|_)
-  d: (〈0;err〉 != _|_)
+  c: (〈0;err〉 == _|_(from source))
+  d: (〈0;err〉 != _|_(from source))
   e: ((〈0;err〉 != 1) & 3)
   f: (({
     a: 1
   } & {
     a: 2
-  }) == _|_)
+  }) == _|_(from source))
   g: (({
     a: 1
   } & {
     b: 2
-  }) == _|_)
-  h: (_|_ == ({
+  }) == _|_(from source))
+  h: (_|_(from source) == ({
     a: 1
   } & {
     a: 2
   }))
-  i: (_|_ == ({
+  i: (_|_(from source) == ({
     a: 1
   } & {
     b: 2
   }))
   err: (1 & 2)
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  a: (bool){ true }
+  b: (_|_){
+    // [eval] incompatible values *adt.Num and *adt.Num:
+    //     ./in.cue:2:4
+  }
+  c: (bool){ true }
+  d: (bool){ false }
+  e: (_|_){
+    // [eval] incompatible values *adt.Num and *adt.Num:
+    //     ./in.cue:2:4
+  }
+  f: (bool){ true }
+  g: (bool){ false }
+  h: (bool){ true }
+  i: (bool){ false }
+  err: (_|_){
+    // [eval] incompatible values *adt.Num and *adt.Num:
+    //     ./in.cue:2:4
+  }
+}
diff --git a/cue/testdata/fulleval/034_label_and_field_aliases.txtar b/cue/testdata/fulleval/034_label_and_field_aliases.txtar
index 6da9b27..2277995 100644
--- a/cue/testdata/fulleval/034_label_and_field_aliases.txtar
+++ b/cue/testdata/fulleval/034_label_and_field_aliases.txtar
@@ -59,3 +59,14 @@
   "\(〈0;a〉)": 5
   c: 〈0;("\(〈0;a〉)")〉
 }
+-- out/eval --
+(struct){
+  p: (struct){
+  }
+  "foo=bar": (string){ "str" }
+  a: (string){ "str" }
+  bb: (int){ 4 }
+  b1: (int){ 4 }
+  c: (int){ 5 }
+  str: (int){ 5 }
+}
diff --git a/cue/testdata/fulleval/035_optionals_with_label_filters.txtar b/cue/testdata/fulleval/035_optionals_with_label_filters.txtar
index db328a6..834cce6 100644
--- a/cue/testdata/fulleval/035_optionals_with_label_filters.txtar
+++ b/cue/testdata/fulleval/035_optionals_with_label_filters.txtar
@@ -100,3 +100,42 @@
     }
   }
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  #JobID: (string){ =~"^[a-zA-Z]*$" }
+  #Job: (#struct){
+    name: (string){ string }
+    cmd: (string){ string }
+  }
+  #Jobs: (#struct){
+  }
+  jobs: (struct){
+    foo: (struct){
+      name: (string){ "allGood" }
+    }
+  }
+  jobs1: (_|_){
+    // [eval] field `foo1` not allowed
+    foo1: (#struct){
+    }
+  }
+  jobs2: (_|_){
+    // [eval]
+    fooTest: (_|_){
+      // [eval]
+      name: (_|_){
+        // [eval] invalid value *adt.Vertex (out of bound *adt.BoundValue)
+      }
+      cmd: (string){ string }
+    }
+  }
+  jobs3: (_|_){
+    // [eval] field `fooTest1` not allowed
+    fooTest1: (_|_){
+      // [eval] field `cmd` not allowed
+      name: (string){ "badName" }
+      cmd: (string){ string }
+    }
+  }
+}
diff --git a/cue/testdata/fulleval/040.txtar b/cue/testdata/fulleval/040.txtar
index 8a6a51c..6d1a247 100644
--- a/cue/testdata/fulleval/040.txtar
+++ b/cue/testdata/fulleval/040.txtar
@@ -65,3 +65,22 @@
     op: "pull"
   })
 }
+-- out/eval --
+(struct){
+  #Task: (struct){ |((#struct){
+      op: (string){ "pull" }
+      tag: (string){ |(*(string){ "latest" }, (string){ string }) }
+      refToTag: (string){ |(*(string){ "latest" }, (string){ string }) }
+      tagExpr: (string){ "latestdd" }
+      tagInString: (string){ "latest" }
+    }, (#struct){
+      op: (string){ "scratch" }
+    }) }
+  foo: (#struct){
+    op: (string){ "pull" }
+    tag: (string){ |(*(string){ "latest" }, (string){ string }) }
+    refToTag: (string){ |(*(string){ "latest" }, (string){ string }) }
+    tagExpr: (string){ "latestdd" }
+    tagInString: (string){ "latest" }
+  }
+}
diff --git a/cue/testdata/fulleval/041.txtar b/cue/testdata/fulleval/041.txtar
index ea9458f..35b4e1c 100644
--- a/cue/testdata/fulleval/041.txtar
+++ b/cue/testdata/fulleval/041.txtar
@@ -34,3 +34,13 @@
     #ok: false
   })
 }
+-- out/eval --
+(struct){
+  t: (struct){
+    #ok: (bool){ true }
+    x: (int){ int }
+  }
+  s: (struct){
+    #ok: (bool){ false }
+  }
+}
diff --git a/cue/testdata/fulleval/042_cross-dependent_comprehension.txtar b/cue/testdata/fulleval/042_cross-dependent_comprehension.txtar
index ba83101..902e4d5 100644
--- a/cue/testdata/fulleval/042_cross-dependent_comprehension.txtar
+++ b/cue/testdata/fulleval/042_cross-dependent_comprehension.txtar
@@ -38,3 +38,18 @@
   })
   y: 〈0;x〉
 }
+-- out/eval --
+(struct){
+  #a: (_|_){
+    // [incomplete] incomplete
+    b: (bool){ bool }
+  }
+  x: (#struct){
+    b: (bool){ true }
+    c: (int){ 4 }
+  }
+  y: (#struct){
+    b: (bool){ true }
+    c: (int){ 4 }
+  }
+}
diff --git a/cue/testdata/fulleval/043_optional_expanded_before_lookup.txtar b/cue/testdata/fulleval/043_optional_expanded_before_lookup.txtar
index 6240798..499a8eb 100644
--- a/cue/testdata/fulleval/043_optional_expanded_before_lookup.txtar
+++ b/cue/testdata/fulleval/043_optional_expanded_before_lookup.txtar
@@ -68,3 +68,18 @@
   }
   B: (〈0;test〉.A & {})
 }
+-- out/eval --
+(struct){
+  test: (struct){
+    A: (struct){
+      field1: (string){ "1" }
+      field2: (string){ "2" }
+      name: (string){ "A" }
+    }
+  }
+  B: (struct){
+    field1: (string){ "1" }
+    field2: (string){ "2" }
+    name: (string){ "A" }
+  }
+}
diff --git "a/cue/testdata/fulleval/044_Issue_\043178.txtar" "b/cue/testdata/fulleval/044_Issue_\043178.txtar"
index 6aa4e0a..b34015d 100644
--- "a/cue/testdata/fulleval/044_Issue_\043178.txtar"
+++ "b/cue/testdata/fulleval/044_Issue_\043178.txtar"
@@ -24,3 +24,17 @@
 bar:  hex.EncodedLen(len)
 -- out/legacy-debug --
 <0>{foo: <1>.Decode (<2>.data), data: bytes, len: int, bar: <3>.EncodedLen (<2>.len)}
+-- out/eval --
+(_|_){
+  // [eval]
+  foo: (_|_){
+    // [eval] cannot call non-function *adt.SelectorExpr (type nil):
+    //     ./in.cue:5:7
+  }
+  data: (bytes){ bytes }
+  len: (int){ int }
+  bar: (_|_){
+    // [eval] cannot call non-function *adt.SelectorExpr (type nil):
+    //     ./in.cue:9:6
+  }
+}
diff --git a/cue/testdata/fulleval/046_non-structural_direct_cycles.txtar b/cue/testdata/fulleval/046_non-structural_direct_cycles.txtar
index 6671153..f711acd 100644
--- a/cue/testdata/fulleval/046_non-structural_direct_cycles.txtar
+++ b/cue/testdata/fulleval/046_non-structural_direct_cycles.txtar
@@ -26,3 +26,18 @@
     bar: 1
   } & 〈0;c2〉.bar)
 }
+-- out/eval --
+(struct){
+  c1: (_|_){
+    // [incomplete] undefined field bar:
+    //     ./in.cue:1:24
+    bar: (struct){
+      baz: (int){ 2 }
+    }
+  }
+  c2: (_|_){
+    // [incomplete] undefined field bar:
+    //     ./in.cue:2:24
+    bar: (int){ 1 }
+  }
+}
diff --git a/cue/testdata/fulleval/047_dont_bind_to_string_labels.txtar b/cue/testdata/fulleval/047_dont_bind_to_string_labels.txtar
index 0092645..5565646 100644
--- a/cue/testdata/fulleval/047_dont_bind_to_string_labels.txtar
+++ b/cue/testdata/fulleval/047_dont_bind_to_string_labels.txtar
@@ -38,3 +38,11 @@
     z: 〈1;x〉
   }
 }
+-- out/eval --
+(struct){
+  x: (int){ 1 }
+  y: (struct){
+    x: (int){ 2 }
+    z: (int){ 1 }
+  }
+}
diff --git a/cue/testdata/fulleval/053_issue312.txtar b/cue/testdata/fulleval/053_issue312.txtar
index 161329c..4b60559 100644
--- a/cue/testdata/fulleval/053_issue312.txtar
+++ b/cue/testdata/fulleval/053_issue312.txtar
@@ -20,3 +20,6 @@
 {}
 -- out/legacy-debug --
 <0>{ <1>for _, x in [1] yield <2>{}, (*close (<3>{}) | <4>{[]: <5>(_: string)->null, })}
+-- out/eval --
+(#struct){
+}
diff --git a/cue/testdata/fulleval/054_issue312.txtar b/cue/testdata/fulleval/054_issue312.txtar
index 8959563..20833cf 100644
--- a/cue/testdata/fulleval/054_issue312.txtar
+++ b/cue/testdata/fulleval/054_issue312.txtar
@@ -32,3 +32,9 @@
     〈2;y〉
   }
 }
+-- out/eval --
+(struct){
+  y: ((int|struct)){ |(*(int){ 1 }, (struct){
+      a: (int){ 2 }
+    }) }
+}
diff --git a/cue/testdata/fulleval/055_issue318.txtar b/cue/testdata/fulleval/055_issue318.txtar
index dc1ea58..1da6b56 100644
--- a/cue/testdata/fulleval/055_issue318.txtar
+++ b/cue/testdata/fulleval/055_issue318.txtar
@@ -41,3 +41,26 @@
     vy: 〈0;arg〉.y
   }
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  #T: (_|_){
+    // [eval]
+    arg: (#struct){
+      x: (string){ string }
+    }
+    out1: (_|_){
+      // [eval] invalid interpolation: undefined field y:
+      //     ./in.cue:3:8
+    }
+    out2: (_|_){
+      // [eval] invalid interpolation: undefined field y:
+      //     ./in.cue:4:8
+    }
+    vx: (string){ string }
+    vy: (_|_){
+      // [eval] undefined field y:
+      //     ./in.cue:6:12
+    }
+  }
+}
diff --git a/cue/testdata/gen.go b/cue/testdata/gen.go
index 5b91180..ef96b82 100644
--- a/cue/testdata/gen.go
+++ b/cue/testdata/gen.go
@@ -42,6 +42,7 @@
 
 //go:generate go run gen.go
 //go:generate go test ../internal/compile --update
+//go:generate go test ../internal/eval --update
 
 func main() {
 	flag.Parse()
diff --git a/cue/testdata/interpolation/041_interpolation.txtar b/cue/testdata/interpolation/041_interpolation.txtar
new file mode 100644
index 0000000..d86a24b
--- /dev/null
+++ b/cue/testdata/interpolation/041_interpolation.txtar
@@ -0,0 +1,53 @@
+# DO NOT EDIT; generated by go run testdata/gen.go
+#
+#name: interpolation
+#evalPartial
+-- in.cue --
+a: "\(4)"
+b: "one \(a) two \( a+c )"
+c: "one"
+d: "\(r)"
+u: "\(_)"
+r: _
+e: "\([])"
+-- out/def --
+a: "4"
+b: "one 4 two 4one"
+c: "one"
+d: "\(r)"
+r: _
+u: "\(_)"
+e: _|_ // expression in interpolation must evaluate to a number kind or string (found list)
+-- out/legacy-debug --
+<0>{a: "4", b: "one 4 two 4one", c: "one", d: ""+<1>.r+"", r: _, u: ""+_+"", e: _|_([]:expression in interpolation must evaluate to a number kind or string (found list))}
+-- out/compile --
+--- in.cue
+{
+  a: "\(4)"
+  b: "one \(〈0;a〉) two \((〈0;a〉 + 〈0;c〉))"
+  c: "one"
+  d: "\(〈0;r〉)"
+  u: "\(_)"
+  r: _
+  e: "\([])"
+}
+-- out/eval --
+(_|_){
+  // [eval]
+  a: (string){ "4" }
+  b: (string){ "one 4 two 4one" }
+  c: (string){ "one" }
+  d: (_|_){
+    // [incomplete] invalid interpolation: incomplete string value '*adt.BasicType':
+    //     ./in.cue:4:4
+  }
+  u: (_|_){
+    // [incomplete] invalid interpolation: incomplete string value '*adt.Top':
+    //     ./in.cue:5:4
+  }
+  r: (_){ _ }
+  e: (_|_){
+    // [eval] invalid interpolation: cannot use *adt.Vertex (type list) as type string:
+    //     ./in.cue:7:4
+  }
+}
diff --git a/cue/testdata/resolve/042_multiline_interpolation.txtar b/cue/testdata/interpolation/042_multiline_interpolation.txtar
similarity index 87%
rename from cue/testdata/resolve/042_multiline_interpolation.txtar
rename to cue/testdata/interpolation/042_multiline_interpolation.txtar
index a36cd3b..dcab261 100644
--- a/cue/testdata/resolve/042_multiline_interpolation.txtar
+++ b/cue/testdata/interpolation/042_multiline_interpolation.txtar
@@ -191,3 +191,14 @@
   \(4)
   "
 }
+-- out/eval --
+(struct){
+  a1: (string){ "before\n4\nafter" }
+  a2: (string){ "before\n4\n" }
+  a3: (string){ "\n4\nafter" }
+  a4: (string){ "\n4\n" }
+  m1: (string){ "before\n4\nafter" }
+  m2: (string){ "before\n4\n" }
+  m3: (string){ "\n4\nafter" }
+  m4: (string){ "\n4\n" }
+}
diff --git a/cue/testdata/resolve/019_list_types.txtar b/cue/testdata/lists/019_list_types.txtar
similarity index 74%
rename from cue/testdata/resolve/019_list_types.txtar
rename to cue/testdata/lists/019_list_types.txtar
index d9cf033..80fd580 100644
--- a/cue/testdata/resolve/019_list_types.txtar
+++ b/cue/testdata/lists/019_list_types.txtar
@@ -116,3 +116,45 @@
     ...float,
   ]
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  l0: (#list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+    2: (int){ 3 }
+  }
+  l2: (#list){
+    0: (struct){
+      a: (int){ 1 }
+    }
+    1: (struct){
+      a: (int){ 2 }
+      b: (int){ 3 }
+    }
+  }
+  s1: (#list){
+    0: (int){ int }
+  }
+  s2: (#list){
+    0: (int){ 2 }
+  }
+  i1: (int){ int }
+  i2: (int){ 3 }
+  t0: (#list){
+    0: (struct){
+      a: (int){ 8 }
+    }
+  }
+  t1: (list){
+  }
+  e0: (_|_){
+    // [eval] incompatible list lengths (1 and 2)
+    0: (struct){
+    }
+    1: (struct){
+    }
+  }
+  e1: (list){
+  }
+}
diff --git a/cue/testdata/lists/020_list_arithmetic.txtar b/cue/testdata/lists/020_list_arithmetic.txtar
new file mode 100644
index 0000000..ab11e05
--- /dev/null
+++ b/cue/testdata/lists/020_list_arithmetic.txtar
@@ -0,0 +1,528 @@
+# DO NOT EDIT; generated by go run testdata/gen.go
+#
+#name: list arithmetic
+#evalPartial
+-- in.cue --
+l0: 3 * [1, 2, 3]
+l1: 0 * [1, 2, 3]
+l2: 10 * []
+l3: <=2 * []
+l4: <=2 * [int]
+l5: <=2 * (int * [int])
+l6: 3 * [...int]
+l7: 3 * [1, ...int]
+l8: 3 * [1, 2, ...int]
+
+s0:  [] + []
+s1:  [1] + []
+s2:  [] + [2]
+s3:  [1] + [2]
+s4:  [1, 2] + []
+s5:  [] + [1, 2]
+s6:  [1] + [1, 2]
+s7:  [1, 2] + [1]
+s8:  [1, 2] + [1, 2]
+s9:  [] + [...]
+s10: [1] + [...]
+s11: [] + [2, ...]
+s12: [1] + [2, ...]
+s13: [1, 2] + [...]
+s14: [] + [1, 2, ...]
+s15: [1] + [1, 2, ...]
+s16: [1, 2] + [1, ...]
+s17: [1, 2] + [1, 2, ...]
+
+s18: [...] + []
+s19: [1, ...] + []
+s20: [...] + [2]
+s21: [1, ...] + [2]
+s22: [1, 2, ...] + []
+s23: [...] + [1, 2]
+s24: [1, ...] + [1, 2]
+s25: [1, 2, ...] + [1]
+s26: [1, 2, ...] + [1, 2]
+s27: [...] + [...]
+s28: [1, ...] + [...]
+s29: [...] + [2, ...]
+s30: [1, ...] + [2, ...]
+s31: [1, 2, ...] + [...]
+s32: [...] + [1, 2, ...]
+s33: [1, ...] + [1, 2, ...]
+s34: [1, 2, ...] + [1, ...]
+s35: [1, 2, ...] + [1, 2, ...]
+-- out/def --
+l0: [1, 2, 3, 1, 2, 3, 1, 2, 3]
+l1: []
+l2: []
+l3: <=2 * []
+l4: <=2 * [int]
+l5: <=2 * (int * [int])
+l6: []
+l7: [1, 1, 1]
+l8: [1, 2, 1, 2, 1, 2]
+s0: []
+s1: [1]
+s2: [2]
+s3: [1, 2]
+s4: [1, 2]
+s5: [1, 2]
+s6: [1, 1, 2]
+s7: [1, 2, 1]
+s8: [1, 2, 1, 2]
+s9: []
+s10: [1]
+s11: [2]
+s12: [1, 2]
+s13: [1, 2]
+s14: [1, 2]
+s15: [1, 1, 2]
+s16: [1, 2, 1]
+s17: [1, 2, 1, 2]
+s18: []
+s19: [1]
+s20: [2]
+s21: [1, 2]
+s22: [1, 2]
+s23: [1, 2]
+s24: [1, 1, 2]
+s25: [1, 2, 1]
+s26: [1, 2, 1, 2]
+s27: []
+s28: [1]
+s29: [2]
+s30: [1, 2]
+s31: [1, 2]
+s32: [1, 2]
+s33: [1, 1, 2]
+s34: [1, 2, 1]
+s35: [1, 2, 1, 2]
+-- out/legacy-debug --
+<0>{l0: [1,2,3,1,2,3,1,2,3], l1: [], l2: [], l3: (<=2 * []), l4: (<=2 * [int]), l5: (<=2 * (int * [int])), l6: [], l7: [1,1,1], l8: [1,2,1,2,1,2], s0: [], s1: [1], s2: [2], s3: [1,2], s4: [1,2], s5: [1,2], s6: [1,1,2], s7: [1,2,1], s8: [1,2,1,2], s9: [], s10: [1], s11: [2], s12: [1,2], s13: [1,2], s14: [1,2], s15: [1,1,2], s16: [1,2,1], s17: [1,2,1,2], s18: [], s19: [1], s20: [2], s21: [1,2], s22: [1,2], s23: [1,2], s24: [1,1,2], s25: [1,2,1], s26: [1,2,1,2], s27: [], s28: [1], s29: [2], s30: [1,2], s31: [1,2], s32: [1,2], s33: [1,1,2], s34: [1,2,1], s35: [1,2,1,2]}
+-- out/compile --
+--- in.cue
+{
+  l0: (3 * [
+    1,
+    2,
+    3,
+  ])
+  l1: (0 * [
+    1,
+    2,
+    3,
+  ])
+  l2: (10 * [])
+  l3: (<=2 * [])
+  l4: (<=2 * [
+    int,
+  ])
+  l5: (<=2 * (int * [
+    int,
+  ]))
+  l6: (3 * [
+    ...int,
+  ])
+  l7: (3 * [
+    1,
+    ...int,
+  ])
+  l8: (3 * [
+    1,
+    2,
+    ...int,
+  ])
+  s0: ([] + [])
+  s1: ([
+    1,
+  ] + [])
+  s2: ([] + [
+    2,
+  ])
+  s3: ([
+    1,
+  ] + [
+    2,
+  ])
+  s4: ([
+    1,
+    2,
+  ] + [])
+  s5: ([] + [
+    1,
+    2,
+  ])
+  s6: ([
+    1,
+  ] + [
+    1,
+    2,
+  ])
+  s7: ([
+    1,
+    2,
+  ] + [
+    1,
+  ])
+  s8: ([
+    1,
+    2,
+  ] + [
+    1,
+    2,
+  ])
+  s9: ([] + [
+    ...,
+  ])
+  s10: ([
+    1,
+  ] + [
+    ...,
+  ])
+  s11: ([] + [
+    2,
+    ...,
+  ])
+  s12: ([
+    1,
+  ] + [
+    2,
+    ...,
+  ])
+  s13: ([
+    1,
+    2,
+  ] + [
+    ...,
+  ])
+  s14: ([] + [
+    1,
+    2,
+    ...,
+  ])
+  s15: ([
+    1,
+  ] + [
+    1,
+    2,
+    ...,
+  ])
+  s16: ([
+    1,
+    2,
+  ] + [
+    1,
+    ...,
+  ])
+  s17: ([
+    1,
+    2,
+  ] + [
+    1,
+    2,
+    ...,
+  ])
+  s18: ([
+    ...,
+  ] + [])
+  s19: ([
+    1,
+    ...,
+  ] + [])
+  s20: ([
+    ...,
+  ] + [
+    2,
+  ])
+  s21: ([
+    1,
+    ...,
+  ] + [
+    2,
+  ])
+  s22: ([
+    1,
+    2,
+    ...,
+  ] + [])
+  s23: ([
+    ...,
+  ] + [
+    1,
+    2,
+  ])
+  s24: ([
+    1,
+    ...,
+  ] + [
+    1,
+    2,
+  ])
+  s25: ([
+    1,
+    2,
+    ...,
+  ] + [
+    1,
+  ])
+  s26: ([
+    1,
+    2,
+    ...,
+  ] + [
+    1,
+    2,
+  ])
+  s27: ([
+    ...,
+  ] + [
+    ...,
+  ])
+  s28: ([
+    1,
+    ...,
+  ] + [
+    ...,
+  ])
+  s29: ([
+    ...,
+  ] + [
+    2,
+    ...,
+  ])
+  s30: ([
+    1,
+    ...,
+  ] + [
+    2,
+    ...,
+  ])
+  s31: ([
+    1,
+    2,
+    ...,
+  ] + [
+    ...,
+  ])
+  s32: ([
+    ...,
+  ] + [
+    1,
+    2,
+    ...,
+  ])
+  s33: ([
+    1,
+    ...,
+  ] + [
+    1,
+    2,
+    ...,
+  ])
+  s34: ([
+    1,
+    2,
+    ...,
+  ] + [
+    1,
+    ...,
+  ])
+  s35: ([
+    1,
+    2,
+    ...,
+  ] + [
+    1,
+    2,
+    ...,
+  ])
+}
+-- out/eval --
+(_|_){
+  // [eval]
+  l0: (#list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+    2: (int){ 3 }
+    3: (int){ 1 }
+    4: (int){ 2 }
+    5: (int){ 3 }
+    6: (int){ 1 }
+    7: (int){ 2 }
+    8: (int){ 3 }
+  }
+  l1: (#list){
+  }
+  l2: (#list){
+  }
+  l3: (_|_){
+    // [incomplete] non-concrete value *adt.BoundValue in operand to *:
+    //     ./in.cue:4:5
+  }
+  l4: (_|_){
+    // [incomplete] non-concrete value *adt.BoundValue in operand to *:
+    //     ./in.cue:5:5
+  }
+  l5: (_|_){
+    // [eval] value can never become concrete:
+    //     ./in.cue:6:12
+  }
+  l6: (#list){
+  }
+  l7: (#list){
+    0: (int){ 1 }
+    1: (int){ 1 }
+    2: (int){ 1 }
+  }
+  l8: (#list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+    2: (int){ 1 }
+    3: (int){ 2 }
+    4: (int){ 1 }
+    5: (int){ 2 }
+  }
+  s0: (#list){
+  }
+  s1: (#list){
+    0: (int){ 1 }
+  }
+  s2: (#list){
+    0: (int){ 2 }
+  }
+  s3: (#list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+  }
+  s4: (#list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+  }
+  s5: (#list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+  }
+  s6: (#list){
+    0: (int){ 1 }
+    1: (int){ 1 }
+    2: (int){ 2 }
+  }
+  s7: (#list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+    2: (int){ 1 }
+  }
+  s8: (#list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+    2: (int){ 1 }
+    3: (int){ 2 }
+  }
+  s9: (#list){
+  }
+  s10: (#list){
+    0: (int){ 1 }
+  }
+  s11: (#list){
+    0: (int){ 2 }
+  }
+  s12: (#list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+  }
+  s13: (#list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+  }
+  s14: (#list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+  }
+  s15: (#list){
+    0: (int){ 1 }
+    1: (int){ 1 }
+    2: (int){ 2 }
+  }
+  s16: (#list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+    2: (int){ 1 }
+  }
+  s17: (#list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+    2: (int){ 1 }
+    3: (int){ 2 }
+  }
+  s18: (#list){
+  }
+  s19: (#list){
+    0: (int){ 1 }
+  }
+  s20: (#list){
+    0: (int){ 2 }
+  }
+  s21: (#list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+  }
+  s22: (#list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+  }
+  s23: (#list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+  }
+  s24: (#list){
+    0: (int){ 1 }
+    1: (int){ 1 }
+    2: (int){ 2 }
+  }
+  s25: (#list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+    2: (int){ 1 }
+  }
+  s26: (#list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+    2: (int){ 1 }
+    3: (int){ 2 }
+  }
+  s27: (#list){
+  }
+  s28: (#list){
+    0: (int){ 1 }
+  }
+  s29: (#list){
+    0: (int){ 2 }
+  }
+  s30: (#list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+  }
+  s31: (#list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+  }
+  s32: (#list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+  }
+  s33: (#list){
+    0: (int){ 1 }
+    1: (int){ 1 }
+    2: (int){ 2 }
+  }
+  s34: (#list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+    2: (int){ 1 }
+  }
+  s35: (#list){
+    0: (int){ 1 }
+    1: (int){ 2 }
+    2: (int){ 1 }
+    3: (int){ 2 }
+  }
+}
diff --git a/cue/testdata/resolve/021_list_equality.txtar b/cue/testdata/lists/021_list_equality.txtar
similarity index 84%
rename from cue/testdata/resolve/021_list_equality.txtar
rename to cue/testdata/lists/021_list_equality.txtar
index 7213b1d..afe31a5 100644
--- a/cue/testdata/resolve/021_list_equality.txtar
+++ b/cue/testdata/lists/021_list_equality.txtar
@@ -482,3 +482,54 @@
     ...,
   ])
 }
+-- out/eval --
+(struct){
+  eq0: (bool){ true }
+  eq1: (bool){ true }
+  eq2: (bool){ true }
+  eq3: (bool){ true }
+  eq4: (bool){ true }
+  eq5: (bool){ true }
+  eq6: (bool){ true }
+  eq7: (bool){ true }
+  eq8: (bool){ true }
+  eq9: (bool){ true }
+  eq10: (bool){ true }
+  eq11: (bool){ true }
+  ne0: (bool){ false }
+  ne1: (bool){ false }
+  ne2: (bool){ false }
+  ne3: (bool){ false }
+  ne4: (bool){ false }
+  ne5: (bool){ false }
+  ne6: (bool){ false }
+  ne7: (bool){ false }
+  ne8: (bool){ false }
+  ne9: (bool){ false }
+  ne10: (bool){ false }
+  ne11: (bool){ false }
+  feq0: (bool){ false }
+  feq1: (bool){ false }
+  feq2: (bool){ false }
+  feq3: (bool){ false }
+  feq4: (bool){ false }
+  feq5: (bool){ false }
+  feq6: (bool){ false }
+  feq7: (bool){ false }
+  feq8: (bool){ false }
+  feq9: (bool){ false }
+  feq10: (bool){ false }
+  feq11: (bool){ false }
+  fne0: (bool){ false }
+  fne1: (bool){ false }
+  fne2: (bool){ false }
+  fne3: (bool){ false }
+  fne4: (bool){ false }
+  fne5: (bool){ false }
+  fne6: (bool){ false }
+  fne7: (bool){ false }
+  fne8: (bool){ false }
+  fne9: (bool){ false }
+  fne10: (bool){ false }
+  fne11: (bool){ false }
+}
diff --git a/cue/testdata/references/embed_self.txtar b/cue/testdata/references/embed_self.txtar
new file mode 100644
index 0000000..b9e71a2
--- /dev/null
+++ b/cue/testdata/references/embed_self.txtar
@@ -0,0 +1,16 @@
+-- in.cue --
+Foo: {
+}
+
+Foo
+-- out/eval --
+(struct){
+  Foo: (struct){
+  }
+}
+-- out/compile --
+--- in.cue
+{
+  Foo: {}
+  〈0;Foo〉
+}
diff --git a/cue/testdata/resolve/000_convert___to_top.txtar b/cue/testdata/resolve/000_convert___to_top.txtar
index e2f3713..83014df 100644
--- a/cue/testdata/resolve/000_convert___to_top.txtar
+++ b/cue/testdata/resolve/000_convert___to_top.txtar
@@ -23,3 +23,8 @@
     [_]: _
   }
 }
+-- out/eval --
+(struct){
+  a: (struct){
+  }
+}
diff --git a/cue/testdata/resolve/001.txtar b/cue/testdata/resolve/001.txtar
index b4af334..4ae2551 100644
--- a/cue/testdata/resolve/001.txtar
+++ b/cue/testdata/resolve/001.txtar
@@ -61,3 +61,18 @@
     d: 2
   }
 }
+-- out/eval --
+(struct){
+  a: (int){ 3 }
+  b: (struct){
+    c: (struct){
+      d: (int){ 3 }
+    }
+  }
+  c: (struct){
+    c: (int){ 2 }
+  }
+  d: (struct){
+    d: (int){ 2 }
+  }
+}
diff --git a/cue/testdata/resolve/004.txtar b/cue/testdata/resolve/004.txtar
index 049a5aa..029e89f 100644
--- a/cue/testdata/resolve/004.txtar
+++ b/cue/testdata/resolve/004.txtar
@@ -38,3 +38,12 @@
   }
   b: _
 }
+-- out/eval --
+(struct){
+  a: (struct){
+    d: (int){ 1 }
+  }
+  b: (struct){
+    d: (int){ 1 }
+  }
+}
diff --git a/cue/testdata/resolve/005_JSON.txtar b/cue/testdata/resolve/005_JSON.txtar
index 1a39c93..44cbf02 100644
--- a/cue/testdata/resolve/005_JSON.txtar
+++ b/cue/testdata/resolve/005_JSON.txtar
@@ -44,3 +44,12 @@
   }
   c: 〈0;o〉["a\nb"]
 }
+-- out/eval --
+(struct){
+  a: (int){ 3 }
+  b: (int){ 3 }
+  o: (struct){
+    "a\nb": (int){ 2 }
+  }
+  c: (int){ 2 }
+}
diff --git a/cue/testdata/resolve/006_arithmetic.txtar b/cue/testdata/resolve/006_arithmetic.txtar
index 506cb7d..7e57d3c 100644
--- a/cue/testdata/resolve/006_arithmetic.txtar
+++ b/cue/testdata/resolve/006_arithmetic.txtar
@@ -28,3 +28,15 @@
   v6: (1.0 / 1.0)
   e2: (int & (4.0 / 2.0))
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  v1: (float){ 5.0000000000E+11 }
+  v2: (bool){ true }
+  n1: (int){ 1 }
+  v5: (float){ 2.0 }
+  v6: (float){ 1 }
+  e2: (_|_){
+    // [eval] invalid value *adt.Num (mismatched types float and int)
+  }
+}
diff --git a/cue/testdata/resolve/007_inequality.txtar b/cue/testdata/resolve/007_inequality.txtar
index 5b6ffad..65a319e 100644
--- a/cue/testdata/resolve/007_inequality.txtar
+++ b/cue/testdata/resolve/007_inequality.txtar
@@ -44,3 +44,12 @@
   e: (null == [])
   f: (0 == 0.0)
 }
+-- out/eval --
+(struct){
+  a: (bool){ true }
+  b: (bool){ true }
+  c: (bool){ false }
+  d: (bool){ true }
+  e: (bool){ false }
+  f: (bool){ true }
+}
diff --git a/cue/testdata/resolve/008_attributes.txtar b/cue/testdata/resolve/008_attributes.txtar
index 0fa6191..ddc9a36 100644
--- a/cue/testdata/resolve/008_attributes.txtar
+++ b/cue/testdata/resolve/008_attributes.txtar
@@ -33,3 +33,18 @@
     foo: 1
   })
 }
+-- out/eval --
+(struct){
+  a: (struct){
+    foo: (int){ 1 }
+  }
+  b: (struct){
+    foo: (int){ 1 }
+  }
+  c: (struct){
+    foo: (int){ 1 }
+  }
+  e: (struct){
+    foo: (int){ 1 }
+  }
+}
diff --git a/cue/testdata/resolve/009_optional_field_unification.txtar b/cue/testdata/resolve/009_optional_field_unification.txtar
index d69ba0a..7d7c7ac 100644
--- a/cue/testdata/resolve/009_optional_field_unification.txtar
+++ b/cue/testdata/resolve/009_optional_field_unification.txtar
@@ -63,3 +63,18 @@
   "g\(1)"?: 1
   "g\(2)"?: 2
 }
+-- out/eval --
+(struct){
+  a: (struct){
+  }
+  b: (struct){
+    foo: (string){ "foo" }
+  }
+  c: (struct){
+    foo: (string){ "foo" }
+  }
+  d: (struct){
+  }
+  g1: (int){ 1 }
+  g2: (int){ 2 }
+}
diff --git a/cue/testdata/resolve/010_optional_field_resolves_to_incomplete.txtar b/cue/testdata/resolve/010_optional_field_resolves_to_incomplete.txtar
index ec54948..7b017da 100644
--- a/cue/testdata/resolve/010_optional_field_resolves_to_incomplete.txtar
+++ b/cue/testdata/resolve/010_optional_field_resolves_to_incomplete.txtar
@@ -25,3 +25,16 @@
     c: 〈1;r〉["a"]
   }
 }
+-- out/eval --
+(struct){
+  r: (struct){
+    b: (_|_){
+      // [incomplete] undefined field a:
+      //     ./in.cue:3:6
+    }
+    c: (_|_){
+      // [incomplete] undefined field a:
+      //     ./in.cue:4:8
+    }
+  }
+}
diff --git a/cue/testdata/resolve/011_bounds.txtar b/cue/testdata/resolve/011_bounds.txtar
index adabfe1..775c728 100644
--- a/cue/testdata/resolve/011_bounds.txtar
+++ b/cue/testdata/resolve/011_bounds.txtar
@@ -136,3 +136,64 @@
   e8: (>11 & <=11)
   e9: (>"a" & <1)
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  i1: (int){ 5 }
+  i2: (int){ 5 }
+  i3: (#list){
+  }
+  i4: (number){ &(!=2, !=4) }
+  s1: (number){ &(>=0, <=10, !=1) }
+  s2: (number){ &(>=0, <=10) }
+  s3: (number){ >5 }
+  s4: (number){ <10 }
+  s5: (number){ &(!=2, !=2) }
+  s6: (number){ &(>=2, !=2) }
+  s7: (number){ &(>=2, !=2) }
+  s8: (number){ >5 }
+  s10: (number){ &(>1, <=10) }
+  s11: (number){ &(>0, <12) }
+  s20: (number){ 10 }
+  s22: (number){ &(>5, <=6) }
+  s22a: (int){ 6 }
+  s22b: (int){ 6 }
+  s22c: (int){ 5 }
+  s22d: (int){ 5 }
+  s22e: (int){ 5 }
+  s22f: (int){ 5 }
+  s23: (number){ &(>0, <2) }
+  s23a: (int){ 1 }
+  s23b: (int){ 1 }
+  s23c: (int){ 1 }
+  s23d: (int){ 1 }
+  s23e: (number){ &(>0.0, <2.0) }
+  s30: (int){ &(>0, int) }
+  e1: (_|_){
+    // [eval] invalid value *adt.BoundValue (mismatched types (bool|string|bytes|list|struct|number) and null)
+  }
+  e2: (_|_){
+    // [eval] invalid value *adt.Null (mismatched types null and (bool|string|bytes|list|struct|number))
+  }
+  e3: (_|_){
+    // [eval] invalid value *adt.Num (out of bound *adt.BoundValue)
+  }
+  e4: (_|_){
+    // [eval] invalid value *adt.Num (out of bound *adt.BoundValue)
+  }
+  e5: (_|_){
+    // [eval] bounds *adt.BoundValue *adt.BoundValue
+  }
+  e6: (_|_){
+    // [eval] bounds *adt.BoundValue *adt.BoundValue
+  }
+  e7: (_|_){
+    // [eval] bounds *adt.BoundValue *adt.BoundValue
+  }
+  e8: (_|_){
+    // [eval] bounds *adt.BoundValue *adt.BoundValue
+  }
+  e9: (_|_){
+    // [eval] invalid value *adt.BoundValue (mismatched types number and string)
+  }
+}
diff --git a/cue/testdata/resolve/012_bound_conversions.txtar b/cue/testdata/resolve/012_bound_conversions.txtar
index f42d6da..8f2e779 100644
--- a/cue/testdata/resolve/012_bound_conversions.txtar
+++ b/cue/testdata/resolve/012_bound_conversions.txtar
@@ -46,3 +46,24 @@
   c3: (1.2 & (>=1 & <2))
   c4: (1.2 & ((>=1 & <2) & int))
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  r0: (int){ 1 }
+  r1: (int){ 1 }
+  r2: (int){ 1 }
+  r3: (int){ -1 }
+  r4: (int){ -1 }
+  r5: (number){ 1.1 }
+  r6: (number){ 1.1 }
+  c1: (_|_){
+    // [eval] invalid value *adt.Num (out of bound *adt.BoundValue)
+  }
+  c2: (_|_){
+    // [eval] invalid value *adt.Num (out of bound *adt.BoundValue)
+  }
+  c3: (float){ 1.2 }
+  c4: (_|_){
+    // [eval] invalid value *adt.BasicType (mismatched types int and float)
+  }
+}
diff --git a/cue/testdata/resolve/014_null_coalescing.txtar b/cue/testdata/resolve/014_null_coalescing.txtar
index 3d0605a..e4896c7 100644
--- a/cue/testdata/resolve/014_null_coalescing.txtar
+++ b/cue/testdata/resolve/014_null_coalescing.txtar
@@ -29,3 +29,9 @@
   b: (〈0;a〉.x|"b")
   c: (〈0;a〉["x"]|"c")
 }
+-- out/eval --
+(struct){
+  a: (null){ null }
+  b: (string){ "b" }
+  c: (string){ "c" }
+}
diff --git a/cue/testdata/resolve/016_index.txtar b/cue/testdata/resolve/016_index.txtar
index baa96cb..3486c2c 100644
--- a/cue/testdata/resolve/016_index.txtar
+++ b/cue/testdata/resolve/016_index.txtar
@@ -76,3 +76,51 @@
   }
   e7: 〈0;def〉["b"]
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  a: (int){ 2 }
+  b: (string){ "bar" }
+  c: (_|_){
+    // [eval] invalid list index "3" (type string):
+    //     ./in.cue:3:20
+  }
+  d: (_|_){
+    // [eval] undefined field 0:
+    //     ./in.cue:4:16
+  }
+  l: (#list){
+  }
+  e1: (_|_){
+    // [eval] invalid list index _ (type string):
+    //     ./in.cue:6:9
+  }
+  e2: (_|_){
+    // [eval] invalid operand 2 (found int, want list or struct):
+    //     ./in.cue:7:5
+  }
+  e3: (_|_){
+    // [eval] invalid label type bool:
+    //     ./in.cue:8:5
+  }
+  e4: (_|_){
+    // [eval] undefined field 3:
+    //     ./in.cue:9:15
+  }
+  e5: (_|_){
+    // [eval] invalid negative index *adt.Num:
+    //     ./in.cue:10:5
+  }
+  e6: (_|_){
+    // [eval] invalid list index 1 (out of bounds):
+    //     ./in.cue:11:16
+  }
+  def: (struct){
+    a: (int){ 1 }
+    #b: (int){ 3 }
+  }
+  e7: (_|_){
+    // [incomplete] undefined field b:
+    //     ./in.cue:16:9
+  }
+}
diff --git a/cue/testdata/resolve/017_disjunctions_of_lists.txtar b/cue/testdata/resolve/017_disjunctions_of_lists.txtar
index bafa8b8..c86479b 100644
--- a/cue/testdata/resolve/017_disjunctions_of_lists.txtar
+++ b/cue/testdata/resolve/017_disjunctions_of_lists.txtar
@@ -32,3 +32,21 @@
     "d",
   ])
 }
+-- out/eval --
+(struct){
+  l: (list){ |((#list){
+      0: (int){ int }
+      1: (int){ int }
+    }, (#list){
+      0: (string){ string }
+      1: (string){ string }
+    }) }
+  l1: (#list){
+    0: (string){ "a" }
+    1: (string){ "b" }
+  }
+  l2: (#list){
+    0: (string){ "c" }
+    1: (string){ "d" }
+  }
+}
diff --git a/cue/testdata/resolve/018_slice.txtar b/cue/testdata/resolve/018_slice.txtar
index c14c0f6..7d50290 100644
--- a/cue/testdata/resolve/018_slice.txtar
+++ b/cue/testdata/resolve/018_slice.txtar
@@ -51,3 +51,39 @@
     2,
   ][:"9"]
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  a: (#list){
+  }
+  b: (#list){
+  }
+  e1: (_|_){
+    // [eval] index 1 out of range:
+    //     ./in.cue:3:5
+  }
+  e2: (_|_){
+    // [eval] cannot convert negative number to uint64:
+    //     ./in.cue:4:5
+  }
+  e3: (_|_){
+    // [eval] invalid slice index: 1 > 0:
+    //     ./in.cue:5:5
+  }
+  e4: (_|_){
+    // [eval] index 2 out of range:
+    //     ./in.cue:6:5
+  }
+  e5: (_|_){
+    // [eval] cannot slice *adt.Num (type int):
+    //     ./in.cue:7:5
+  }
+  e6: (_|_){
+    // [eval] cannot use *adt.String (type string) as type int in slice index:
+    //     ./in.cue:8:5
+  }
+  e7: (_|_){
+    // [eval] cannot use *adt.String (type string) as type int in slice index:
+    //     ./in.cue:9:5
+  }
+}
diff --git a/cue/testdata/resolve/020_list_arithmetic.txtar b/cue/testdata/resolve/020_list_arithmetic.txtar
deleted file mode 100644
index f08f8bc..0000000
--- a/cue/testdata/resolve/020_list_arithmetic.txtar
+++ /dev/null
@@ -1,338 +0,0 @@
-# DO NOT EDIT; generated by go run testdata/gen.go
-#
-#name: list arithmetic
-#evalPartial
--- in.cue --
-l0: 3 * [1, 2, 3]
-l1: 0 * [1, 2, 3]
-l2: 10 * []
-l3: <=2 * []
-l4: <=2 * [int]
-l5: <=2 * (int * [int])
-l6: 3 * [...int]
-l7: 3 * [1, ...int]
-l8: 3 * [1, 2, ...int]
-
-s0:  [] + []
-s1:  [1] + []
-s2:  [] + [2]
-s3:  [1] + [2]
-s4:  [1, 2] + []
-s5:  [] + [1, 2]
-s6:  [1] + [1, 2]
-s7:  [1, 2] + [1]
-s8:  [1, 2] + [1, 2]
-s9:  [] + [...]
-s10: [1] + [...]
-s11: [] + [2, ...]
-s12: [1] + [2, ...]
-s13: [1, 2] + [...]
-s14: [] + [1, 2, ...]
-s15: [1] + [1, 2, ...]
-s16: [1, 2] + [1, ...]
-s17: [1, 2] + [1, 2, ...]
-
-s18: [...] + []
-s19: [1, ...] + []
-s20: [...] + [2]
-s21: [1, ...] + [2]
-s22: [1, 2, ...] + []
-s23: [...] + [1, 2]
-s24: [1, ...] + [1, 2]
-s25: [1, 2, ...] + [1]
-s26: [1, 2, ...] + [1, 2]
-s27: [...] + [...]
-s28: [1, ...] + [...]
-s29: [...] + [2, ...]
-s30: [1, ...] + [2, ...]
-s31: [1, 2, ...] + [...]
-s32: [...] + [1, 2, ...]
-s33: [1, ...] + [1, 2, ...]
-s34: [1, 2, ...] + [1, ...]
-s35: [1, 2, ...] + [1, 2, ...]
--- out/def --
-l0: [1, 2, 3, 1, 2, 3, 1, 2, 3]
-l1: []
-l2: []
-l3: <=2 * []
-l4: <=2 * [int]
-l5: <=2 * (int * [int])
-l6: []
-l7: [1, 1, 1]
-l8: [1, 2, 1, 2, 1, 2]
-s0: []
-s1: [1]
-s2: [2]
-s3: [1, 2]
-s4: [1, 2]
-s5: [1, 2]
-s6: [1, 1, 2]
-s7: [1, 2, 1]
-s8: [1, 2, 1, 2]
-s9: []
-s10: [1]
-s11: [2]
-s12: [1, 2]
-s13: [1, 2]
-s14: [1, 2]
-s15: [1, 1, 2]
-s16: [1, 2, 1]
-s17: [1, 2, 1, 2]
-s18: []
-s19: [1]
-s20: [2]
-s21: [1, 2]
-s22: [1, 2]
-s23: [1, 2]
-s24: [1, 1, 2]
-s25: [1, 2, 1]
-s26: [1, 2, 1, 2]
-s27: []
-s28: [1]
-s29: [2]
-s30: [1, 2]
-s31: [1, 2]
-s32: [1, 2]
-s33: [1, 1, 2]
-s34: [1, 2, 1]
-s35: [1, 2, 1, 2]
--- out/legacy-debug --
-<0>{l0: [1,2,3,1,2,3,1,2,3], l1: [], l2: [], l3: (<=2 * []), l4: (<=2 * [int]), l5: (<=2 * (int * [int])), l6: [], l7: [1,1,1], l8: [1,2,1,2,1,2], s0: [], s1: [1], s2: [2], s3: [1,2], s4: [1,2], s5: [1,2], s6: [1,1,2], s7: [1,2,1], s8: [1,2,1,2], s9: [], s10: [1], s11: [2], s12: [1,2], s13: [1,2], s14: [1,2], s15: [1,1,2], s16: [1,2,1], s17: [1,2,1,2], s18: [], s19: [1], s20: [2], s21: [1,2], s22: [1,2], s23: [1,2], s24: [1,1,2], s25: [1,2,1], s26: [1,2,1,2], s27: [], s28: [1], s29: [2], s30: [1,2], s31: [1,2], s32: [1,2], s33: [1,1,2], s34: [1,2,1], s35: [1,2,1,2]}
--- out/compile --
---- in.cue
-{
-  l0: (3 * [
-    1,
-    2,
-    3,
-  ])
-  l1: (0 * [
-    1,
-    2,
-    3,
-  ])
-  l2: (10 * [])
-  l3: (<=2 * [])
-  l4: (<=2 * [
-    int,
-  ])
-  l5: (<=2 * (int * [
-    int,
-  ]))
-  l6: (3 * [
-    ...int,
-  ])
-  l7: (3 * [
-    1,
-    ...int,
-  ])
-  l8: (3 * [
-    1,
-    2,
-    ...int,
-  ])
-  s0: ([] + [])
-  s1: ([
-    1,
-  ] + [])
-  s2: ([] + [
-    2,
-  ])
-  s3: ([
-    1,
-  ] + [
-    2,
-  ])
-  s4: ([
-    1,
-    2,
-  ] + [])
-  s5: ([] + [
-    1,
-    2,
-  ])
-  s6: ([
-    1,
-  ] + [
-    1,
-    2,
-  ])
-  s7: ([
-    1,
-    2,
-  ] + [
-    1,
-  ])
-  s8: ([
-    1,
-    2,
-  ] + [
-    1,
-    2,
-  ])
-  s9: ([] + [
-    ...,
-  ])
-  s10: ([
-    1,
-  ] + [
-    ...,
-  ])
-  s11: ([] + [
-    2,
-    ...,
-  ])
-  s12: ([
-    1,
-  ] + [
-    2,
-    ...,
-  ])
-  s13: ([
-    1,
-    2,
-  ] + [
-    ...,
-  ])
-  s14: ([] + [
-    1,
-    2,
-    ...,
-  ])
-  s15: ([
-    1,
-  ] + [
-    1,
-    2,
-    ...,
-  ])
-  s16: ([
-    1,
-    2,
-  ] + [
-    1,
-    ...,
-  ])
-  s17: ([
-    1,
-    2,
-  ] + [
-    1,
-    2,
-    ...,
-  ])
-  s18: ([
-    ...,
-  ] + [])
-  s19: ([
-    1,
-    ...,
-  ] + [])
-  s20: ([
-    ...,
-  ] + [
-    2,
-  ])
-  s21: ([
-    1,
-    ...,
-  ] + [
-    2,
-  ])
-  s22: ([
-    1,
-    2,
-    ...,
-  ] + [])
-  s23: ([
-    ...,
-  ] + [
-    1,
-    2,
-  ])
-  s24: ([
-    1,
-    ...,
-  ] + [
-    1,
-    2,
-  ])
-  s25: ([
-    1,
-    2,
-    ...,
-  ] + [
-    1,
-  ])
-  s26: ([
-    1,
-    2,
-    ...,
-  ] + [
-    1,
-    2,
-  ])
-  s27: ([
-    ...,
-  ] + [
-    ...,
-  ])
-  s28: ([
-    1,
-    ...,
-  ] + [
-    ...,
-  ])
-  s29: ([
-    ...,
-  ] + [
-    2,
-    ...,
-  ])
-  s30: ([
-    1,
-    ...,
-  ] + [
-    2,
-    ...,
-  ])
-  s31: ([
-    1,
-    2,
-    ...,
-  ] + [
-    ...,
-  ])
-  s32: ([
-    ...,
-  ] + [
-    1,
-    2,
-    ...,
-  ])
-  s33: ([
-    1,
-    ...,
-  ] + [
-    1,
-    2,
-    ...,
-  ])
-  s34: ([
-    1,
-    2,
-    ...,
-  ] + [
-    1,
-    ...,
-  ])
-  s35: ([
-    1,
-    2,
-    ...,
-  ] + [
-    1,
-    2,
-    ...,
-  ])
-}
diff --git a/cue/testdata/resolve/022_list_unification.txtar b/cue/testdata/resolve/022_list_unification.txtar
index 7f05375..58c68f6 100644
--- a/cue/testdata/resolve/022_list_unification.txtar
+++ b/cue/testdata/resolve/022_list_unification.txtar
@@ -32,3 +32,20 @@
     ]
   })
 }
+-- out/eval --
+(struct){
+  a: (struct){
+    l: (#list){
+      0: (string){ "foo" }
+      1: (_){ _ }
+    }
+    v: (_){ _ }
+  }
+  b: (struct){
+    l: (#list){
+      0: (string){ "foo" }
+      1: (string){ "bar" }
+    }
+    v: (string){ "bar" }
+  }
+}
diff --git a/cue/testdata/resolve/023_correct_error_messages.txtar b/cue/testdata/resolve/023_correct_error_messages.txtar
index 20deba0..92f2d61 100644
--- a/cue/testdata/resolve/023_correct_error_messages.txtar
+++ b/cue/testdata/resolve/023_correct_error_messages.txtar
@@ -13,3 +13,10 @@
 {
   a: ("a" & 1)
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  a: (_|_){
+    // [eval] invalid value *adt.Num (mismatched types int and string)
+  }
+}
diff --git a/cue/testdata/resolve/024_structs.txtar b/cue/testdata/resolve/024_structs.txtar
index 01b08a5..f9d3a35 100644
--- a/cue/testdata/resolve/024_structs.txtar
+++ b/cue/testdata/resolve/024_structs.txtar
@@ -40,3 +40,28 @@
     c: int
   })
 }
+-- out/eval --
+(struct){
+  a: (struct){
+    c: (int){ 5 }
+    d: (int){ 15 }
+  }
+  b: (struct){
+    c: (int){ 7 }
+    d: (int){ 21 }
+  }
+  t: (struct){
+    c: (number){ number }
+    d: (_|_){
+      // [incomplete] non-concrete value *adt.BasicType in operand to *:
+      //     ./in.cue:3:19
+    }
+  }
+  ti: (struct){
+    c: (int){ int }
+    d: (_|_){
+      // [incomplete] non-concrete value *adt.BasicType in operand to *:
+      //     ./in.cue:3:19
+    }
+  }
+}
diff --git a/cue/testdata/resolve/025_definitions.txtar b/cue/testdata/resolve/025_definitions.txtar
index 4094bd2..29e5e84 100644
--- a/cue/testdata/resolve/025_definitions.txtar
+++ b/cue/testdata/resolve/025_definitions.txtar
@@ -117,3 +117,47 @@
     Mixed: string
   }
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  #Foo: (#struct){
+    field: (int){ int }
+    recursive: (#struct){
+      field: (string){ string }
+    }
+  }
+  #Foo1: (#struct){
+    field: (int){ int }
+    field2: (string){ string }
+  }
+  foo: (_|_){
+    // [eval] field `feild` not allowed
+    field: (int){ int }
+    recursive: (#struct){
+      field: (string){ string }
+    }
+    feild: (int){ 2 }
+  }
+  foo1: (_|_){
+    // [eval]
+    field: (int){ 2 }
+    recursive: (_|_){
+      // [eval] field `feild` not allowed
+      field: (string){ string }
+      feild: (int){ 2 }
+    }
+  }
+  #Bar: (#struct){
+    field: (int){ int }
+  }
+  bar: (#struct){
+    field: (int){ int }
+    feild: (int){ 2 }
+  }
+  #Mixed: (string){ string }
+  Mixed: (string){ string }
+  mixedRec: (struct){
+    #Mixed: (string){ string }
+    Mixed: (string){ string }
+  }
+}
diff --git a/cue/testdata/resolve/027_new-style_definitions.txtar b/cue/testdata/resolve/027_new-style_definitions.txtar
index 830748c..5867d84 100644
--- a/cue/testdata/resolve/027_new-style_definitions.txtar
+++ b/cue/testdata/resolve/027_new-style_definitions.txtar
@@ -64,3 +64,19 @@
     a: "foo"
   })
 }
+-- out/eval --
+(struct){
+  #Foo: (#struct){
+    a: (int){ 1 }
+    b: (int){ int }
+  }
+  "#Foo": (#struct){
+    a: (int){ 1 }
+    b: (int){ 1 }
+  }
+  bulk: (struct){
+    #def: (int){ 4 }
+    _hid: (int){ 3 }
+    a: (string){ "foo" }
+  }
+}
diff --git a/cue/testdata/resolve/029_non-closed_definition_carries_over_closedness_to_enclosed_template.txtar b/cue/testdata/resolve/029_non-closed_definition_carries_over_closedness_to_enclosed_template.txtar
index 61622b5..1f38c91 100644
--- a/cue/testdata/resolve/029_non-closed_definition_carries_over_closedness_to_enclosed_template.txtar
+++ b/cue/testdata/resolve/029_non-closed_definition_carries_over_closedness_to_enclosed_template.txtar
@@ -102,3 +102,43 @@
     ]
   })
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  #S: (#struct){
+  }
+  a: (_|_){
+    // [eval]
+    v: (_|_){
+      // [eval] field `b` not allowed
+      b: (int){ int }
+      a: (int){ int }
+    }
+  }
+  #Q: (#struct){
+  }
+  b: (_|_){
+    // [eval]
+    w: (_|_){
+      // [eval] field `c` not allowed
+      c: (int){ int }
+      b: (int){ int }
+    }
+  }
+  #R: (#struct){
+  }
+  c: (_|_){
+    // [eval]
+    w: (_|_){
+      // [eval]
+      0: (_|_){
+        // [eval] field `d` not allowed
+        d: (int){ int }
+        a: (int){ int }
+      }
+      1: (#struct){
+        b: (int){ int }
+      }
+    }
+  }
+}
diff --git a/cue/testdata/resolve/030_definitions_with_disjunctions.txtar b/cue/testdata/resolve/030_definitions_with_disjunctions.txtar
index 17b2d7e..b911766 100644
--- a/cue/testdata/resolve/030_definitions_with_disjunctions.txtar
+++ b/cue/testdata/resolve/030_definitions_with_disjunctions.txtar
@@ -59,3 +59,28 @@
     b: 2
   }
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  #Foo: (struct){ |((#struct){
+      field: (int){ int }
+      a: (int){ 1 }
+    }, (#struct){
+      field: (int){ int }
+      b: (int){ 2 }
+    }) }
+  foo: (#struct){
+    field: (int){ int }
+    a: (int){ 1 }
+  }
+  bar: (_|_){
+    // [eval] field `c` not allowed
+    field: (int){ int }
+    c: (int){ 2 }
+    b: (int){ 2 }
+  }
+  baz: (#struct){
+    field: (int){ int }
+    b: (int){ 2 }
+  }
+}
diff --git a/cue/testdata/resolve/031_definitions_with_disjunctions_recurisive.txtar b/cue/testdata/resolve/031_definitions_with_disjunctions_recurisive.txtar
index 86013b4..42842f6 100644
--- a/cue/testdata/resolve/031_definitions_with_disjunctions_recurisive.txtar
+++ b/cue/testdata/resolve/031_definitions_with_disjunctions_recurisive.txtar
@@ -49,3 +49,17 @@
     }
   }
 }
+-- out/eval --
+(struct){
+  #Foo: (#struct){
+    x: (struct){ |((#struct){
+        field: (int){ int }
+        c: (int){ 3 }
+        a: (int){ 1 }
+      }, (#struct){
+        field: (int){ int }
+        c: (int){ 3 }
+        b: (int){ 2 }
+      }) }
+  }
+}
diff --git a/cue/testdata/resolve/033_top-level_definition_with_struct_and_disjunction.txtar b/cue/testdata/resolve/033_top-level_definition_with_struct_and_disjunction.txtar
index d219134..fd4b8f8 100644
--- a/cue/testdata/resolve/033_top-level_definition_with_struct_and_disjunction.txtar
+++ b/cue/testdata/resolve/033_top-level_definition_with_struct_and_disjunction.txtar
@@ -50,3 +50,15 @@
     Size: 1
   })
 }
+-- out/eval --
+(struct){
+  #def: (struct){ |((#struct){
+      Type: (string){ "B" }
+      Text: (string){ string }
+      Size: (int){ 0 }
+    }, (#struct){
+      Type: (string){ "A" }
+      Text: (string){ string }
+      Size: (int){ 1 }
+    }) }
+}
diff --git a/cue/testdata/resolve/034_closing_structs.txtar b/cue/testdata/resolve/034_closing_structs.txtar
index 7bf10e0..227f9a8 100644
--- a/cue/testdata/resolve/034_closing_structs.txtar
+++ b/cue/testdata/resolve/034_closing_structs.txtar
@@ -51,3 +51,57 @@
 ctct: ct & ct
 -- out/legacy-debug --
 <0>{op: <1>{x: int}, ot: <2>{x: int, ...}, cp: <3>C{x: int}, ct: <4>{x: int, ...}, opot: <5>{x: int, ...}, otop: <6>{x: int, ...}, opcp: <7>C{x: int}, cpop: <8>C{x: int}, opct: <9>{x: int, ...}, ctop: <10>{x: int, ...}, otcp: <11>C{x: int}, cpot: <12>C{x: int}, otct: <13>{x: int, ...}, ctot: <14>{x: int, ...}, cpct: <15>C{x: int}, ctcp: <16>C{x: int}, ctct: <17>{x: int, ...}}
+-- out/eval --
+(struct){
+  op: (struct){
+    x: (int){ int }
+  }
+  ot: (struct){
+    x: (int){ int }
+  }
+  cp: (#struct){
+    x: (int){ int }
+  }
+  ct: (#struct){
+    x: (int){ int }
+  }
+  opot: (struct){
+    x: (int){ int }
+  }
+  otop: (struct){
+    x: (int){ int }
+  }
+  opcp: (#struct){
+    x: (int){ int }
+  }
+  cpop: (#struct){
+    x: (int){ int }
+  }
+  opct: (#struct){
+    x: (int){ int }
+  }
+  ctop: (#struct){
+    x: (int){ int }
+  }
+  otcp: (#struct){
+    x: (int){ int }
+  }
+  cpot: (#struct){
+    x: (int){ int }
+  }
+  otct: (#struct){
+    x: (int){ int }
+  }
+  ctot: (#struct){
+    x: (int){ int }
+  }
+  cpct: (#struct){
+    x: (int){ int }
+  }
+  ctcp: (#struct){
+    x: (int){ int }
+  }
+  ctct: (#struct){
+    x: (int){ int }
+  }
+}
diff --git a/cue/testdata/resolve/035_excluded_embedding_from_closing.txtar b/cue/testdata/resolve/035_excluded_embedding_from_closing.txtar
index 1fed0d7..2eca941 100644
--- a/cue/testdata/resolve/035_excluded_embedding_from_closing.txtar
+++ b/cue/testdata/resolve/035_excluded_embedding_from_closing.txtar
@@ -62,3 +62,34 @@
     }
   })
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  #S: (#struct){
+    c: (#struct){
+      d: (int){ int }
+    }
+    a: (#struct){
+      c: (int){ int }
+    }
+    b: (#struct){
+      open: (int){ int }
+    }
+  }
+  V: (_|_){
+    // [eval]
+    c: (_|_){
+      // [eval] field `e` not allowed
+      d: (int){ int }
+      e: (int){ int }
+    }
+    a: (#struct){
+      c: (int){ int }
+    }
+    b: (_|_){
+      // [eval] field `extra` not allowed
+      open: (int){ int }
+      extra: (int){ int }
+    }
+  }
+}
diff --git a/cue/testdata/resolve/038_incomplete_comprehensions.txtar b/cue/testdata/resolve/038_incomplete_comprehensions.txtar
index f57accf..5f07859 100644
--- a/cue/testdata/resolve/038_incomplete_comprehensions.txtar
+++ b/cue/testdata/resolve/038_incomplete_comprehensions.txtar
@@ -47,3 +47,21 @@
     ]
   })
 }
+-- out/eval --
+(struct){
+  A: (_|_){
+    // [incomplete] incomplete feed source value src (type _):
+    //     ./in.cue:2:11
+    src: (_){ _ }
+    baz: (string){ "baz" }
+  }
+  B: (struct){
+    src: (#list){
+      0: (string){ "foo" }
+      1: (string){ "bar" }
+    }
+    baz: (string){ "baz" }
+    foo: (string){ "foo" }
+    bar: (string){ "bar" }
+  }
+}
diff --git a/cue/testdata/resolve/039_reference_to_root.txtar b/cue/testdata/resolve/039_reference_to_root.txtar
index 85324e5..c13bba8 100644
--- a/cue/testdata/resolve/039_reference_to_root.txtar
+++ b/cue/testdata/resolve/039_reference_to_root.txtar
@@ -77,3 +77,48 @@
     b: 100
   })
 }
+-- out/eval --
+(struct){
+  a: (struct){
+    b: (int){ int }
+  }
+  c: (struct){
+    b: (int){ 100 }
+    d: (_|_){
+      // [incomplete] non-concrete value *adt.BasicType in operand to +:
+      //     ./in.cue:4:5
+    }
+  }
+  x: (struct){
+    b: (int){ int }
+    c: (_|_){
+      // [incomplete] non-concrete value *adt.BasicType in operand to +:
+      //     ./in.cue:8:5
+    }
+  }
+  y: (struct){
+    b: (int){ 100 }
+    c: (int){ 105 }
+  }
+  v: (struct){
+    b: (int){ int }
+    c: (_|_){
+      // [incomplete] non-concrete value *adt.BasicType in operand to +:
+      //     ./in.cue:16:5
+    }
+  }
+  w: (struct){
+    b: (int){ 100 }
+    c: (_|_){
+      // [incomplete] non-concrete value *adt.BasicType in operand to +:
+      //     ./in.cue:16:5
+    }
+  }
+  wp: (struct){
+    b: (int){ 100 }
+    c: (_|_){
+      // [incomplete] non-concrete value *adt.BasicType in operand to +:
+      //     ./in.cue:16:5
+    }
+  }
+}
diff --git a/cue/testdata/resolve/040_references_from_template_to_concrete.txtar b/cue/testdata/resolve/040_references_from_template_to_concrete.txtar
index 8126739..b54da04 100644
--- a/cue/testdata/resolve/040_references_from_template_to_concrete.txtar
+++ b/cue/testdata/resolve/040_references_from_template_to_concrete.txtar
@@ -85,3 +85,26 @@
     }
   }
 }
+-- out/eval --
+(struct){
+  res: (#list){
+    0: (struct){
+      x: (struct){
+        b: (struct){
+          str: (string){ "DDDD" }
+        }
+        a: (string){ "XDDDD" }
+        c: (string){ "X" }
+      }
+    }
+  }
+  t: (struct){
+    x: (struct){
+      b: (struct){
+        str: (string){ "DDDD" }
+      }
+      a: (string){ "XDDDD" }
+      c: (string){ "X" }
+    }
+  }
+}
diff --git a/cue/testdata/resolve/041_interpolation.txtar b/cue/testdata/resolve/041_interpolation.txtar
deleted file mode 100644
index 6e6ee9e..0000000
--- a/cue/testdata/resolve/041_interpolation.txtar
+++ /dev/null
@@ -1,33 +0,0 @@
-# DO NOT EDIT; generated by go run testdata/gen.go
-#
-#name: interpolation
-#evalPartial
--- in.cue --
-a: "\(4)"
-b: "one \(a) two \( a+c )"
-c: "one"
-d: "\(r)"
-u: "\(_)"
-r: _
-e: "\([])"
--- out/def --
-a: "4"
-b: "one 4 two 4one"
-c: "one"
-d: "\(r)"
-r: _
-u: "\(_)"
-e: _|_ // expression in interpolation must evaluate to a number kind or string (found list)
--- out/legacy-debug --
-<0>{a: "4", b: "one 4 two 4one", c: "one", d: ""+<1>.r+"", r: _, u: ""+_+"", e: _|_([]:expression in interpolation must evaluate to a number kind or string (found list))}
--- out/compile --
---- in.cue
-{
-  a: "\(4)"
-  b: "one \(〈0;a〉) two \((〈0;a〉 + 〈0;c〉))"
-  c: "one"
-  d: "\(〈0;r〉)"
-  u: "\(_)"
-  r: _
-  e: "\([])"
-}
diff --git a/cue/testdata/resolve/043_diamond-shaped_constraints.txtar b/cue/testdata/resolve/043_diamond-shaped_constraints.txtar
index 6b82234..b605322 100644
--- a/cue/testdata/resolve/043_diamond-shaped_constraints.txtar
+++ b/cue/testdata/resolve/043_diamond-shaped_constraints.txtar
@@ -98,3 +98,27 @@
     }
   })
 }
+-- out/eval --
+(struct){
+  S: (struct){
+    A: (struct){
+      a: (int){ 1 }
+    }
+    B: (struct){
+      a: (int){ 1 }
+      b: (int){ 2 }
+    }
+  }
+  T: (struct){
+    A: (struct){
+      a: (int){ 1 }
+      c: (int){ 3 }
+    }
+    B: (struct){
+      a: (int){ 1 }
+      c: (int){ 3 }
+      b: (int){ 2 }
+      d: (int){ 4 }
+    }
+  }
+}
diff --git a/cue/testdata/resolve/044_field_templates.txtar b/cue/testdata/resolve/044_field_templates.txtar
index a2b3aac..09e2887 100644
--- a/cue/testdata/resolve/044_field_templates.txtar
+++ b/cue/testdata/resolve/044_field_templates.txtar
@@ -129,3 +129,29 @@
     bar: _
   }
 }
+-- out/eval --
+(struct){
+  a: (struct){
+    k: (int){ 1 }
+  }
+  b: (struct){
+    v: (struct){
+      x: (int){ 0 }
+      y: (int){ |(*(int){ 1 }, (int){ int }) }
+    }
+    w: (struct){
+      x: (int){ 0 }
+      y: (int){ |(*(int){ 1 }, (int){ int }) }
+    }
+  }
+  c: (struct){
+    foo: (struct){
+      name: (string){ "foo" }
+      y: (int){ 1 }
+    }
+    bar: (struct){
+      name: (string){ "bar" }
+      y: (int){ 1 }
+    }
+  }
+}
diff --git a/cue/testdata/resolve/045_range_unification.txtar b/cue/testdata/resolve/045_range_unification.txtar
index 2a91782..95d0279 100644
--- a/cue/testdata/resolve/045_range_unification.txtar
+++ b/cue/testdata/resolve/045_range_unification.txtar
@@ -132,3 +132,60 @@
   n4: ((>=0.0 & <=0.1) & 0.09999)
   n5: ((>=1 & <=5) & 2.5)
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  a1: (int){ 3 }
+  a2: (int){ 1 }
+  a3: (int){ 5 }
+  a4: (_|_){
+    // [eval] invalid value *adt.Num (out of bound *adt.BoundValue)
+  }
+  a5: (_|_){
+    // [eval] invalid value *adt.Num (out of bound *adt.BoundValue)
+  }
+  a6: (int){ 3 }
+  a7: (int){ 1 }
+  a8: (int){ 5 }
+  a9: (_|_){
+    // [eval] invalid value *adt.Num (out of bound *adt.BoundValue)
+  }
+  a10: (_|_){
+    // [eval] invalid value *adt.Num (out of bound *adt.BoundValue)
+  }
+  b1: (number){ &(>=1, <=5) }
+  b2: (number){ 1 }
+  b3: (number){ 5 }
+  b4: (number){ &(>=2, <=3) }
+  b5: (number){ &(>=3, <=5) }
+  b6: (number){ 5 }
+  b7: (_|_){
+    // [eval] bounds *adt.BoundValue *adt.BoundValue
+  }
+  b8: (number){ &(>=1, <=5) }
+  b9: (number){ 1 }
+  b10: (number){ 5 }
+  b11: (number){ &(>=2, <=3) }
+  b12: (number){ &(>=3, <=5) }
+  b13: (number){ 5 }
+  b14: (_|_){
+    // [eval] bounds *adt.BoundValue *adt.BoundValue
+  }
+  c1: (int){ &(>=1, <=5, int) }
+  c2: (int){ &(>=1, <=5, int) }
+  c3: (_|_){
+    // [eval] invalid value *adt.BoundValue (mismatched types number and string)
+  }
+  c4: (_|_){
+    // [eval] invalid value *adt.BasicType (mismatched types string and number)
+  }
+  s1: (string){ "e" }
+  s2: (string){ "ee" }
+  n1: (number){ &(>=1, <=2) }
+  n2: (_|_){
+    // [eval] bounds *adt.BoundValue *adt.BoundValue
+  }
+  n3: (int){ 2 }
+  n4: (float){ 0.09999 }
+  n5: (float){ 2.5 }
+}
diff --git a/cue/testdata/resolve/046_predefined_ranges.txtar b/cue/testdata/resolve/046_predefined_ranges.txtar
index f918a50..1597a35 100644
--- a/cue/testdata/resolve/046_predefined_ranges.txtar
+++ b/cue/testdata/resolve/046_predefined_ranges.txtar
@@ -20,10 +20,19 @@
 -- out/compile --
 --- in.cue
 {
-  k1: (>=-128 & <=127)
+  k1: &(int, >=-128, <=127)
   k1: 44
-  k2: (>=-9223372036854775808 & <=9223372036854775807)
+  k2: &(int, >=-9223372036854775808, <=9223372036854775807)
   k2: -8000000000
-  e1: (>=-32768 & <=32767)
+  e1: &(int, >=-32768, <=32767)
   e1: 100000
 }
+-- out/eval --
+(_|_){
+  // [eval]
+  k1: (int){ 44 }
+  k2: (int){ -8000000000 }
+  e1: (_|_){
+    // [eval] invalid value *adt.Num (out of bound *adt.BoundValue)
+  }
+}
diff --git a/cue/testdata/resolve/047_struct_comprehensions.txtar b/cue/testdata/resolve/047_struct_comprehensions.txtar
index b0cd52c..c4c5467 100644
--- a/cue/testdata/resolve/047_struct_comprehensions.txtar
+++ b/cue/testdata/resolve/047_struct_comprehensions.txtar
@@ -77,3 +77,15 @@
     "\(〈1;k〉)": 〈1;v〉
   }
 }
+-- out/eval --
+(struct){
+  obj: (struct){
+    foo: (struct){
+      a: (string){ "bar" }
+      sub: (struct){
+        as: (string){ "bar" }
+      }
+    }
+  }
+  reg: (int){ 4 }
+}
diff --git a/internal/core/adt/adt.go b/internal/core/adt/adt.go
index 2a91ff9..8ae9338 100644
--- a/internal/core/adt/adt.go
+++ b/internal/core/adt/adt.go
@@ -56,27 +56,28 @@
 // An Evaluator provides a method to convert to a value.
 type Evaluator interface {
 	Node
-	// TODO: Eval(c Context, env *Environment) Value
+	evaluate(ctx *OpContext) Value
 }
 
 // A Resolver represents a reference somewhere else within a tree that resolves
 // a value.
 type Resolver interface {
 	Node
-	// TODO: Resolve(c Context, env *Environment) Arc
+	resolve(ctx *OpContext) *Vertex
 }
 
+type YieldFunc func(env *Environment, s *StructLit)
+
 // A Yielder represents 0 or more labeled values of structs or lists.
 type Yielder interface {
 	Node
-	yielderNode()
-	// TODO: Yield()
+	yield(ctx *OpContext, fn YieldFunc)
 }
 
 // A Validator validates a Value. All Validators are Values.
 type Validator interface {
-	// TODO: Validate(c Context, v Value) *Bottom
 	Value
+	validate(c *OpContext, v Value) *Bottom
 }
 
 // Value
@@ -164,8 +165,7 @@
 
 // Decl and Yielder
 
-func (*LetClause) declNode()    {}
-func (*LetClause) yielderNode() {}
+func (*LetClause) declNode() {}
 
 // Decl and Elem
 
@@ -236,16 +236,12 @@
 
 // Decl, Elem, and Yielder
 
-func (*ForClause) declNode()    {}
-func (*ForClause) elemNode()    {}
-func (*ForClause) yielderNode() {}
-func (*IfClause) declNode()     {}
-func (*IfClause) elemNode()     {}
-func (*IfClause) yielderNode()  {}
+func (*ForClause) declNode() {}
+func (*ForClause) elemNode() {}
+func (*IfClause) declNode()  {}
+func (*IfClause) elemNode()  {}
 
-// Yielder
-
-func (*ValueClause) yielderNode() {}
+// Yielder only: ValueClause
 
 // Node
 
diff --git a/internal/core/adt/binop.go b/internal/core/adt/binop.go
new file mode 100644
index 0000000..2fb3205
--- /dev/null
+++ b/internal/core/adt/binop.go
@@ -0,0 +1,364 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adt
+
+import (
+	"bytes"
+	"math/big"
+	"strings"
+
+	"cuelang.org/go/cue/errors"
+	"github.com/cockroachdb/apd/v2"
+)
+
+var apdCtx apd.Context
+
+func init() {
+	apdCtx = apd.BaseContext
+	apdCtx.Precision = 24
+}
+
+// BinOp handles all operations except AndOp and OrOp. This includes processing
+// unary comparators such as '<4' and '=~"foo"'.
+//
+// BinOp returns nil if left and right are not both concrete.
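+//
+// A minimal illustrative sketch (assuming an initialized *OpContext c):
+//
+//	lt := BinOp(c, LessThanOp, c.NewInt64(3), c.NewInt64(4))
+//	// lt is the *Bool value true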
+func BinOp(c *OpContext, op Op, left, right Value) Value {
+	leftKind := left.Kind()
+	rightKind := right.Kind()
+
+	const msg = "non-concrete value '%v' to operation '%s'"
+	if left.Concreteness() > Concrete {
+		return &Bottom{
+			Code: IncompleteError,
+			Err:  errors.Newf(c.pos(), msg, c.Str(left), op),
+		}
+	}
+	if right.Concreteness() > Concrete {
+		return &Bottom{
+			Code: IncompleteError,
+			Err:  errors.Newf(c.pos(), msg, c.Str(right), op),
+		}
+	}
+
+	if a, ok := left.(*Bottom); ok {
+		return CombineErrors(nil, a, right)
+	}
+	if b, ok := right.(*Bottom); ok {
+		return b
+	}
+
+	switch op {
+	case EqualOp:
+		switch {
+		case leftKind == NullKind && rightKind == NullKind:
+			return c.newBool(true)
+
+		case leftKind == NullKind || rightKind == NullKind:
+			return c.newBool(false)
+
+		case leftKind == BoolKind:
+			return c.newBool(c.BoolValue(left) == c.BoolValue(right))
+
+		case leftKind == StringKind:
+			// normalize?
+			return cmpTonode(c, op, strings.Compare(c.StringValue(left), c.StringValue(right)))
+
+		case leftKind == BytesKind:
+			return cmpTonode(c, op, bytes.Compare(c.bytesValue(left, op), c.bytesValue(right, op)))
+
+		case leftKind&NumKind != 0 && rightKind&NumKind != 0:
+			// n := c.newNum()
+			return cmpTonode(c, op, c.num(left, op).X.Cmp(&c.num(right, op).X))
+
+		case leftKind == ListKind && rightKind == ListKind:
+			x := c.Elems(left)
+			y := c.Elems(right)
+			if len(x) != len(y) {
+				return c.newBool(false)
+			}
+			for i, e := range x {
+				a, _ := c.Concrete(nil, e, op)
+				b, _ := c.Concrete(nil, y[i], op)
+				if !test(c, EqualOp, a, b) {
+					return c.newBool(false)
+				}
+			}
+			return c.newBool(true)
+		}
+
+	case NotEqualOp:
+		switch {
+		case leftKind == NullKind && rightKind == NullKind:
+			return c.newBool(false)
+
+		case leftKind == NullKind || rightKind == NullKind:
+			return c.newBool(true)
+
+		case leftKind == BoolKind:
+			return c.newBool(c.boolValue(left, op) != c.boolValue(right, op))
+
+		case leftKind == StringKind:
+			// normalize?
+			return cmpTonode(c, op, strings.Compare(c.StringValue(left), c.StringValue(right)))
+
+		case leftKind == BytesKind:
+			return cmpTonode(c, op, bytes.Compare(c.bytesValue(left, op), c.bytesValue(right, op)))
+
+		case leftKind&NumKind != 0 && rightKind&NumKind != 0:
+			// n := c.newNum()
+			return cmpTonode(c, op, c.num(left, op).X.Cmp(&c.num(right, op).X))
+
+		case leftKind == ListKind && rightKind == ListKind:
+			x := c.Elems(left)
+			y := c.Elems(right)
+			if len(x) != len(y) {
+				return c.newBool(false)
+			}
+			for i, e := range x {
+				a, _ := c.Concrete(nil, e, op)
+				b, _ := c.Concrete(nil, y[i], op)
+				if !test(c, EqualOp, a, b) {
+					return c.newBool(true)
+				}
+			}
+			return c.newBool(false)
+		}
+
+	case LessThanOp, LessEqualOp, GreaterEqualOp, GreaterThanOp:
+		switch {
+		case leftKind == StringKind && rightKind == StringKind:
+			// normalize?
+			return cmpTonode(c, op, strings.Compare(c.stringValue(left, op), c.stringValue(right, op)))
+
+		case leftKind == BytesKind && rightKind == BytesKind:
+			return cmpTonode(c, op, bytes.Compare(c.bytesValue(left, op), c.bytesValue(right, op)))
+
+		case leftKind&NumKind != 0 && rightKind&NumKind != 0:
+			// n := c.newNum(left, right)
+			return cmpTonode(c, op, c.num(left, op).X.Cmp(&c.num(right, op).X))
+		}
+
+	case BoolAndOp:
+		return c.newBool(c.boolValue(left, op) && c.boolValue(right, op))
+
+	case BoolOrOp:
+		return c.newBool(c.boolValue(left, op) || c.boolValue(right, op))
+
+	case MatchOp:
+		// if y.re == nil {
+		// 	// This really should not happen, but leave in for safety.
+		// 	b, err := Regexp.MatchString(str, x.str)
+		// 	if err != nil {
+		// 		return c.Errf(Src, "error parsing Regexp: %v", err)
+		// 	}
+		// 	return boolTonode(Src, b)
+		// }
+		return c.newBool(c.regexp(right).MatchString(c.stringValue(left, op)))
+
+	case NotMatchOp:
+		return c.newBool(!c.regexp(right).MatchString(c.stringValue(left, op)))
+
+	case AddOp:
+		switch {
+		case leftKind&NumKind != 0 && rightKind&NumKind != 0:
+			return numOp(c, apdCtx.Add, left, right, AddOp)
+
+		case leftKind == StringKind && rightKind == StringKind:
+			return c.NewString(c.StringValue(left) + c.StringValue(right))
+
+		case leftKind == BytesKind && rightKind == BytesKind:
+			ba := c.bytesValue(left, op)
+			bb := c.bytesValue(right, op)
+			b := make([]byte, len(ba)+len(bb))
+			copy(b, ba)
+			copy(b[len(ba):], bb)
+			return c.newBytes(b)
+
+		case leftKind == ListKind && rightKind == ListKind:
+			a := c.Elems(left)
+			b := c.Elems(right)
+			if err := c.Err(); err != nil {
+				return err
+			}
+			n := c.newList(c.src, nil)
+			if err := n.appendListArcs(a); err != nil {
+				return err
+			}
+			if err := n.appendListArcs(b); err != nil {
+				return err
+			}
+			// n.isList = true
+			// n.IsClosed = true
+			return n
+		}
+
+	case SubtractOp:
+		return numOp(c, apdCtx.Sub, left, right, op)
+
+	case MultiplyOp:
+		switch {
+		// float
+		case leftKind&NumKind != 0 && rightKind&NumKind != 0:
+			return numOp(c, apdCtx.Mul, left, right, op)
+
+		case leftKind == StringKind && rightKind == IntKind:
+			const as = "string multiplication"
+			return c.NewString(strings.Repeat(c.stringValue(left, as), int(c.uint64(right, as))))
+
+		case leftKind == IntKind && rightKind == StringKind:
+			const as = "string multiplication"
+			return c.NewString(strings.Repeat(c.stringValue(right, as), int(c.uint64(left, as))))
+
+		case leftKind == BytesKind && rightKind == IntKind:
+			const as = "bytes multiplication"
+			return c.newBytes(bytes.Repeat(c.bytesValue(left, as), int(c.uint64(right, as))))
+
+		case leftKind == IntKind && rightKind == BytesKind:
+			const as = "bytes multiplication"
+			return c.newBytes(bytes.Repeat(c.bytesValue(right, as), int(c.uint64(left, as))))
+
+		case leftKind == ListKind && rightKind == IntKind:
+			left, right = right, left
+			fallthrough
+
+		case leftKind == IntKind && rightKind == ListKind:
+			a := c.Elems(right)
+			n := c.newList(c.src, nil)
+			// n.IsClosed = true
+			index := int64(0)
+			for i := c.uint64(left, "list multiplier"); i > 0; i-- {
+				for _, a := range a {
+					f, _ := MakeLabel(a.Source(), index, IntLabel)
+					n.Arcs = append(n.Arcs, &Vertex{
+						Parent:    n,
+						Label:     f,
+						Conjuncts: a.Conjuncts,
+					})
+					index++
+				}
+			}
+			return n
+		}
+
+	case FloatQuotientOp:
+		if leftKind&NumKind != 0 && rightKind&NumKind != 0 {
+			v := numOp(c, apdCtx.Quo, left, right, op)
+			if n, ok := v.(*Num); ok {
+				n.K = FloatKind
+			}
+			return v
+		}
+
+	case IntDivideOp:
+		if leftKind&IntKind != 0 && rightKind&IntKind != 0 {
+			y := c.num(right, op)
+			if y.X.IsZero() {
+				return c.NewErrf("division by zero")
+			}
+			return intOp(c, (*big.Int).Div, c.num(left, op), y)
+		}
+
+	case IntModuloOp:
+		if leftKind&IntKind != 0 && rightKind&IntKind != 0 {
+			y := c.num(right, op)
+			if y.X.IsZero() {
+				return c.NewErrf("division by zero")
+			}
+			return intOp(c, (*big.Int).Mod, c.num(left, op), y)
+		}
+
+	case IntQuotientOp:
+		if leftKind&IntKind != 0 && rightKind&IntKind != 0 {
+			y := c.num(right, op)
+			if y.X.IsZero() {
+				return c.NewErrf("division by zero")
+			}
+			return intOp(c, (*big.Int).Quo, c.num(left, op), y)
+		}
+
+	case IntRemainderOp:
+		if leftKind&IntKind != 0 && rightKind&IntKind != 0 {
+			y := c.num(right, op)
+			if y.X.IsZero() {
+				return c.NewErrf("division by zero")
+			}
+			return intOp(c, (*big.Int).Rem, c.num(left, op), y)
+		}
+	}
+
+	return c.NewErrf("invalid operands %s and %s to '%s' (type %s and %s)",
+		c.Str(left), c.Str(right), op, left.Kind(), right.Kind())
+}
+
+func cmpTonode(c *OpContext, op Op, r int) Value {
+	result := false
+	switch op {
+	case LessThanOp:
+		result = r == -1
+	case LessEqualOp:
+		result = r != 1
+	case EqualOp, AndOp:
+		result = r == 0
+	case NotEqualOp:
+		result = r != 0
+	case GreaterEqualOp:
+		result = r != -1
+	case GreaterThanOp:
+		result = r == 1
+	}
+	return c.newBool(result)
+}
+
+type numFunc func(z, x, y *apd.Decimal) (apd.Condition, error)
+
+func numOp(c *OpContext, fn numFunc, a, b Value, op Op) Value {
+	var d apd.Decimal
+	x := c.num(a, op)
+	y := c.num(b, op)
+	cond, err := fn(&d, &x.X, &y.X)
+	if err != nil {
+		return c.NewErrf("failed arithmetic: %v", err)
+	}
+	if cond.DivisionByZero() {
+		return c.NewErrf("division by zero")
+	}
+	k := x.Kind() & y.Kind()
+	if k == 0 {
+		k = FloatKind
+	}
+	return c.newNum(&d, k)
+}
+
+type intFunc func(z, x, y *big.Int) *big.Int
+
+func intOp(c *OpContext, fn intFunc, a, b *Num) Value {
+	var d apd.Decimal
+
+	var x, y apd.Decimal
+	_, _ = apdCtx.RoundToIntegralValue(&x, &a.X)
+	if x.Negative {
+		x.Coeff.Neg(&x.Coeff)
+	}
+	_, _ = apdCtx.RoundToIntegralValue(&y, &b.X)
+	if y.Negative {
+		y.Coeff.Neg(&y.Coeff)
+	}
+	fn(&d.Coeff, &x.Coeff, &y.Coeff)
+	if d.Coeff.Sign() < 0 {
+		d.Coeff.Neg(&d.Coeff)
+		d.Negative = true
+	}
+	return c.newNum(&d, IntKind)
+}
diff --git a/internal/core/adt/composite.go b/internal/core/adt/composite.go
index bd5f99b..3246c5d 100644
--- a/internal/core/adt/composite.go
+++ b/internal/core/adt/composite.go
@@ -15,6 +15,8 @@
 package adt
 
 import (
+	"fmt"
+
 	"cuelang.org/go/cue/ast"
 	"cuelang.org/go/cue/errors"
 	"cuelang.org/go/cue/token"
@@ -26,6 +28,30 @@
 type Environment struct {
 	Up     *Environment
 	Vertex *Vertex
+
+	// DynamicLabel is only set when instantiating a field from a pattern
+	// constraint. It is used to resolve label references.
+	DynamicLabel Feature
+
+	// CloseID is a unique number that tracks a group of conjuncts that need
+	// to belong to a single originating definition.
+	CloseID uint32
+
+	cache map[Expr]Value
+}
+
+// evalCached is used to look up let expressions. Caching let expressions
+// prevents a possible combinatorial explosion.
+func (e *Environment) evalCached(c *OpContext, x Expr) Value {
+	v, ok := e.cache[x]
+	if !ok {
+		if e.cache == nil {
+			e.cache = map[Expr]Value{}
+		}
+		v = c.eval(x)
+		e.cache[x] = v
+	}
+	return v
 }
 
 // A Vertex is a node in the value tree. It may be a leaf or internal node.
@@ -37,27 +63,171 @@
 // It maintains source information such as a list of conjuncts that contributed
 // to the value.
 type Vertex struct {
-	Parent *Vertex // Do we need this?
+	// Parent links to a parent Vertex. This parent should only be used to
+	// access the parent's Label field to find the relative location within a
+	// tree.
+	Parent *Vertex
 
 	// Label is the feature leading to this vertex.
 	Label Feature
 
+	// status indicates the evaluation progress of this vertex.
+	status VertexStatus
+
 	// Value is the value associated with this vertex. For lists and structs
 	// this is a sentinel value indicating its kind.
 	Value Value
 
+	// ChildErrors is the collection of all errors of children.
+	ChildErrors *Bottom
+
 	// The parent of nodes can be followed to determine the path within the
 	// configuration of this node.
 	// Value  Value
 	Arcs []*Vertex // arcs are sorted in display order.
 
 	// Conjuncts lists the structs that ultimately formed this Composite value.
-	// This includes all selected disjuncts. This information is used to compute
-	// the topological sort of arcs.
+	// This includes all selected disjuncts.
+	//
+	// This value may be nil, in which case the Arcs are considered to define
+	// the final value of this Vertex.
 	Conjuncts []Conjunct
 
 	// Structs is a slice of struct literals that contributed to this value.
+	// This information is used to compute the topological sort of arcs.
 	Structs []*StructLit
+
+	// Closed contains information about how to interpret field labels for the
+	// various conjuncts with respect to which fields are allowed in this
+	// Vertex. It allows all fields if it is nil.
+	// The evaluator will first check existing fields before using this. So for
+	// simple cases, an Acceptor can always return false to close the Vertex.
+	Closed Acceptor
+}
+
+// VertexStatus indicates the evaluation progress of a Vertex.
+type VertexStatus int8
+
+const (
+	// Unprocessed indicates a Vertex has not been processed before.
+	// Value must be nil.
+	Unprocessed VertexStatus = iota
+
+	// Evaluating means that the current Vertex is being evaluated. If this is
+	// encountered it indicates a reference cycle. Value must be nil.
+	Evaluating
+
+	// Partial indicates that the result was only partially evaluated. It will
+	// need to be fully evaluated to get a complete result.
+	//
+	// TODO: this currently requires a renewed computation. Cache the
+	// nodeContext to allow reusing the computations done so far.
+	Partial
+
+	// EvaluatingArcs indicates that the arcs of the Vertex are currently being
+	// evaluated. If this is encountered it indicates a structural cycle.
+	// Value does not have to be nil.
+	EvaluatingArcs
+
+	// Finalized means that this node is fully evaluated and that the results
+	// are save to use without further consideration.
+	Finalized
+)
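+
+// In the common case a Vertex progresses through these states in order
+// (illustrative lifecycle, not an enforced invariant):
+//
+//	Unprocessed -> Evaluating -> Partial -> EvaluatingArcs -> Finalized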
+
+func (v *Vertex) Status() VertexStatus {
+	return v.status
+}
+
+func (v *Vertex) UpdateStatus(s VertexStatus) {
+	if v.status > s+1 {
+		panic(fmt.Sprintf("attempt to regress status from %d to %d", v.Status(), s))
+	}
+	if s == Finalized && v.Value == nil {
+		// panic("not finalized")
+	}
+	v.status = s
+}
+
+func (v *Vertex) IsErr() bool {
+	// if v.Status() > Evaluating {
+	if _, ok := v.Value.(*Bottom); ok {
+		return true
+	}
+	// }
+	return false
+}
+
+func (v *Vertex) Err(c *OpContext, state VertexStatus) *Bottom {
+	c.Unify(c, v, state)
+	if b, ok := v.Value.(*Bottom); ok {
+		return b
+	}
+	return nil
+}
+
+// func (v *Vertex) Evaluate()
+
+func (v *Vertex) Finalize(c *OpContext) {
+	if c == nil {
+		fmt.Println("WOT?")
+	}
+	c.Unify(c, v, Finalized)
+}
+
+func (v *Vertex) AddErr(ctx *OpContext, b *Bottom) {
+	v.Value = CombineErrors(nil, v.Value, b)
+	v.UpdateStatus(Finalized)
+}
+
+func (v *Vertex) SetValue(ctx *OpContext, state VertexStatus, value Value) *Bottom {
+	v.Value = value
+	v.UpdateStatus(state)
+	return nil
+}
+
+// ToVertex wraps v in a new Vertex, if necessary.
+func ToVertex(v Value) *Vertex {
+	switch x := v.(type) {
+	case *Vertex:
+		return x
+	default:
+		n := &Vertex{
+			status: Finalized,
+			Value:  x,
+		}
+		n.AddConjunct(MakeConjunct(nil, v))
+		return n
+	}
+}
+
+// Unwrap returns the possibly non-concrete scalar value of v or nil if v is
+// a list, struct or of undefined type.
+func Unwrap(v Value) Value {
+	x, ok := v.(*Vertex)
+	if !ok {
+		return v
+	}
+	switch x.Value.(type) {
+	case *StructMarker, *ListMarker:
+		return v
+	default:
+		return x.Value
+	}
+}
+
+// Acceptor is a single interface that reports whether feature f is a valid
+// field label for this vertex.
+//
+// TODO: combine this with the StructMarker functionality?
+type Acceptor interface {
+	// Accept reports whether a given field is accepted as output.
+	// Pass an InvalidLabel to determine whether this is always open.
+	Accept(ctx *OpContext, f Feature) bool
+
+	// MatchAndInsert finds the conjuncts for optional fields, pattern
+	// constraints, and additional constraints that match f and inserts them in
+	// arc. Use a zero f to match only the additional constraints.
+	MatchAndInsert(c *OpContext, arc *Vertex)
 }
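+
+// As an illustrative sketch only, an Acceptor that unconditionally closes a
+// Vertex could be written as:
+//
+//	type closedAcceptor struct{}
+//
+//	func (closedAcceptor) Accept(ctx *OpContext, f Feature) bool    { return false }
+//	func (closedAcceptor) MatchAndInsert(c *OpContext, arc *Vertex) {}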
 
 func (v *Vertex) Kind() Kind {
@@ -69,13 +239,57 @@
 	return v.Value.Kind()
 }
 
+func (v *Vertex) IsClosed(ctx *OpContext) bool {
+	switch x := v.Value.(type) {
+	case *ListMarker:
+		// TODO: use one mechanism.
+		if x.IsOpen {
+			return false
+		}
+		if v.Closed == nil {
+			return true
+		}
+		return !v.Closed.Accept(ctx, InvalidLabel)
+
+	case *StructMarker:
+		if x.NeedClose {
+			return true
+		}
+		if v.Closed == nil {
+			return false
+		}
+		return !v.Closed.Accept(ctx, InvalidLabel)
+	}
+	return false
+}
+
+func (v *Vertex) Accept(ctx *OpContext, f Feature) bool {
+	if !v.IsClosed(ctx) || v.Lookup(f) != nil {
+		return true
+	}
+	if v.Closed != nil {
+		return v.Closed.Accept(ctx, f)
+	}
+	return false
+}
+
+func (v *Vertex) MatchAndInsert(ctx *OpContext, arc *Vertex) {
+	if v.Closed == nil {
+		return
+	}
+	if !v.Accept(ctx, arc.Label) {
+		return
+	}
+	v.Closed.MatchAndInsert(ctx, arc)
+}
+
 func (v *Vertex) IsList() bool {
 	_, ok := v.Value.(*ListMarker)
 	return ok
 }
 
-// lookup returns the Arc with label f if it exists or nil otherwise.
-func (v *Vertex) lookup(f Feature) *Vertex {
+// Lookup returns the Arc with label f if it exists or nil otherwise.
+func (v *Vertex) Lookup(f Feature) *Vertex {
 	for _, a := range v.Arcs {
 		if a.Label == f {
 			return a
@@ -84,10 +298,22 @@
 	return nil
 }
 
+// Elems returns the regular elements of a list.
+func (v *Vertex) Elems() []*Vertex {
+	// TODO: add bookkeeping for where list arcs start and end.
+	a := make([]*Vertex, 0, len(v.Arcs))
+	for _, x := range v.Arcs {
+		if x.Label.IsInt() {
+			a = append(a, x)
+		}
+	}
+	return a
+}
+
 // GetArc returns a Vertex for the outgoing arc with label f. It creates and
 // adds one if it doesn't yet exist.
 func (v *Vertex) GetArc(f Feature) (arc *Vertex, isNew bool) {
-	arc = v.lookup(f)
+	arc = v.Lookup(f)
 	if arc == nil {
 		arc = &Vertex{Parent: v, Label: f}
 		v.Arcs = append(v.Arcs, arc)
@@ -100,19 +326,32 @@
 
 // AddConjunct adds the given Conjuncts to v if it doesn't already exist.
 func (v *Vertex) AddConjunct(c Conjunct) *Bottom {
+	if v.Value != nil {
+		// This is likely a bug in the evaluator and should not happen.
+		return &Bottom{Err: errors.Newf(token.NoPos, "cannot add conjunct")}
+	}
 	for _, x := range v.Conjuncts {
 		if x == c {
 			return nil
 		}
 	}
-	if v.Value != nil {
-		// This is likely a bug in the evaluator and should not happen.
-		return &Bottom{Err: errors.Newf(token.NoPos, "cannot add conjunct")}
-	}
+
 	v.Conjuncts = append(v.Conjuncts, c)
 	return nil
 }
 
+func (v *Vertex) AddStructs(a ...*StructLit) {
+outer:
+	for _, s := range a {
+		for _, t := range v.Structs {
+			if t == s {
+				continue outer
+			}
+		}
+		v.Structs = append(v.Structs, s)
+	}
+}
+
 // Path computes the sequence of Features leading from the root of the
 // instance to this Vertex.
 func (v *Vertex) Path() []Feature {
@@ -127,6 +366,23 @@
 	return append(a, v.Label)
 }
 
+func (v *Vertex) appendListArcs(arcs []*Vertex) (err *Bottom) {
+	for _, a := range arcs {
+		// TODO(list): BUG this only works if lists do not have definition
+		// fields.
+		label, err := MakeLabel(a.Source(), int64(len(v.Arcs)), IntLabel)
+		if err != nil {
+			return &Bottom{Src: a.Source(), Err: err}
+		}
+		v.Arcs = append(v.Arcs, &Vertex{
+			Parent:    v,
+			Label:     label,
+			Conjuncts: a.Conjuncts,
+		})
+	}
+	return nil
+}
+
 // A Conjunct is an Environment-Expr pair. The Environment is the starting
 // point for reference lookup for any reference contained in X.
 type Conjunct struct {
@@ -139,10 +395,14 @@
 // MakeConjunct creates a conjunct from the given environment and node.
 // It panics if x cannot be used as an expression.
 func MakeConjunct(env *Environment, x Node) Conjunct {
+	if env == nil {
+		// TODO: better is to pass one.
+		env = &Environment{}
+	}
 	switch x.(type) {
 	case Expr, interface{ expr() Expr }:
 	default:
-		panic("invalid Node type")
+		panic(fmt.Sprintf("invalid Node type %T", x))
 	}
 	return Conjunct{env, x}
 }
diff --git a/internal/core/adt/context.go b/internal/core/adt/context.go
new file mode 100644
index 0000000..8f9e07e
--- /dev/null
+++ b/internal/core/adt/context.go
@@ -0,0 +1,832 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adt
+
+import (
+	"fmt"
+	"regexp"
+
+	"github.com/cockroachdb/apd/v2"
+	"golang.org/x/text/runes"
+
+	"cuelang.org/go/cue/ast"
+	"cuelang.org/go/cue/errors"
+	"cuelang.org/go/cue/format"
+	"cuelang.org/go/cue/token"
+)
+
+// A Unifier implements a strategy for CUE's unification operation. It must
+// handle the following aspects of CUE evaluation:
+//
+//    - Structural and reference cycles
+//    - Non-monotonic validation
+//    - Fixed-point computation of comprehensions
+//
+type Unifier interface {
+	// Unify fully unifies all values of a Vertex to completion and stores
+	// the result in the Vertex. If Unify was called on v before, it returns
+	// the cached results.
+	Unify(c *OpContext, v *Vertex, state VertexStatus) // error or bool?
+
+	// Evaluate returns the evaluated value associated with v. It may return a
+	// partial result. That is, if v was not yet unified, it may return a
+	// concrete value that must be the result assuming the configuration has no
+	// errors.
+	//
+	// This semantics allows CUE to break reference cycles in a straightforward
+	// manner.
+	//
+	// Vertex v must still be evaluated at some point to catch the underlying
+	// error.
+	//
+	Evaluate(c *OpContext, v *Vertex) Value
+}
+
+// Runtime defines an interface for low-level representation conversion and
+// lookup.
+type Runtime interface {
+	// StringIndexer allows for converting string labels to and from a
+	// canonical numeric representation.
+	StringIndexer
+}
+
+type Config struct {
+	Runtime
+	Unifier
+
+	Format func(Node) string
+}
+
+type config = Config
+
+// New creates an operation context.
+func New(v *Vertex, cfg *Config) *OpContext {
+	if cfg.Runtime == nil {
+		panic("nil Runtime")
+	}
+	if cfg.Unifier == nil {
+		panic("nil Unifier")
+	}
+	ctx := &OpContext{
+		config: *cfg,
+	}
+	if v != nil {
+		ctx.e = &Environment{Up: nil, Vertex: v}
+	}
+	return ctx
+}
+
+// An OpContext associates a Runtime and Unifier to allow evaluating the types
+// defined in this package. It tracks errors and provides convenience methods
+// for evaluating values.
+type OpContext struct {
+	config
+
+	e    *Environment
+	src  ast.Node
+	errs *Bottom
+
+	// TODO: remove use of tentative. Should be possible if incomplete
+	// handling is done better.
+	tentative int // set during comprehension evaluation
+}
+
+// If IsTentative is set, evaluation of an arc should not finalize
+// to non-concrete values.
+func (c *OpContext) IsTentative() bool {
+	return c.tentative > 0
+}
+
+func (c *OpContext) Pos() token.Pos {
+	if c.src == nil {
+		return token.NoPos
+	}
+	return c.src.Pos()
+}
+
+func (c *OpContext) Source() ast.Node {
+	return c.src
+}
+
+// NewContext creates an operation context.
+func NewContext(r Runtime, u Unifier, v *Vertex) *OpContext {
+	return New(v, &Config{Runtime: r, Unifier: u})
+}
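+
+// A typical setup is sketched below; r, u, root, and expr are caller-provided
+// values (a Runtime, a Unifier, a root *Vertex, and an Expr) and are not
+// defined in this package:
+//
+//	ctx := NewContext(r, u, root)
+//	v, complete := ctx.Evaluate(ctx.Env(0), expr)
+//	// v is always non-nil; complete reports whether the result is final.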
+
+func (c *OpContext) pos() token.Pos {
+	if c.src == nil {
+		return token.NoPos
+	}
+	return c.src.Pos()
+}
+
+func (c *OpContext) spawn(node *Vertex) *OpContext {
+	sub := *c
+	node.Parent = c.e.Vertex
+	sub.e = &Environment{Up: c.e, Vertex: node}
+	if c.e != nil {
+		sub.e.CloseID = c.e.CloseID
+	}
+	return &sub
+}
+
+func (c *OpContext) Env(upCount int32) *Environment {
+	e := c.e
+	for ; upCount > 0; upCount-- {
+		e = e.Up
+	}
+	return e
+}
+
+func (c *OpContext) relNode(upCount int32) *Vertex {
+	e := c.e
+	for ; upCount > 0; upCount-- {
+		e = e.Up
+	}
+	return e.Vertex
+}
+
+func (c *OpContext) relLabel(upCount int32) Feature {
+	// locate current label.
+	e := c.e
+	for ; upCount > 0; upCount-- {
+		e = e.Up
+	}
+	return e.DynamicLabel
+}
+
+func (c *OpContext) concreteIsPossible(x Expr) bool {
+	if v, ok := x.(Value); ok {
+		if v.Concreteness() > Concrete {
+			c.AddErrf("value can never become concrete")
+			return false
+		}
+	}
+	return true
+}
+
+// HasErr reports whether any error was reported, including whether value
+// was incomplete.
+func (c *OpContext) HasErr() bool {
+	return c.errs != nil
+}
+
+func (c *OpContext) Err() *Bottom {
+	b := c.errs
+	c.errs = nil
+	return b
+}
+
+func (c *OpContext) addErrf(code ErrorCode, pos token.Pos, msg string, args ...interface{}) {
+	for i, a := range args {
+		switch x := a.(type) {
+		case Node:
+			args[i] = c.Str(x)
+		case ast.Node:
+			b, _ := format.Node(x)
+			args[i] = string(b)
+		case Feature:
+			args[i] = x.SelectorString(c.Runtime)
+		}
+	}
+
+	err := errors.Newf(pos, msg, args...)
+	c.addErr(code, err)
+}
+
+func (c *OpContext) addErr(code ErrorCode, err errors.Error) {
+	c.errs = CombineErrors(c.src, c.errs, &Bottom{Code: code, Err: err})
+}
+
+// AddBottom records an error in OpContext.
+func (c *OpContext) AddBottom(b *Bottom) {
+	c.errs = CombineErrors(c.src, c.errs, b)
+}
+
+// AddErr records an error in OpContext. It returns errors collected so far.
+func (c *OpContext) AddErr(err errors.Error) *Bottom {
+	if err != nil {
+		c.errs = CombineErrors(c.src, c.errs, &Bottom{Err: err})
+	}
+	return c.errs
+}
+
+// NewErrf creates a *Bottom value and returns it. The returned error uses the
+// current source as the point of origin of the error.
+func (c *OpContext) NewErrf(format string, args ...interface{}) *Bottom {
+	err := errors.Newf(c.pos(), format, args...)
+	return &Bottom{Src: c.src, Err: err, Code: EvalError}
+}
+
+// AddErrf records an error in OpContext. It returns errors collected so far.
+func (c *OpContext) AddErrf(format string, args ...interface{}) *Bottom {
+	return c.AddErr(errors.Newf(c.pos(), format, args...))
+}
+
+func (c *OpContext) validate(v Value) *Bottom {
+	switch x := v.(type) {
+	case *Bottom:
+		return x
+	case *Vertex:
+		v := c.Unifier.Evaluate(c, x)
+		if b, ok := v.(*Bottom); ok {
+			return b
+		}
+	}
+	return nil
+}
+
+type frame struct {
+	env *Environment
+	err *Bottom
+	src ast.Node
+}
+
+func (c *OpContext) PushState(env *Environment, src ast.Node) (saved frame) {
+	saved.env = c.e
+	saved.err = c.errs
+	saved.src = c.src
+
+	c.errs = nil
+	if src != nil {
+		c.src = src
+	}
+	if env != nil {
+		c.e = env
+	}
+
+	return saved
+}
+
+func (c *OpContext) PopState(s frame) *Bottom {
+	err := c.errs
+	c.e = s.env
+	c.errs = s.err
+	c.src = s.src
+	return err
+}
+
+// Resolve finds a node in the tree.
+//
+// Should only be used to insert Conjuncts. TODO: perhaps only return Conjuncts
+// and error.
+func (c *OpContext) Resolve(env *Environment, r Resolver) (*Vertex, *Bottom) {
+	s := c.PushState(env, r.Source())
+
+	arc := r.resolve(c)
+	// TODO: check for cycle errors?
+
+	err := c.PopState(s)
+	if err != nil {
+		return nil, err
+	}
+
+	return arc, err
+}
+
+// Validate validates value using the given validator.
+func (c *OpContext) Validate(check Validator, value Value) *Bottom {
+	return check.validate(c, value)
+}
+
+// Yield evaluates a Yielder and calls f for each result.
+func (c *OpContext) Yield(env *Environment, y Yielder, f YieldFunc) *Bottom {
+	s := c.PushState(env, y.Source())
+
+	c.tentative++
+
+	y.yield(c, f)
+
+	c.tentative--
+
+	return c.PopState(s)
+
+}
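+
+// Illustrative use of Yield, processing each yielded struct (c, env, and y
+// are caller-provided):
+//
+//	err := c.Yield(env, y, func(env *Environment, s *StructLit) {
+//		// process the yielded struct s
+//	})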
+
+// Concrete returns the concrete value of x after evaluating it.
+// msg is used to mention the context in which an error occurred, if any.
+func (c *OpContext) Concrete(env *Environment, x Expr, msg interface{}) (result Value, complete bool) {
+
+	v, complete := c.Evaluate(env, x)
+
+	v, ok := c.getDefault(v)
+	if !ok {
+		return v, false
+	}
+
+	if !IsConcrete(v) {
+		complete = false
+		b := c.NewErrf("non-concrete value %v in operand to %s", c.Str(v), msg)
+		b.Code = IncompleteError
+		v = b
+	}
+
+	if !complete {
+		return v, complete
+	}
+
+	return v, true
+}
+
+func (c *OpContext) getDefault(v Value) (result Value, ok bool) {
+	var d *Disjunction
+	switch x := v.(type) {
+	default:
+		return v, true
+
+	case *Vertex:
+		switch t := x.Value.(type) {
+		case *Disjunction:
+			d = t
+
+		case *StructMarker, *ListMarker:
+			return v, true
+
+		default:
+			return t, true
+		}
+
+	case *Disjunction:
+		d = x
+	}
+
+	if d.NumDefaults != 1 {
+		c.addErrf(IncompleteError, c.pos(),
+			"unresolved disjunction %s (type %s)", c.Str(d), d.Kind())
+		return nil, false
+	}
+	return c.getDefault(d.Values[0])
+}
+
+// Evaluate evaluates an expression within the given environment and indicates
+// whether the result is complete. It will always return a non-nil result.
+func (c *OpContext) Evaluate(env *Environment, x Expr) (result Value, complete bool) {
+	s := c.PushState(env, x.Source())
+
+	val := c.eval(x)
+
+	complete = true
+
+	if err, _ := val.(*Bottom); err != nil && err.IsIncomplete() {
+		complete = false
+	}
+	if val == nil {
+		complete = false
+		// TODO: ensure this doesn't happen.
+		val = &Bottom{
+			Code: IncompleteError,
+			Err:  errors.Newf(token.NoPos, "UNANTICIPATED ERROR"),
+		}
+
+	}
+
+	_ = c.PopState(s)
+
+	if !complete || val == nil {
+		return val, false
+	}
+
+	return val, true
+}
+
+// value evaluates expression x within the current environment. The result may
+// be nil if the result is incomplete. value leaves errors untouched so that
+// they can be collected by the caller.
+func (c *OpContext) value(x Expr) (result Value) {
+	v := c.evalState(x, Partial)
+
+	v, _ = c.getDefault(v)
+	return v
+}
+
+func (c *OpContext) eval(v Expr) (result Value) {
+	return c.evalState(v, Partial)
+}
+
+func (c *OpContext) evalState(v Expr, state VertexStatus) (result Value) {
+	savedSrc := c.src
+	c.src = v.Source()
+	err := c.errs
+	c.errs = nil
+
+	defer func() {
+		c.errs = CombineErrors(c.src, c.errs, err)
+		c.errs = CombineErrors(c.src, c.errs, result)
+		if c.errs != nil {
+			result = c.errs
+		}
+		c.src = savedSrc
+	}()
+
+	switch x := v.(type) {
+	case Value:
+		return x
+
+	case Evaluator:
+		v := x.evaluate(c)
+		return v
+
+	case Resolver:
+		arc := x.resolve(c)
+		if c.HasErr() {
+			return nil
+		}
+		if isIncomplete(arc) {
+			if arc != nil {
+				return arc.Value
+			}
+			return nil
+		}
+
+		v := c.Unifier.Evaluate(c, arc)
+		return v
+
+	default:
+		// return nil
+		c.AddErrf("unexpected Expr type %T", v)
+	}
+	return nil
+}
+
+func (c *OpContext) lookup(x *Vertex, pos token.Pos, l Feature) *Vertex {
+	if l == InvalidLabel || x == nil {
+		// TODO: is it possible to have an invalid label here? Maybe through the
+		// API?
+		return &Vertex{}
+	}
+
+	var kind Kind
+	if x.Value != nil {
+		kind = x.Value.Kind()
+	}
+
+	switch kind {
+	case StructKind:
+		if l.Typ() == IntLabel {
+			c.addErrf(0, pos, "invalid struct selector %s (type int)", l)
+		}
+
+	case ListKind:
+		switch {
+		case l.Typ() == IntLabel:
+			switch {
+			case l.Index() < 0:
+				c.addErrf(0, pos, "invalid list index %s (index must be non-negative)", l)
+				return nil
+			case l.Index() > len(x.Arcs):
+				c.addErrf(0, pos, "invalid list index %s (out of bounds)", l)
+				return nil
+			}
+
+		case l.IsDef():
+
+		default:
+			c.addErrf(0, pos, "invalid list index %s (type string)", l)
+			return nil
+		}
+
+	default:
+		// TODO: ?
+		// if !l.IsDef() {
+		// 	c.addErrf(0, nil, "invalid selector %s (must be definition for non-structs)", l)
+		// }
+	}
+
+	a := x.Lookup(l)
+	if a == nil {
+		code := IncompleteError
+		if !x.Accept(c, l) {
+			code = 0
+		}
+		// TODO: if the struct was a literal struct, we can also treat it as
+		// closed and make this a permanent error.
+		c.addErrf(code, pos, "undefined field %s", l.SelectorString(c.Runtime))
+	}
+	return a
+}
+
+func (c *OpContext) Label(x Value) Feature {
+	return labelFromValue(c, x)
+}
+
+func (c *OpContext) typeError(v Value, k Kind) {
+	if isError(v) {
+		return
+	}
+	if !IsConcrete(v) && v.Kind()&k != 0 {
+		c.addErrf(IncompleteError, pos(v),
+			"incomplete %s value '%s'", k, c.Str(v))
+	} else {
+		c.AddErrf("cannot use %s (type %s) as type %s", c.Str(v), v.Kind(), k)
+	}
+}
+
+func (c *OpContext) typeErrorAs(v Value, k Kind, as interface{}) {
+	if as == nil {
+		c.typeError(v, k)
+		return
+	}
+	if isError(v) {
+		return
+	}
+	if !IsConcrete(v) && v.Kind()&k != 0 {
+		c.addErrf(IncompleteError, pos(v),
+			"incomplete %s value '%s' in as", k, c.Str(v), as)
+	} else {
+		c.AddErrf("cannot use %s (type %s) as type %s in %v",
+			c.Str(v), v.Kind(), k, as)
+	}
+}
+
+var emptyNode = &Vertex{}
+
+func pos(x Node) token.Pos {
+	if x.Source() == nil {
+		return token.NoPos
+	}
+	return x.Source().Pos()
+}
+
+func (c *OpContext) node(x Expr, state VertexStatus) *Vertex {
+	v := c.evalState(x, state)
+
+	node, ok := v.(*Vertex)
+	if !ok {
+		if isError(v) {
+			if v == nil {
+				c.addErrf(IncompleteError, pos(x), "incomplete value %s", c.Str(x))
+			}
+			return emptyNode
+		}
+		if v.Kind()&StructKind != 0 {
+			c.addErrf(IncompleteError, pos(x),
+				"incomplete feed source value %s (type %s)",
+				x.Source(), v.Kind())
+		} else {
+			c.addErrf(0, pos(x),
+				"invalid operand %s (found %s, want list or struct)",
+				x.Source(), v.Kind())
+
+		}
+		return emptyNode
+	}
+	return node.Default()
+}
+
+// Elems returns the elements of a list.
+func (c *OpContext) Elems(v Value) []*Vertex {
+	list := c.list(v)
+	return list.Elems()
+}
+
+func (c *OpContext) list(v Value) *Vertex {
+	x, ok := v.(*Vertex)
+	if !ok || !x.IsList() {
+		c.typeError(v, ListKind)
+		return emptyNode
+	}
+	return x
+}
+
+func (c *OpContext) scalar(v Value) Value {
+	v = Unwrap(v)
+	switch v.(type) {
+	case *Null, *Bool, *Num, *String, *Bytes:
+	default:
+		c.typeError(v, ScalarKinds)
+	}
+	return v
+}
+
+var zero = &Num{K: NumKind}
+
+func (c *OpContext) num(v Value, as interface{}) *Num {
+	v = Unwrap(v)
+	if isError(v) {
+		return zero
+	}
+	x, ok := v.(*Num)
+	if !ok {
+		c.typeErrorAs(v, NumKind, as)
+		return zero
+	}
+	return x
+}
+
+func (c *OpContext) Int64(v Value) int64 {
+	v = Unwrap(v)
+	if isError(v) {
+		return 0
+	}
+	x, ok := v.(*Num)
+	if !ok {
+		c.typeError(v, IntKind)
+		return 0
+	}
+	i, err := x.X.Int64()
+	if err != nil {
+		c.AddErrf("number is not an int64: %v", err)
+		return 0
+	}
+	return i
+}
+
+func (c *OpContext) uint64(v Value, as string) uint64 {
+	v = Unwrap(v)
+	if isError(v) {
+		return 0
+	}
+	x, ok := v.(*Num)
+	if !ok {
+		c.typeErrorAs(v, IntKind, as)
+		return 0
+	}
+	if x.X.Negative {
+		// TODO: improve message
+		c.AddErrf("cannot convert negative number to uint64")
+		return 0
+	}
+	if !x.X.Coeff.IsUint64() {
+		// TODO: improve message
+		c.AddErrf("cannot convert number %s to uint64", x.X)
+		return 0
+	}
+	return x.X.Coeff.Uint64()
+}
+
+func (c *OpContext) BoolValue(v Value) bool {
+	return c.boolValue(v, nil)
+}
+
+func (c *OpContext) boolValue(v Value, as interface{}) bool {
+	v = Unwrap(v)
+	if isError(v) {
+		return false
+	}
+	x, ok := v.(*Bool)
+	if !ok {
+		c.typeErrorAs(v, BoolKind, as)
+		return false
+	}
+	return x.B
+}
+
+func (c *OpContext) StringValue(v Value) string {
+	return c.stringValue(v, nil)
+}
+
+func (c *OpContext) stringValue(v Value, as interface{}) string {
+	v = Unwrap(v)
+	if isError(v) {
+		return ""
+	}
+	switch x := v.(type) {
+	case *String:
+		return x.Str
+
+	case *Bytes:
+		return string(runes.ReplaceIllFormed().Bytes(x.B))
+
+	case *Num:
+		return x.X.String()
+
+	default:
+		if as == nil {
+			c.typeError(v, StringKind)
+		} else {
+			c.typeErrorAs(v, StringKind, as)
+		}
+	}
+	return ""
+}
+
+func (c *OpContext) bytesValue(v Value, as interface{}) []byte {
+	v = Unwrap(v)
+	if isError(v) {
+		return nil
+	}
+	x, ok := v.(*Bytes)
+	if !ok {
+		c.typeErrorAs(v, BytesKind, as)
+		return nil
+	}
+	return x.B
+}
+
+var matchNone = regexp.MustCompile("^$")
+
+func (c *OpContext) regexp(v Value) *regexp.Regexp {
+	v = Unwrap(v)
+	if isError(v) {
+		return matchNone
+	}
+	switch x := v.(type) {
+	case *String:
+		if x.RE != nil {
+			return x.RE
+		}
+		// TODO: synchronization
+		p, err := regexp.Compile(x.Str)
+		if err != nil {
+			// FatalError? How to cache error
+			c.AddErrf("invalid regexp: %s", err)
+			x.RE = matchNone
+		} else {
+			x.RE = p
+		}
+		return x.RE
+
+	case *Bytes:
+		if x.RE != nil {
+			return x.RE
+		}
+		// TODO: synchronization
+		p, err := regexp.Compile(string(x.B))
+		if err != nil {
+			c.AddErrf("invalid regexp: %s", err)
+			x.RE = matchNone
+		} else {
+			x.RE = p
+		}
+		return x.RE
+
+	default:
+		c.typeError(v, StringKind|BytesKind)
+		return matchNone
+	}
+}
+
+func (c *OpContext) newNum(d *apd.Decimal, k Kind, sources ...Node) Value {
+	if c.HasErr() {
+		return c.Err()
+	}
+	return &Num{Src: c.src, X: *d, K: k}
+}
+
+func (c *OpContext) NewInt64(n int64, sources ...Node) Value {
+	if c.HasErr() {
+		return c.Err()
+	}
+	d := apd.New(n, 0)
+	return &Num{Src: c.src, X: *d, K: IntKind}
+}
+
+func (c *OpContext) NewString(s string) Value {
+	if c.HasErr() {
+		return c.Err()
+	}
+	return &String{Src: c.src, Str: s}
+}
+
+func (c *OpContext) newBytes(b []byte) Value {
+	if c.HasErr() {
+		return c.Err()
+	}
+	return &Bytes{Src: c.src, B: b}
+}
+
+func (c *OpContext) newBool(b bool) Value {
+	if c.HasErr() {
+		return c.Err()
+	}
+	return &Bool{Src: c.src, B: b}
+}
+
+func (c *OpContext) newList(src ast.Node, parent *Vertex) *Vertex {
+	return &Vertex{Parent: parent, Value: &ListMarker{}}
+}
+
+// Str reports a debug string of x.
+func (c *OpContext) Str(x Node) string {
+	if c.Format == nil {
+		return fmt.Sprintf("%T", x)
+	}
+	return c.Format(x)
+}
+
+// NewList returns a new list for the given values.
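+//
+// Illustrative use (assuming an initialized *OpContext c):
+//
+//	l := c.NewList(c.NewInt64(1), c.NewInt64(2))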
+func (c *OpContext) NewList(values ...Value) *Vertex {
+	// TODO: consider making this a literal list instead.
+	list := &ListLit{}
+	v := &Vertex{
+		Conjuncts: []Conjunct{{Env: nil, x: list}},
+	}
+
+	for _, x := range values {
+		list.Elems = append(list.Elems, x)
+	}
+	c.Unify(c, v, Finalized)
+	return v
+}
diff --git a/internal/core/adt/default.go b/internal/core/adt/default.go
new file mode 100644
index 0000000..ae7ca8e
--- /dev/null
+++ b/internal/core/adt/default.go
@@ -0,0 +1,113 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adt
+
+// Default returns the default value or itself if there is no default.
+func Default(v Value) Value {
+	switch x := v.(type) {
+	case *Vertex:
+		return x.Default()
+	case *Disjunction:
+		return x.Default()
+	default:
+		return v
+	}
+}
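+
+// For example, for a value holding the disjunction *1 | int, Default returns
+// the value 1, whereas for 1 | int (no default marked) the disjunction itself
+// is returned (illustrative).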
+
+func (d *Disjunction) Default() Value {
+	switch d.NumDefaults {
+	case 0:
+		return d
+	case 1:
+		return d.Values[0]
+	default:
+		return &Disjunction{
+			Src:         d.Src,
+			Values:      d.Values[:d.NumDefaults],
+			NumDefaults: 0,
+		}
+	}
+}
+
+// Default returns the default value or itself if there is no default.
+func (v *Vertex) Default() *Vertex {
+	d, ok := v.Value.(*Disjunction)
+	if !ok {
+		return v
+	}
+
+	var w *Vertex
+
+	switch d.NumDefaults {
+	case 0:
+		return v
+	case 1:
+		w = d.Values[0]
+	default:
+		x := *v
+		x.Value = &Disjunction{
+			Src:         d.Src,
+			Values:      d.Values[:d.NumDefaults],
+			NumDefaults: 0,
+		}
+		w = &x
+	}
+
+	w.Conjuncts = nil
+	for _, c := range v.Conjuncts {
+		// TODO: preserve field information.
+		expr, _ := stripNonDefaults(c.Expr())
+		w.AddConjunct(MakeConjunct(c.Env, expr))
+	}
+	return w
+}
+
+// TODO: this should go: record preexpanded disjunctions in Vertex.
+func stripNonDefaults(expr Expr) (r Expr, stripped bool) {
+	switch x := expr.(type) {
+	case *DisjunctionExpr:
+		if !x.HasDefaults {
+			return x, false
+		}
+		d := *x
+		d.Values = []Disjunct{}
+		for _, v := range x.Values {
+			if v.Default {
+				d.Values = append(d.Values, v)
+			}
+		}
+		if len(d.Values) == 1 {
+			return d.Values[0].Val, true
+		}
+		return &d, true
+
+	case *BinaryExpr:
+		if x.Op != AndOp {
+			return x, false
+		}
+		a, sa := stripNonDefaults(x.X)
+		b, sb := stripNonDefaults(x.Y)
+		if sa || sb {
+			bin := *x
+			bin.X = a
+			bin.Y = b
+			return &bin, true
+		}
+		return x, false
+
+	default:
+		return x, false
+	}
+}
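+
+// For example, stripping *1 | int yields the expression 1 with stripped set to
+// true, while 1 | int (no default marked) is returned unchanged (illustrative).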
diff --git a/internal/core/adt/errors.go b/internal/core/adt/errors.go
new file mode 100644
index 0000000..58ddac5
--- /dev/null
+++ b/internal/core/adt/errors.go
@@ -0,0 +1,193 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adt
+
+// This file contains error encodings.
+//
+//
+// *Bottom:
+//    - an adt.Value
+//    - always belongs to a single vertex.
+//    - does NOT implement error
+//    - marks error code used for control flow
+//
+// errors.Error
+//    - CUE default error
+//    - implements error
+//    - tracks error locations
+//    - has error message details
+//    - supports multiple errors
+//
+
+import (
+	"cuelang.org/go/cue/ast"
+	"cuelang.org/go/cue/errors"
+)
+
+// ErrorCode indicates the type of error. The type of error may influence
+// control flow. No other aspects of an error may influence control flow.
+type ErrorCode int
+
+const (
+	// An EvalError is a fatal evaluation error.
+	EvalError ErrorCode = iota
+
+	// A UserError is a fatal error originating from the user.
+	UserError
+
+	// NotExistError is used to indicate a value does not exist.
+	// Mostly used for legacy reasons.
+	NotExistError
+
+	// IncompleteError means an evaluation could not complete because of
+	// insufficient information that may still be added later.
+	IncompleteError
+
+	// A CycleError indicates a reference error. It is considered to be
+	// an incomplete error, as reference errors may be broken by providing
+	// a concrete value.
+	CycleError
+)
+
+func (c ErrorCode) String() string {
+	switch c {
+	case EvalError:
+		return "eval"
+	case UserError:
+		return "user"
+	case IncompleteError:
+		return "incomplete"
+	case CycleError:
+		return "cycle"
+	}
+	return "unknown"
+}
+
+// Bottom represents an error or bottom symbol.
+//
+// Although a Bottom node holds control data, it should not be created until the
+// control information already resulted in an error.
+type Bottom struct {
+	Src ast.Node
+	Err errors.Error
+
+	Code         ErrorCode
+	HasRecursive bool
+	ChildError   bool // Err is the error of the child
+	// Value holds the computed value so far in case
+	Value Value
+}
+
+func (x *Bottom) Source() ast.Node        { return x.Src }
+func (x *Bottom) Kind() Kind              { return BottomKind }
+func (x *Bottom) Specialize(k Kind) Value { return x } // XXX remove
+
+func (b *Bottom) IsIncomplete() bool {
+	if b == nil {
+		return false
+	}
+	return b.Code == IncompleteError || b.Code == CycleError
+}
+
+// isLiteralBottom reports whether x is an error originating from a user.
+func isLiteralBottom(x Expr) bool {
+	b, ok := x.(*Bottom)
+	return ok && b.Code == UserError
+}
+
+// isError reports whether v is an error or nil.
+func isError(v Value) bool {
+	if v == nil {
+		return true
+	}
+	_, ok := v.(*Bottom)
+	return ok
+}
+
+// isIncomplete reports whether v is associated with an incomplete error.
+func isIncomplete(v *Vertex) bool {
+	if v == nil {
+		return true
+	}
+	if b, ok := v.Value.(*Bottom); ok {
+		return b.IsIncomplete()
+	}
+	return false
+}
+
+// AddChildError updates v to record an error that occurred in one of
+// its descendant arcs. The resulting error will record the worst error code of
+// the current error or recursive error.
+//
+// If v is not already an error, the value is recorded in the error for
+// reference.
+//
+func (v *Vertex) AddChildError(recursive *Bottom) {
+	v.ChildErrors = CombineErrors(nil, v.ChildErrors, recursive)
+	if recursive.IsIncomplete() {
+		return
+	}
+	x := v.Value
+	err, _ := x.(*Bottom)
+	if err == nil {
+		v.Value = &Bottom{
+			Code:         recursive.Code,
+			Value:        x,
+			HasRecursive: true,
+			ChildError:   true,
+			Err:          recursive.Err,
+		}
+		return
+	}
+
+	err.HasRecursive = true
+	if err.Code > recursive.Code {
+		err.Code = recursive.Code
+	}
+
+	v.Value = err
+}
+
+// CombineErrors combines two errors that originate at the same Vertex.
+func CombineErrors(src ast.Node, x, y Value) *Bottom {
+	a, _ := x.(*Bottom)
+	b, _ := y.(*Bottom)
+
+	switch {
+	case a != nil && b != nil:
+	case a != nil:
+		return a
+	case b != nil:
+		return b
+	default:
+		return nil
+	}
+
+	if a.Code != b.Code {
+		if a.Code > b.Code {
+			a, b = b, a
+		}
+
+		if b.Code >= IncompleteError {
+			return a
+		}
+	}
+
+	return &Bottom{
+		Src:  src,
+		Err:  errors.Append(a.Err, b.Err),
+		Code: a.Code,
+	}
+}
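+
+// For example, combining a *Bottom with code IncompleteError and one with code
+// EvalError returns the EvalError value by itself: a fatal error supersedes an
+// incomplete one (illustrative).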
diff --git a/internal/core/adt/expr.go b/internal/core/adt/expr.go
index 0a607ec..2346519 100644
--- a/internal/core/adt/expr.go
+++ b/internal/core/adt/expr.go
@@ -15,6 +15,8 @@
 package adt
 
 import (
+	"bytes"
+	"fmt"
 	"regexp"
 
 	"github.com/cockroachdb/apd/v2"
@@ -35,6 +37,13 @@
 
 func (x *StructLit) Source() ast.Node { return x.Src }
 
+func (x *StructLit) evaluate(c *OpContext) Value {
+	e := c.Env(0)
+	v := &Vertex{Conjuncts: []Conjunct{{e, x}}}
+	c.Unifier.Unify(c, v, Finalized) // TODO: also partial okay?
+	return v
+}
+
 // FIELDS
 //
 // Fields can also be used as expressions whereby the value field is the
@@ -58,7 +67,12 @@
 	Value Expr
 }
 
-func (x *Field) Source() ast.Node { return x.Src }
+func (x *Field) Source() ast.Node {
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
 
 // An OptionalField represents an optional regular field.
 //
@@ -70,7 +84,12 @@
 	Value Expr
 }
 
-func (x *OptionalField) Source() ast.Node { return x.Src }
+func (x *OptionalField) Source() ast.Node {
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
 
 // A BulkOptionalField represents a set of optional field.
 //
@@ -83,7 +102,12 @@
 	Label  Feature // for reference and formatting
 }
 
-func (x *BulkOptionalField) Source() ast.Node { return x.Src }
+func (x *BulkOptionalField) Source() ast.Node {
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
 
 // A Ellipsis represents a set of optional fields of a given type.
 //
@@ -94,7 +118,12 @@
 	Value Expr
 }
 
-func (x *Ellipsis) Source() ast.Node { return x.Src }
+func (x *Ellipsis) Source() ast.Node {
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
 
 // A DynamicField represents a regular field for which the key is computed.
 //
@@ -111,9 +140,12 @@
 	return x.Src.Optional != token.NoPos
 }
 
-func (x *DynamicField) Source() ast.Node { return x.Src }
-
-// Expressions
+func (x *DynamicField) Source() ast.Node {
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
 
 // A ListLit represents an unevaluated list literal.
 //
@@ -126,18 +158,19 @@
 	Elems []Elem
 }
 
-func (x *ListLit) Source() ast.Node { return x.Src }
-
-// -- Literals
-
-// Bottom represents an error or bottom symbol.
-type Bottom struct {
-	Src ast.Node
-	Err errors.Error
+func (x *ListLit) Source() ast.Node {
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
 }
 
-func (x *Bottom) Source() ast.Node { return x.Src }
-func (x *Bottom) Kind() Kind       { return BottomKind }
+func (x *ListLit) evaluate(c *OpContext) Value {
+	e := c.Env(0)
+	v := &Vertex{Conjuncts: []Conjunct{{e, x}}}
+	c.Unifier.Unify(c, v, Finalized) // TODO: also partial okay?
+	return v
+}
 
 // Null represents null. It can be used as a Value and Expr.
 type Null struct {
@@ -163,9 +196,37 @@
 	X   apd.Decimal // Is integer if the apd.Decimal is an integer.
 }
 
+// TODO: do we need this?
+// func NewNumFromString(src ast.Node, s string) Value {
+// 	n := &Num{Src: src, K: IntKind}
+// 	if strings.ContainsAny(s, "eE.") {
+// 		n.K = FloatKind
+// 	}
+// 	_, _, err := n.X.SetString(s)
+// 	if err != nil {
+// 		pos := token.NoPos
+// 		if src != nil {
+// 			pos = src.Pos()
+// 		}
+// 		return &Bottom{Err: errors.Newf(pos, "invalid number: %v", err)}
+// 	}
+// 	return n
+// }
+
 func (x *Num) Source() ast.Node { return x.Src }
 func (x *Num) Kind() Kind       { return x.K }
 
+// TODO: do we still need this?
+// func (x *Num) Specialize(k Kind) Value {
+// 	k = k & x.K
+// 	if k == x.K {
+// 		return x
+// 	}
+// 	y := *x
+// 	y.K = k
+// 	return &y
+// }
+
 // String is a string value. It can be used as a Value and Expr.
 type String struct {
 	Src ast.Node
@@ -186,11 +247,12 @@
 func (x *Bytes) Source() ast.Node { return x.Src }
 func (x *Bytes) Kind() Kind       { return BytesKind }
 
-// -- composites: the evaluated fields of a composite are recorded in the arc
+// Composites: the evaluated fields of a composite are recorded in the arc
 // vertices.
 
 type ListMarker struct {
-	Src ast.Node
+	Src    ast.Node
+	IsOpen bool
 }
 
 func (x *ListMarker) Source() ast.Node { return x.Src }
@@ -198,15 +260,15 @@
 func (x *ListMarker) node()            {}
 
 type StructMarker struct {
-	Closed bool
+	// NeedClose is used to signal that the evaluator should close this struct.
+	// It is only set by the close builtin.
+	NeedClose bool
 }
 
 func (x *StructMarker) Source() ast.Node { return nil }
 func (x *StructMarker) Kind() Kind       { return StructKind }
 func (x *StructMarker) node()            {}
 
-// -- top types
-
 // Top represents all possible values. It can be used as a Value and Expr.
 type Top struct{ Src *ast.Ident }
 
@@ -226,8 +288,24 @@
 	K   Kind
 }
 
-func (x *BasicType) Source() ast.Node { return x.Src }
-func (x *BasicType) Kind() Kind       { return x.K }
+func (x *BasicType) Source() ast.Node {
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+func (x *BasicType) Kind() Kind { return x.K }
+
+// TODO: do we still need this?
+// func (x *BasicType) Specialize(k Kind) Value {
+// 	k = x.K & k
+// 	if k == x.K {
+// 		return x
+// 	}
+// 	y := *x
+// 	y.K = k
+// 	return &y
+// }
 
 // TODO: should we use UnaryExpr for Bound now we have BoundValue?
 
@@ -242,7 +320,31 @@
 	Expr Expr
 }
 
-func (x *BoundExpr) Source() ast.Node { return x.Src }
+func (x *BoundExpr) Source() ast.Node {
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+
+func (x *BoundExpr) evaluate(ctx *OpContext) Value {
+	if v, ok := x.Expr.(Value); ok {
+		if v == nil || v.Concreteness() > Concrete {
+			return ctx.NewErrf("bound has fixed non-concrete value")
+		}
+		return &BoundValue{x.Src, x.Op, v}
+	}
+	v := ctx.value(x.Expr)
+	if isError(v) {
+		return v
+	}
+	if v.Concreteness() > Concrete {
+		ctx.addErrf(IncompleteError, ctx.pos(),
+			"non-concrete value %s for bound %s", ctx.Str(x.Expr), x.Op)
+		return nil
+	}
+	return &BoundValue{x.Src, x.Op, v}
+}
 
 // A BoundValue is a fully evaluated unary comparator that can be used to
 // validate other values.
@@ -254,13 +356,45 @@
 	Src   ast.Expr
 	Op    Op
 	Value Value
-	K     Kind
 }
 
 func (x *BoundValue) Source() ast.Node { return x.Src }
-func (x *BoundValue) Kind() Kind       { return x.K }
+func (x *BoundValue) Kind() Kind {
+	k := x.Value.Kind()
+	switch k {
+	case IntKind, FloatKind, NumKind:
+		return NumKind
 
-// -- References
+	case NullKind:
+		if x.Op == NotEqualOp {
+			return TopKind &^ NullKind
+		}
+	}
+	return k
+}
+
+func (x *BoundValue) validate(c *OpContext, y Value) *Bottom {
+	a := y // Can be list or struct.
+	b := c.scalar(x.Value)
+	if c.HasErr() {
+		return c.Err()
+	}
+
+	switch v := BinOp(c, x.Op, a, b).(type) {
+	case *Bottom:
+		return v
+
+	case *Bool:
+		if v.B {
+			return nil
+		}
+		return c.NewErrf("invalid value %v (out of bound %s)",
+			c.Str(y), c.Str(x))
+
+	default:
+		panic(fmt.Sprintf("unsupported type %T", v))
+	}
+}
 
 // A FieldReference represents a lexical reference to a field.
 //
@@ -272,7 +406,18 @@
 	Label   Feature
 }
 
-func (x *FieldReference) Source() ast.Node { return x.Src }
+func (x *FieldReference) Source() ast.Node {
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+
+func (x *FieldReference) resolve(c *OpContext) *Vertex {
+	n := c.relNode(x.UpCount)
+	pos := pos(x)
+	return c.lookup(n, pos, x.Label)
+}
 
 // A LabelReference refers to the string or integer value of a label.
 //
@@ -285,7 +430,25 @@
 
 // TODO: should this implement resolver at all?
 
-func (x *LabelReference) Source() ast.Node { return x.Src }
+func (x *LabelReference) Source() ast.Node {
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+
+func (x *LabelReference) evaluate(ctx *OpContext) Value {
+	label := ctx.relLabel(x.UpCount)
+	if label == 0 {
+		// There is no label. This may happen if a LabelReference is evaluated
+		// outside of the context of a parent node, for instance when an
+		// "additional" items or properties constraint is evaluated in isolation.
+		//
+		// TODO: this should return the pattern of the label.
+		return &BasicType{K: StringKind}
+	}
+	return label.ToValue(ctx)
+}
 
 // A DynamicReference is like a LabelReference, but with a computed label.
 //
@@ -306,7 +469,21 @@
 	Alias Feature
 }
 
-func (x *DynamicReference) Source() ast.Node { return x.Src }
+func (x *DynamicReference) Source() ast.Node {
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+
+func (x *DynamicReference) resolve(ctx *OpContext) *Vertex {
+	e := ctx.Env(x.UpCount)
+	frame := ctx.PushState(e, x.Src)
+	v := ctx.value(x.Label)
+	ctx.PopState(frame)
+	f := ctx.Label(v)
+	return ctx.lookup(e.Vertex, pos(x), f)
+}
 
 // An ImportReference refers to an imported package.
 //
@@ -320,7 +497,25 @@
 	Label      Feature // for informative purposes
 }
 
-func (x *ImportReference) Source() ast.Node { return x.Src }
+func (x *ImportReference) Source() ast.Node {
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+
+// TODO: imports
+// func (x *ImportReference) resolve(c *context, e *environment) (arc, *Bottom) {
+// 	return c.r.lookupImport(e, x.importPath)
+// }
+
+// func (x *ImportReference) eval(c *context, e *environment) envVal {
+// 	arc, err := lookup(e, e.node, x.label)
+// 	if err != nil {
+// 		return err
+// 	}
+// 	return envVal{e, arc.eval()}
+// }
 
 // A LetReference evaluates a let expression in its original environment.
 //
@@ -333,7 +528,26 @@
 	X       Expr
 }
 
-func (x *LetReference) Source() ast.Node { return x.Src }
+func (x *LetReference) Source() ast.Node {
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+
+func (x *LetReference) resolve(c *OpContext) *Vertex {
+	e := c.Env(x.UpCount)
+	label := e.Vertex.Label
+	// Anonymous arc.
+	return &Vertex{Parent: nil, Label: label, Conjuncts: []Conjunct{{e, x.X}}}
+}
+
+func (x *LetReference) evaluate(c *OpContext) Value {
+	e := c.Env(x.UpCount)
+
+	// Not caching let expressions may lead to exponential behavior.
+	return e.evalCached(c, x.X)
+}
 
 // A SelectorExpr looks up a fixed field in an expression.
 //
@@ -345,7 +559,17 @@
 	Sel Feature
 }
 
-func (x *SelectorExpr) Source() ast.Node { return x.Src }
+func (x *SelectorExpr) Source() ast.Node {
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+
+func (x *SelectorExpr) resolve(c *OpContext) *Vertex {
+	n := c.node(x.X, Partial)
+	return c.lookup(n, x.Src.Sel.NamePos, x.Sel)
+}
 
 // IndexExpr is like a selector, but selects an index.
 //
@@ -357,7 +581,20 @@
 	Index Expr
 }
 
-func (x *IndexExpr) Source() ast.Node { return x.Src }
+func (x *IndexExpr) Source() ast.Node {
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+
+func (x *IndexExpr) resolve(ctx *OpContext) *Vertex {
+	// TODO: support byte index.
+	n := ctx.node(x.X, Partial)
+	i := ctx.value(x.Index)
+	f := ctx.Label(i)
+	return ctx.lookup(n, x.Src.Index.Pos(), f)
+}
 
 // A SliceExpr represents a slice operation. (Not currently in spec.)
 //
@@ -371,7 +608,85 @@
 	Stride Expr
 }
 
-func (x *SliceExpr) Source() ast.Node { return x.Src }
+func (x *SliceExpr) Source() ast.Node {
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+
+func (x *SliceExpr) evaluate(c *OpContext) Value {
+	// TODO: strides
+
+	v := c.value(x.X)
+	const as = "slice index"
+
+	switch v := v.(type) {
+	case nil:
+		c.addErrf(IncompleteError, c.pos(),
+			"non-concrete slice subject %s", c.Str(x.X))
+		return nil
+	case *Vertex:
+		if !v.IsList() {
+			break
+		}
+
+		var (
+			lo = uint64(0)
+			hi = uint64(len(v.Arcs))
+		)
+		if x.Lo != nil {
+			lo = c.uint64(c.value(x.Lo), as)
+		}
+		if x.Hi != nil {
+			hi = c.uint64(c.value(x.Hi), as)
+			if hi > uint64(len(v.Arcs)) {
+				return c.NewErrf("index %d out of range", hi)
+			}
+		}
+		if lo > hi {
+			return c.NewErrf("invalid slice index: %d > %d", lo, hi)
+		}
+
+		n := c.newList(c.src, v.Parent)
+		for i, a := range v.Arcs[lo:hi] {
+			label, err := MakeLabel(a.Source(), int64(i), IntLabel)
+			if err != nil {
+				c.AddBottom(&Bottom{Src: a.Source(), Err: err})
+				return nil
+			}
+			n.Arcs = append(n.Arcs, &Vertex{
+				Label:     label,
+				Conjuncts: a.Conjuncts,
+			})
+		}
+		return n
+
+	case *Bytes:
+		var (
+			lo = uint64(0)
+			hi = uint64(len(v.B))
+		)
+		if x.Lo != nil {
+			lo = c.uint64(c.value(x.Lo), as)
+		}
+		if x.Hi != nil {
+			hi = c.uint64(c.value(x.Hi), as)
+			if hi > uint64(len(v.B)) {
+				return c.NewErrf("index %d out of range", hi)
+			}
+		}
+		if lo > hi {
+			return c.NewErrf("invalid slice index: %d > %d", lo, hi)
+		}
+		return c.newBytes(v.B[lo:hi])
+	}
+
+	if isError(v) {
+		return v
+	}
+	return c.NewErrf("cannot slice %v (type %s)", c.Str(v), v.Kind())
+}
 
 // An Interpolation is a string interpolation.
 //
@@ -383,7 +698,34 @@
 	Parts []Expr // odd: strings, even sources
 }
 
-func (x *Interpolation) Source() ast.Node { return x.Src }
+func (x *Interpolation) Source() ast.Node {
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+
+func (x *Interpolation) evaluate(c *OpContext) Value {
+	buf := bytes.Buffer{}
+	for _, e := range x.Parts {
+		v := c.value(e)
+		s := c.StringValue(v)
+		buf.WriteString(s)
+	}
+	if err := c.Err(); err != nil {
+		err = &Bottom{
+			Code: err.Code,
+			Err:  errors.Wrapf(err.Err, pos(x), "invalid interpolation"),
+		}
+		// c.AddBottom(err)
+		// return nil
+		return err
+	}
+	// if k == bytesKind {
+	// 	return &BytesLit{x.source, buf.String(), nil}
+	// }
+	return &String{x.Src, buf.String(), nil}
+}
 
 // UnaryExpr is a unary expression.
 //
@@ -396,7 +738,55 @@
 	X   Expr
 }
 
-func (x *UnaryExpr) Source() ast.Node { return x.Src }
+func (x *UnaryExpr) Source() ast.Node {
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+
+func (x *UnaryExpr) evaluate(c *OpContext) Value {
+	if !c.concreteIsPossible(x.X) {
+		return nil
+	}
+	v := c.value(x.X)
+	if isError(v) {
+		return v
+	}
+
+	op := x.Op
+	k := kind(v)
+	expectedKind := k
+	switch op {
+	case SubtractOp:
+		if v, ok := v.(*Num); ok {
+			f := *v
+			f.X.Neg(&v.X)
+			f.Src = x.Src
+			return &f
+		}
+		expectedKind = NumKind
+
+	case AddOp:
+		if v, ok := v.(*Num); ok {
+			// TODO: wrap in thunk to save position of '+'?
+			return v
+		}
+		expectedKind = NumKind
+
+	case NotOp:
+		if v, ok := v.(*Bool); ok {
+			return &Bool{x.Src, !v.B}
+		}
+		expectedKind = BoolKind
+	}
+	if k&expectedKind != BottomKind {
+		c.addErrf(IncompleteError, pos(x.X),
+			"operand %s of '%s' not concrete (was %s)", c.Str(x), op, k)
+		return nil
+	}
+	return c.NewErrf("invalid operation %s%s (%s %s)", op, c.Str(x), op, k)
+}
 
 // BinaryExpr is a binary expression.
 //
@@ -410,9 +800,64 @@
 	Y   Expr
 }
 
-func (x *BinaryExpr) Source() ast.Node { return x.Src }
+func (x *BinaryExpr) Source() ast.Node {
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
 
-// -- builtins
+func (x *BinaryExpr) evaluate(c *OpContext) Value {
+	env := c.Env(0)
+	if x.Op == AndOp {
+		// Anonymous Arc
+		v := Vertex{Conjuncts: []Conjunct{{env, x}}}
+		return c.Unifier.Evaluate(c, &v)
+	}
+
+	if !c.concreteIsPossible(x.X) || !c.concreteIsPossible(x.Y) {
+		return nil
+	}
+
+	left, _ := c.Concrete(env, x.X, x.Op)
+	right, _ := c.Concrete(env, x.Y, x.Op)
+
+	leftKind := kind(left)
+	rightKind := kind(right)
+
+	// TODO: allow comparing to a literal Bottom only. Find something more
+	// principled perhaps. One should especially take care that comparing two
+	// values that both evaluate to Bottom does not yield true. For now we check for
+	// Bottom here and require that one of the values be a Bottom literal.
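+	//
+	// For example, with this check `v == _|_` yields true exactly when v
+	// evaluates to an error, and `v != _|_` yields true when it does not.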
+	if isLiteralBottom(x.X) || isLiteralBottom(x.Y) {
+		if b := c.validate(left); b != nil {
+			left = b
+		}
+		if b := c.validate(right); b != nil {
+			right = b
+		}
+		switch x.Op {
+		case EqualOp:
+			return &Bool{x.Src, leftKind == rightKind}
+		case NotEqualOp:
+			return &Bool{x.Src, leftKind != rightKind}
+		}
+	}
+
+	if err := CombineErrors(x.Src, left, right); err != nil {
+		return err
+	}
+
+	if err := c.Err(); err != nil {
+		return err
+	}
+
+	value := BinOp(c, x.Op, left, right)
+	if n, ok := value.(*Vertex); ok && n.IsList() {
+		n.UpdateStatus(Partial)
+	}
+	return value
+}
 
 // A CallExpr represents a call to a builtin.
 //
@@ -425,7 +870,18 @@
 	Args []Expr
 }
 
-func (x *CallExpr) Source() ast.Node { return x.Src }
+func (x *CallExpr) Source() ast.Node {
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+
+func (x *CallExpr) evaluate(c *OpContext) Value {
+	c.addErrf(0, pos(x), "cannot call non-function %s (type %s)",
+		x.Fun, "nil")
+	return nil
+}
 
 // A BuiltinValidator is a Value that results from evaluating a partial call
 // to a builtin (using CallExpr).
@@ -434,13 +890,21 @@
 //
 type BuiltinValidator struct {
 	Src  *ast.CallExpr
-	Fun  Expr    // TODO: should probably be builtin.
+	Fun  Expr
 	Args []Value // any but the first value
-	// call *builtin // function must return a bool
 }
 
-func (x *BuiltinValidator) Source() ast.Node { return x.Src }
-func (x *BuiltinValidator) Kind() Kind       { return TopKind }
+func (x *BuiltinValidator) Source() ast.Node {
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+func (x *BuiltinValidator) Kind() Kind { return TopKind }
+
+func (x *BuiltinValidator) validate(c *OpContext, v Value) *Bottom {
+	return nil
+}
 
 // A Disjunction represents a disjunction, where each disjunct may or may not
 // be marked as a default.
@@ -464,6 +928,15 @@
 	return x.Src
 }
 
+func (x *DisjunctionExpr) evaluate(c *OpContext) Value {
+	e := c.Env(0)
+	v := &Vertex{Conjuncts: []Conjunct{{e, x}}}
+	c.Unifier.Unify(c, v, Finalized) // TODO: also partial okay?
+	// TODO: if the disjunction result originated from a literal value, we may
+	// consider the result closed to create more permanent errors.
+	return v
+}
+
 // A Conjunction is a conjunction of values that cannot be represented as a
 // single value. It is the result of unification.
 type Conjunction struct {
@@ -471,8 +944,14 @@
 	Values []Value
 }
 
-func (x *Conjunction) Source() ast.Node { return nil }
-func (x *Conjunction) Kind() Kind       { return TopKind }
+func (x *Conjunction) Source() ast.Node { return x.Src }
+func (x *Conjunction) Kind() Kind {
+	k := TopKind
+	for _, v := range x.Values {
+		k &= v.Kind()
+	}
+	return k
+}
 
 // A disjunction is a disjunction of values. It is the result of expanding
 // a DisjunctionExpr if the expression cannot be represented as a single value.
@@ -511,7 +990,36 @@
 	Dst    Yielder
 }
 
-func (x *ForClause) Source() ast.Node { return x.Syntax }
+func (x *ForClause) Source() ast.Node {
+	if x.Syntax == nil {
+		return nil
+	}
+	return x.Syntax
+}
+
+func (x *ForClause) yield(c *OpContext, f YieldFunc) {
+	n := c.node(x.Src, Finalized)
+	for _, a := range n.Arcs {
+		if !a.Label.IsRegular() {
+			continue
+		}
+
+		n := &Vertex{Arcs: []*Vertex{
+			{Label: x.Value, Conjuncts: a.Conjuncts}, // TODO: only needed if value label != _
+		}}
+		if x.Key != 0 {
+			v := &Vertex{Label: x.Key}
+			key := a.Label.ToValue(c)
+			v.AddConjunct(MakeConjunct(c.Env(0), key))
+			n.Arcs = append(n.Arcs, v)
+		}
+
+		x.Dst.yield(c.spawn(n), f)
+		if c.HasErr() {
+			break
+		}
+	}
+}
 
 // An IfClause represents an if clause of a comprehension. It can be used
 // as a struct or list element.
@@ -524,7 +1032,18 @@
 	Dst       Yielder
 }
 
-func (x *IfClause) Source() ast.Node { return x.Src }
+func (x *IfClause) Source() ast.Node {
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+
+func (x *IfClause) yield(ctx *OpContext, f YieldFunc) {
+	if ctx.BoolValue(ctx.value(x.Condition)) {
+		x.Dst.yield(ctx, f)
+	}
+}
 
 // An LetClause represents a let clause in a comprehension.
 //
@@ -537,11 +1056,32 @@
 	Dst   Yielder
 }
 
-func (x *LetClause) Source() ast.Node { return x.Src }
+func (x *LetClause) Source() ast.Node {
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+
+func (x *LetClause) yield(c *OpContext, f YieldFunc) {
+	n := &Vertex{Arcs: []*Vertex{
+		{Label: x.Label, Conjuncts: []Conjunct{{c.Env(0), x.Expr}}},
+	}}
+	x.Dst.yield(c.spawn(n), f)
+}
 
 // A ValueClause represents the value part of a comprehension.
 type ValueClause struct {
 	*StructLit
 }
 
-func (x *ValueClause) Source() ast.Node { return x.Src }
+func (x *ValueClause) Source() ast.Node {
+	if x.Src == nil {
+		return nil
+	}
+	return x.Src
+}
+
+func (x *ValueClause) yield(op *OpContext, f YieldFunc) {
+	f(op.Env(0), x.StructLit)
+}
diff --git a/internal/core/adt/feature.go b/internal/core/adt/feature.go
index 684a74e..bc2a32c 100644
--- a/internal/core/adt/feature.go
+++ b/internal/core/adt/feature.go
@@ -16,6 +16,7 @@
 
 import (
 	"strconv"
+	"strings"
 
 	"cuelang.org/go/cue/ast"
 	"cuelang.org/go/cue/errors"
@@ -27,6 +28,8 @@
 // representation of an integer or string label as well as a label type.
 type Feature uint32
 
+// TODO: create labels such that list are sorted first (or last with index.)
+
 // InvalidLabel is an encoding of an erroneous label.
 const InvalidLabel Feature = 0x7 // 0xb111
 
@@ -68,9 +71,121 @@
 	}
 }
 
+// StringValue reports the string value of f, which must be a string label.
+func (f Feature) StringValue(index StringIndexer) string {
+	if !f.IsString() {
+		panic("not a string label")
+	}
+	x := f.Index()
+	return index.IndexToString(int64(x))
+}
+
+// ToValue converts a label to a value, which will be a Num for integer labels
+// and a String for string labels. It panics when f is not a regular label.
+func (f Feature) ToValue(ctx *OpContext) Value {
+	if !f.IsRegular() {
+		panic("not a regular label")
+	}
+	if f.IsInt() {
+		return ctx.NewInt64(int64(f.Index()))
+	}
+	x := f.Index()
+	str := ctx.IndexToString(int64(x))
+	return ctx.NewString(str)
+}
+
+// StringLabel converts s to a string label.
+func (c *OpContext) StringLabel(s string) Feature {
+	return labelFromValue(c, &String{Str: s})
+}
+
+// MakeStringLabel creates a label for the given string.
+func MakeStringLabel(r StringIndexer, s string) Feature {
+	i := r.StringToIndex(s)
+
+	// TODO: set position if it exists.
+	f, err := MakeLabel(nil, i, StringLabel)
+	if err != nil {
+		panic("out of free string slots")
+	}
+	return f
+}
+
+// MakeIdentLabel creates a label for the given identifier.
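+// The label type is derived from the identifier's prefix: "#_" yields a
+// hidden definition, "#" a definition, and "_" a hidden label; all other
+// identifiers yield a regular string label.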
+func MakeIdentLabel(r StringIndexer, s string) Feature {
+	i := r.StringToIndex(s)
+	t := StringLabel
+	switch {
+	case strings.HasPrefix(s, "#_"):
+		t = HiddenDefinitionLabel
+	case strings.HasPrefix(s, "#"):
+		t = DefinitionLabel
+	case strings.HasPrefix(s, "_"):
+		t = HiddenLabel
+	}
+	f, err := MakeLabel(nil, i, t)
+	if err != nil {
+		panic("out of free string slots")
+	}
+	return f
+}
+
+const msgGround = "invalid non-ground value %s (must be concrete %s)"
+
+func labelFromValue(ctx *OpContext, v Value) Feature {
+	var i int64
+	var t FeatureType
+	if isError(v) {
+		return InvalidLabel
+	}
+	switch v.Kind() {
+	case IntKind, NumKind:
+		x, _ := v.(*Num)
+		if x == nil {
+			ctx.addErrf(IncompleteError, pos(v), msgGround, v, "int")
+			return InvalidLabel
+		}
+		t = IntLabel
+		var err error
+		i, err = x.X.Int64()
+		if err != nil || x.K != IntKind {
+			ctx.AddErrf("invalid label %v: %v", v, err)
+			return InvalidLabel
+		}
+		if i < 0 {
+			ctx.AddErrf("invalid negative index %s", ctx.Str(x))
+			return InvalidLabel
+		}
+
+	case StringKind:
+		x, _ := v.(*String)
+		if x == nil {
+			ctx.addErrf(IncompleteError, pos(v), msgGround, v, "string")
+			return InvalidLabel
+		}
+		t = StringLabel
+		i = ctx.StringToIndex(x.Str)
+
+	default:
+		ctx.AddErrf("invalid label type %v", v.Kind())
+		return InvalidLabel
+	}
+
+	// TODO: set position if it exists.
+	f, err := MakeLabel(nil, i, t)
+	if err != nil {
+		ctx.AddErr(err)
+	}
+	return f
+}
+
 // MakeLabel creates a label. It reports an error if the index is out of range.
-func MakeLabel(p token.Pos, index int64, f FeatureType) (Feature, errors.Error) {
+func MakeLabel(src ast.Node, index int64, f FeatureType) (Feature, errors.Error) {
 	if 0 > index || index > MaxIndex {
+		p := token.NoPos
+		if src != nil {
+			p = src.Pos()
+		}
 		return InvalidLabel,
 			errors.Newf(p, "int label out of range (%d not >=0 and <= %d)",
 				index, MaxIndex)
diff --git a/internal/core/adt/kind.go b/internal/core/adt/kind.go
index cd82b4e..ddc3cbb 100644
--- a/internal/core/adt/kind.go
+++ b/internal/core/adt/kind.go
@@ -80,6 +80,13 @@
 		IntKind | FloatKind | StringKind | BytesKind
 )
 
+func kind(v Value) Kind {
+	if v == nil {
+		return BottomKind
+	}
+	return v.Kind()
+}
+
 // IsAnyOf reports whether k is any of the given kinds.
 //
 // For instances, k.IsAnyOf(String|Bytes) reports whether k overlaps with
diff --git a/internal/core/adt/simplify.go b/internal/core/adt/simplify.go
new file mode 100644
index 0000000..aa3eb58
--- /dev/null
+++ b/internal/core/adt/simplify.go
@@ -0,0 +1,195 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adt
+
+import (
+	"github.com/cockroachdb/apd/v2"
+)
+
+// SimplifyBounds collapses bounds if possible. The bound values must be
+// concrete. It returns nil if the bound values cannot be collapsed.
+//
+// k represents additional type constraints, such as `int`.
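+//
+// For example, >=3 & <=3 simplifies to 3, and !=1 & >5 simplifies to >5.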
+func SimplifyBounds(ctx *OpContext, k Kind, x, y *BoundValue) Value {
+	xv := x.Value
+	yv := y.Value
+
+	cmp, xCat := opInfo(x.Op)
+	_, yCat := opInfo(y.Op)
+
+	// k := x.Kind() & y.Kind()
+
+	switch {
+	case xCat == yCat:
+		if x.Op == NotEqualOp || x.Op == MatchOp || x.Op == NotMatchOp {
+			if test(ctx, EqualOp, xv, yv) {
+				return x
+			}
+			break // unify the two bounds
+		}
+
+		// xCat == yCat && x.Op != NotEqualOp
+		// > a & >= b
+		//    > a   if a >= b
+		//    >= b  if a <  b
+		// > a & > b
+		//    > a   if a >= b
+		//    > b   if a <  b
+		// >= a & > b
+		//    >= a   if a > b
+		//    > b    if a <= b
+		// >= a & >= b
+		//    >= a   if a > b
+		//    >= b   if a <= b
+		// inverse is true as well.
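+		//
+		// For example, >=2 & >=5 simplifies to >=5, and >3 & >=3 to >3.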
+
+		// Tighten bound.
+		if test(ctx, cmp, xv, yv) {
+			return x
+		}
+		return y
+
+	case xCat == -yCat:
+		if xCat == -1 {
+			x, y = y, x
+		}
+		a, aOK := xv.(*Num)
+		b, bOK := yv.(*Num)
+
+		if !aOK || !bOK {
+			break
+		}
+
+		var d, lo, hi apd.Decimal
+		lo.Set(&a.X)
+		hi.Set(&b.X)
+		if k&FloatKind == 0 {
+			// Readjust bounds for integers.
+			if x.Op == GreaterEqualOp {
+				// >=3.4  ==>  >=4
+				_, _ = apd.BaseContext.Ceil(&lo, &a.X)
+			} else {
+				// >3.4  ==>  >3
+				_, _ = apd.BaseContext.Floor(&lo, &a.X)
+			}
+			if y.Op == LessEqualOp {
+				// <=2.3  ==>  <= 2
+				_, _ = apd.BaseContext.Floor(&hi, &b.X)
+			} else {
+				// <2.3   ==>  < 3
+				_, _ = apd.BaseContext.Ceil(&hi, &b.X)
+			}
+		}
+
+		cond, err := apd.BaseContext.Sub(&d, &hi, &lo)
+		if cond.Inexact() || err != nil {
+			break
+		}
+
+		// attempt simplification
+		// numbers
+		// >=a & <=b
+		//     a   if a == b
+		//     _|_ if a < b
+		// >=a & <b
+		//     _|_ if b <= a
+		// >a  & <=b
+		//     _|_ if b <= a
+		// >a  & <b
+		//     _|_ if b <= a
+
+		// integers
+		// >=a & <=b
+		//     a   if b-a == 0
+		//     _|_ if a < b
+		// >=a & <b
+		//     a   if b-a == 1
+		//     _|_ if b <= a
+		// >a  & <=b
+		//     b   if b-a == 1
+		//     _|_ if b <= a
+		// >a  & <b
+		//     a+1 if b-a == 2
+		//     _|_ if b <= a
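+		//
+		// For example, for integers, >=1 & <=1 collapses to 1, >=1 & <2 and
+		// >0 & <2 both collapse to 1, and >=2 & <=1 results in an error.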
+
+		switch diff, err := d.Int64(); {
+		case err != nil:
+
+		case diff == 1:
+			if k&FloatKind == 0 {
+				if x.Op == GreaterEqualOp && y.Op == LessThanOp {
+					return ctx.newNum(&lo, k&NumKind, x, y)
+				}
+				if x.Op == GreaterThanOp && y.Op == LessEqualOp {
+					return ctx.newNum(&hi, k&NumKind, x, y)
+				}
+			}
+
+		case diff == 2:
+			if k&FloatKind == 0 && x.Op == GreaterThanOp && y.Op == LessThanOp {
+				_, _ = apd.BaseContext.Add(&d, d.SetInt64(1), &lo)
+				return ctx.newNum(&d, k&NumKind, x, y)
+
+			}
+
+		case diff == 0:
+			if x.Op == GreaterEqualOp && y.Op == LessEqualOp {
+				return ctx.newNum(&lo, k&NumKind, x, y)
+			}
+			fallthrough
+
+		case d.Negative:
+			return ctx.NewErrf("bounds %v %v", ctx.Str(x), ctx.Str(y))
+		}
+
+	case x.Op == NotEqualOp:
+		if !test(ctx, y.Op, xv, yv) {
+			return y
+		}
+
+	case y.Op == NotEqualOp:
+		if !test(ctx, x.Op, yv, xv) {
+			return x
+		}
+	}
+	return nil
+}
+
+func opInfo(op Op) (cmp Op, norm int) {
+	switch op {
+	case GreaterThanOp:
+		return GreaterEqualOp, 1
+	case GreaterEqualOp:
+		return GreaterThanOp, 1
+	case LessThanOp:
+		return LessEqualOp, -1
+	case LessEqualOp:
+		return LessThanOp, -1
+	case NotEqualOp:
+		return NotEqualOp, 0
+	case MatchOp:
+		return MatchOp, 2
+	case NotMatchOp:
+		return NotMatchOp, 3
+	}
+	panic("cue: unreachable")
+}
+
+func test(ctx *OpContext, op Op, a, b Value) bool {
+	if b, ok := BinOp(ctx, op, a, b).(*Bool); ok {
+		return b.B
+	}
+	return false
+}
diff --git a/internal/core/compile/compile.go b/internal/core/compile/compile.go
index 018b99b..eacd993 100644
--- a/internal/core/compile/compile.go
+++ b/internal/core/compile/compile.go
@@ -24,7 +24,6 @@
 	"cuelang.org/go/cue/token"
 	"cuelang.org/go/internal"
 	"cuelang.org/go/internal/core/adt"
-	"cuelang.org/go/internal/core/runtime"
 	"golang.org/x/xerrors"
 )
 
@@ -37,7 +36,7 @@
 // the packages names are consistent.
 //
 // Files may return a completed parse even if it has errors.
-func Files(cfg *Config, r *runtime.Runtime, files ...*ast.File) (*adt.Vertex, errors.Error) {
+func Files(cfg *Config, r adt.Runtime, files ...*ast.File) (*adt.Vertex, errors.Error) {
 	c := &compiler{index: r}
 
 	v := c.compileFiles(files)
@@ -74,7 +73,7 @@
 		Message: errors.NewMessage(format, args),
 	}
 	c.errs = errors.Append(c.errs, err)
-	return &adt.Bottom{}
+	return &adt.Bottom{Err: err}
 }
 
 func (c *compiler) path() []string {
@@ -387,7 +386,7 @@
 				name, isIdent, err := ast.LabelName(lab)
 				if err == nil && isIdent {
 					idx := c.index.StringToIndex(name)
-					label, _ = adt.MakeLabel(x.Pos(), idx, adt.DefinitionLabel)
+					label, _ = adt.MakeLabel(x, idx, adt.DefinitionLabel)
 				}
 			}
 
@@ -667,7 +666,11 @@
 		return slice
 
 	case *ast.BottomLit:
-		return &adt.Bottom{Src: n}
+		return &adt.Bottom{
+			Src:  n,
+			Code: adt.UserError,
+			Err:  errors.Newf(n.Pos(), "from source"),
+		}
 
 	case *ast.BadExpr:
 		return c.errf(n, "invalid expression")
@@ -787,6 +790,8 @@
 	d.Values = append(d.Values, adt.Disjunct{Val: c.expr(n), Default: mark})
 }
 
+// TODO(perf): validate that regexps are cached at the right time.
+
 func (c *compiler) parse(l *ast.BasicLit) (n adt.Expr) {
 	s := l.Value
 	if s == "" {
diff --git a/internal/core/compile/label.go b/internal/core/compile/label.go
index 1c03298..4320252 100644
--- a/internal/core/compile/label.go
+++ b/internal/core/compile/label.go
@@ -42,7 +42,7 @@
 		case strings.HasPrefix(s, "_"):
 			t = adt.HiddenLabel
 		}
-		f, err := adt.MakeLabel(n.Pos(), i, t)
+		f, err := adt.MakeLabel(n, i, t)
 		if err != nil {
 			c.errf(n, "invalid identifier label: %v", err)
 			return adt.InvalidLabel
@@ -60,7 +60,7 @@
 			}
 
 			i := int64(index.StringToIndex(norm.NFC.String(s)))
-			f, err := adt.MakeLabel(n.Pos(), i, adt.StringLabel)
+			f, err := adt.MakeLabel(n, i, adt.StringLabel)
 			if err != nil {
 				c.errf(n, msg, err)
 			}
@@ -85,7 +85,7 @@
 				return adt.InvalidLabel
 			}
 
-			f, err := adt.MakeLabel(n.Pos(), i, adt.IntLabel)
+			f, err := adt.MakeLabel(n, i, adt.IntLabel)
 			if err != nil {
 				c.errf(n, msg, err)
 				return adt.InvalidLabel
@@ -98,7 +98,7 @@
 
 		default: // keywords (null, true, false, for, in, if, let)
 			i := index.StringToIndex(x.Kind.String())
-			f, err := adt.MakeLabel(n.Pos(), i, adt.StringLabel)
+			f, err := adt.MakeLabel(n, i, adt.StringLabel)
 			if err != nil {
 				c.errf(n, "invalid string label: %v", err)
 			}
diff --git a/internal/core/compile/predeclared.go b/internal/core/compile/predeclared.go
index fcadc36..6c54d85 100644
--- a/internal/core/compile/predeclared.go
+++ b/internal/core/compile/predeclared.go
@@ -59,6 +59,12 @@
 	return nil
 }
 
+// LookupRange returns a CUE expression for the given predeclared identifier
+// representing a range, such as uint8, int128, and float64.
+func LookupRange(name string) adt.Expr {
+	return predefinedRanges[name]
+}
+
 var predefinedRanges = map[string]adt.Expr{
 	"rune":  mkIntRange("0", strconv.Itoa(0x10FFFF)),
 	"int8":  mkIntRange("-128", "127"),
@@ -90,7 +96,13 @@
 	),
 }
 
-// TODO: use an adt.BoundValue here.
+func init() {
+	for k, v := range predefinedRanges {
+		predefinedRanges["__"+k] = v
+	}
+}
+
+// TODO: use an adt.BoundValue and conjunctions here.
 
 func mkUint() adt.Expr {
 	from := newBound(adt.GreaterEqualOp, adt.IntKind, parseInt("0"))
@@ -107,25 +119,38 @@
 func mkIntRange(a, b string) adt.Expr {
 	from := newBound(adt.GreaterEqualOp, adt.IntKind, parseInt(a))
 	to := newBound(adt.LessEqualOp, adt.IntKind, parseInt(b))
-	return &adt.BinaryExpr{nil, adt.AndOp, from, to}
+	ident := ast.NewIdent("__int")
+	src := ast.NewBinExpr(token.AND, ident, from.Src, to.Src)
+	return &adt.Conjunction{
+		Src: src,
+		Values: []adt.Value{
+			&adt.BasicType{Src: ident, K: adt.IntKind}, from, to,
+		},
+	}
 }
 
 func mkFloatRange(a, b string) adt.Expr {
 	from := newBound(adt.GreaterEqualOp, adt.NumKind, parseFloat(a))
 	to := newBound(adt.LessEqualOp, adt.NumKind, parseFloat(b))
-	return &adt.BinaryExpr{nil, adt.AndOp, from, to}
+	src := ast.NewBinExpr(token.AND, from.Src, to.Src)
+	return &adt.Conjunction{Src: src, Values: []adt.Value{from, to}}
 }
 
 func newBound(op adt.Op, k adt.Kind, v adt.Value) *adt.BoundValue {
-	return &adt.BoundValue{Op: op, Value: v}
+	src := &ast.UnaryExpr{Op: op.Token(), X: v.Source().(ast.Expr)}
+	return &adt.BoundValue{Src: src, Op: op, Value: v}
 }
 
 func parseInt(s string) *adt.Num {
-	return parseNum(adt.IntKind, s)
+	n := parseNum(adt.IntKind, s)
+	n.Src = &ast.BasicLit{Kind: token.INT, Value: s}
+	return n
 }
 
 func parseFloat(s string) *adt.Num {
-	return parseNum(adt.FloatKind, s)
+	n := parseNum(adt.FloatKind, s)
+	n.Src = &ast.BasicLit{Kind: token.FLOAT, Value: s}
+	return n
 }
 
 func parseNum(k adt.Kind, s string) *adt.Num {
diff --git a/internal/core/debug/compact.go b/internal/core/debug/compact.go
index 5be3f10..67a4da9 100644
--- a/internal/core/debug/compact.go
+++ b/internal/core/debug/compact.go
@@ -133,8 +133,7 @@
 		w.string(`_|_`)
 		if x.Err != nil {
 			w.string("(")
-			msg, args := x.Err.Msg()
-			w.string(fmt.Sprintf(msg, args...))
+			w.string(x.Err.Error())
 			w.string(")")
 		}
 
diff --git a/internal/core/debug/debug.go b/internal/core/debug/debug.go
index b739bea..6209b83 100644
--- a/internal/core/debug/debug.go
+++ b/internal/core/debug/debug.go
@@ -109,39 +109,63 @@
 		}
 
 		kindStr := kind.String()
-		kindStr = strings.ReplaceAll(kindStr, "{...}", "struct")
-		kindStr = strings.ReplaceAll(kindStr, "[...]", "list")
+
+		// TODO: replace with showing full closedness data.
+		if x.IsClosed(nil) {
+			if kind == adt.ListKind || kind == adt.StructKind {
+				kindStr = "#" + kindStr
+			}
+		}
 
 		fmt.Fprintf(w, "(%s){", kindStr)
 
-		if x.Value != nil && kind&^(adt.StructKind|adt.ListKind) != 0 {
-			w.string(" ")
-			w.node(x.Value)
-			w.string(" }")
-			return
-		}
-
 		saved := w.indent
 		w.indent += "  "
+		defer func() { w.indent = saved }()
 
-		if b, ok := x.Value.(*adt.Bottom); ok {
+		switch v := x.Value.(type) {
+		case nil:
+		case *adt.Bottom:
+			// TODO: reuse bottom.
 			saved := w.indent
 			w.indent += "// "
 			w.string("\n")
-			w.string(strings.TrimSpace(errors.Details(b.Err, &errors.Config{
-				Cwd:     w.cfg.Cwd,
-				ToSlash: true,
-			})))
+			fmt.Fprintf(w, "[%v]", v.Code)
+			if !v.ChildError {
+				msg := errors.Details(v.Err, &errors.Config{
+					Cwd:     w.cfg.Cwd,
+					ToSlash: true,
+				})
+				msg = strings.TrimSpace(msg)
+				if msg != "" {
+					w.string(" ")
+					w.string(msg)
+				}
+			}
 			w.indent = saved
+
+		case *adt.StructMarker, *adt.ListMarker:
+			// if len(x.Arcs) == 0 {
+			// 	// w.string("}")
+			// 	// return
+			// }
+
+		default:
+			if len(x.Arcs) == 0 {
+				w.string(" ")
+				w.node(x.Value)
+				w.string(" }")
+				return
+			}
+			w.string("\n")
+			w.node(x.Value)
 		}
 
-		if len(x.Arcs) > 0 {
-			for _, a := range x.Arcs {
-				w.string("\n")
-				w.label(a.Label)
-				w.string(": ")
-				w.node(a)
-			}
+		for _, a := range x.Arcs {
+			w.string("\n")
+			w.label(a.Label)
+			w.string(": ")
+			w.node(a)
 		}
 
 		if x.Value == nil {
diff --git a/internal/core/eval/closed.go b/internal/core/eval/closed.go
new file mode 100644
index 0000000..10cee11
--- /dev/null
+++ b/internal/core/eval/closed.go
@@ -0,0 +1,340 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package eval
+
+// The file implements the majority of the closed struct semantics.
+// The data is recorded in the Closed field of a Vertex.
+//
+// Each vertex has a set of conjuncts that make up the values of the vertex.
+// Each Conjunct may originate from various sources, like an embedding, field
+// definition or regular value. For the purpose of computing the value, the
+// source of the conjunct is irrelevant. The origin does matter, however,
+// for determining whether a field is allowed in a closed struct. The Closed
+// field keeps track of the kind of origin for this purpose.
+//
+// More precisely, the CloseDef struct explains how the conjuncts of an arc
+// were combined and define a logical expression on the field sets
+// computed for each conjunct.
+//
+// While evaluating each conjunct, nodeContext keeps track of what changes need
+// to be made to the CloseDef based on the evaluation of the current conjuncts.
+// For instance, if a field references a definition, all other previous
+// checks are useless, as the newly referred to definitions define an upper
+// bound and will contain all the information that is necessary to determine
+// whether a field may be included.
+//
+// Most of the logic in this file concerns itself with the combination of
+// multiple CloseDef values as well as traversing the structure to validate
+// whether an arc is allowed. The actual fieldSet logic is in optional.go.
+// The overall control and use of the functionality in this file is
+// in eval.go.
+
+import (
+	"cuelang.org/go/cue/errors"
+	"cuelang.org/go/cue/token"
+	"cuelang.org/go/internal/core/adt"
+)
+
+// acceptor implements adt.Acceptor.
+//
+// Note that it keeps track of whether it represents a closed struct. An
+// acceptor is also used to associate a CloseDef with a Vertex, and not
+// all CloseDefs represent a closed struct: a value that contains embeddings may
+// eventually turn into a closed struct. Consider
+//
+//    a: {
+//       b
+//       d: e: int
+//    }
+//    b: d: {
+//       #A & #B
+//    }
+//
+// At the point of evaluating `a`, the struct is not yet closed. However,
+// descending into `d` will trigger the inclusion of definitions which in turn
+// causes the struct to be closed. At this point, it is important to know that
+// `b` originated from an embedding, as otherwise `e` may not be allowed.
+//
+type acceptor struct {
+	tree     *CloseDef
+	fields   []fieldSet
+	isClosed bool
+	isList   bool
+	openList bool
+}
+
+func (a *acceptor) Accept(c *adt.OpContext, f adt.Feature) bool {
+	if a.isList {
+		return a.openList
+	}
+	if !a.isClosed {
+		return true
+	}
+	if f == adt.InvalidLabel {
+		return false
+	}
+	if f.IsInt() {
+		return a.openList
+	}
+	return a.verifyArcAllowed(c, f) == nil
+}
+
+func (a *acceptor) MatchAndInsert(c *adt.OpContext, v *adt.Vertex) {
+	for _, fs := range a.fields {
+		fs.MatchAndInsert(c, v)
+	}
+}
+
+// CloseDef defines how individual FieldSets (corresponding to conjuncts)
+// combine to determine whether a field is contained in a closed set.
+//
+// Nodes with a non-empty List and IsAnd set to false represent embeddings.
+// In that case, the ID is that of the node that contained the embedding.
+//
+// Nodes with a non-empty List and IsAnd set to true represent conjunctions of
+// definitions. In this case, a field must be contained in each definition.
+//
+// If a node has both conjunctions of definitions and embeddings, only the
+// former are maintained. Conjunctions of definitions define an upper bound
+// of the set of allowed fields in that case and the embeddings will not add
+// any value.
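+//
+// For example, a conjunction of two definitions, as in `a: #A & #B`, may be
+// represented by an IsAnd node with one leaf per definition (see
+// TestRewriteClosed in closed_test.go for more elaborate trees):
+//
+//    &CloseDef{IsAnd: true, List: []*CloseDef{{ID: 2}, {ID: 3}}}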
+type CloseDef struct {
+	ID    uint32
+	IsAnd bool
+	List  []*CloseDef
+}
+
+// isOr reports whether this is a node representing embeddings.
+func isOr(c *CloseDef) bool {
+	return len(c.List) > 0 && !c.IsAnd
+}
+
+// updateClosed transforms c into a new node with all non-AND nodes with an
+// ID matching one in replace substituted with the replace value.
+//
+// Vertex only keeps track of a flat list of conjuncts and does not keep track
+// of the hierarchy of how these were derived. This function allows rewriting
+// a CloseDef tree based on replacement information gathered during evaluation
+// of this flat list.
+//
+func updateClosed(c *CloseDef, replace map[uint32]*CloseDef) *CloseDef { // used in eval.go
+	switch {
+	case c == nil:
+		and := []*CloseDef{}
+		for _, c := range replace {
+			and = append(and, c)
+		}
+		switch len(and) {
+		case 0:
+		case 1:
+			c = and[0]
+		default:
+			c = &CloseDef{IsAnd: true, List: and}
+		}
+		// needClose
+	case len(replace) > 0:
+		c = updateClosedRec(c, replace)
+	}
+	return c
+}
+
+func updateClosedRec(c *CloseDef, replace map[uint32]*CloseDef) *CloseDef {
+	if c == nil {
+		return nil
+	}
+
+	// If c is a leaf or AND node, replace it outright. If both are an OR node,
+	// merge the lists.
+	if len(c.List) == 0 || !c.IsAnd {
+		if sub := replace[c.ID]; sub != nil {
+			if isOr(sub) && isOr(c) {
+				sub.List = append(sub.List, c.List...)
+			}
+			return sub
+		}
+	}
+
+	changed := false
+	buf := make([]*CloseDef, len(c.List))
+	k := 0
+	for _, c := range c.List {
+		n := updateClosedRec(c, replace)
+		changed = changed || n != c
+		if n != nil {
+			buf[k] = n
+			k++
+		}
+	}
+	if !changed {
+		return c
+	}
+
+	if k == 1 {
+		return buf[0]
+	}
+
+	return &CloseDef{ID: c.ID, IsAnd: c.IsAnd, List: buf[:k]}
+}
+
+// updateReplace is called after evaluating a conjunct at the top of the arc
+// to update the replacement information with the gathered CloseDef info.
+func (n *nodeContext) updateReplace(env *adt.Environment) { // used in eval.go
+	if n.newClose == nil {
+		return
+	}
+
+	if n.replace == nil {
+		n.replace = make(map[uint32]*CloseDef)
+	}
+
+	id := uint32(0)
+	if env != nil {
+		id = env.CloseID
+	}
+
+	n.replace[id] = updateClose(n.replace[id], n.newClose)
+	n.newClose = nil
+}
+
+// appendLists creates a new CloseDef with the elements of the lists of orig
+// and update appended. It takes the ID of orig. It does not alter
+// either orig or update.
+func appendLists(orig, update *CloseDef) *CloseDef {
+	list := make([]*CloseDef, len(orig.List)+len(update.List))
+	copy(list[copy(list, orig.List):], update.List)
+	c := *orig
+	c.List = list
+	return &c
+}
+
+// updateClose merges update into orig without altering either.
+//
+// The merge takes into account whether it is an embedding node or not.
+// Most notably, if an "And" node is combined with an embedding, the
+// embedding information may be discarded.
+func updateClose(orig, update *CloseDef) *CloseDef {
+	switch {
+	case orig == nil:
+		return update
+	case isOr(orig):
+		if !isOr(update) {
+			return update
+		}
+		return appendLists(orig, update)
+	case isOr(update):
+		return orig
+	case len(orig.List) == 0 && len(update.List) == 0:
+		return &CloseDef{IsAnd: true, List: []*CloseDef{orig, update}}
+	case len(orig.List) == 0:
+		update.List = append(update.List, orig)
+		return update
+	default: // isAnd(orig)
+		return appendLists(orig, update)
+	}
+}
+
+func (n *nodeContext) addAnd(c *CloseDef) { // used in eval.go
+	switch {
+	case n.newClose == nil:
+		n.newClose = c
+	case isOr(n.newClose):
+		n.newClose = c
+	case len(n.newClose.List) == 0:
+		n.newClose = &CloseDef{
+			IsAnd: true,
+			List:  []*CloseDef{n.newClose, c},
+		}
+	default:
+		n.newClose.List = append(n.newClose.List, c)
+	}
+}
+
+func (n *nodeContext) addOr(parentID uint32, c *CloseDef) { // used in eval.go
+	switch {
+	case n.newClose == nil:
+		d := &CloseDef{ID: parentID, List: []*CloseDef{{ID: parentID}}}
+		if c != nil {
+			d.List = append(d.List, c)
+		}
+		n.newClose = d
+	case isOr(n.newClose):
+		d := n.newClose
+		if c != nil {
+			d.List = append(d.List, c)
+		}
+	}
+}
+
+// verifyArcAllowed checks whether f is an allowed label within the current
+// node. It traverses c considering the "or" semantics of embeddings and the
+// "and" semantics of conjunctions. It generates an error if a field is not
+// allowed.
+func (n *acceptor) verifyArcAllowed(ctx *adt.OpContext, f adt.Feature) *adt.Bottom {
+	filter := f.IsString() || f == adt.InvalidLabel
+	if filter && !n.verifyArcRecursive(ctx, n.tree, f) {
+		label := f.SelectorString(ctx)
+		return &adt.Bottom{
+			Err: errors.Newf(token.NoPos, "field `%s` not allowed", label),
+		}
+	}
+	return nil
+}
+
+func (n *acceptor) verifyArcRecursive(ctx *adt.OpContext, c *CloseDef, f adt.Feature) bool {
+	if len(c.List) == 0 {
+		return n.verifyDefinition(ctx, c.ID, f)
+	}
+	if c.IsAnd {
+		for _, c := range c.List {
+			if !n.verifyArcRecursive(ctx, c, f) {
+				return false
+			}
+		}
+		return true
+	}
+	for _, c := range c.List {
+		if n.verifyArcRecursive(ctx, c, f) {
+			return true
+		}
+	}
+	return false
+}
+
+// verifyDefinition reports whether f is a valid member for any of the fieldSets
+// with the same closeID.
+func (n *acceptor) verifyDefinition(ctx *adt.OpContext, closeID uint32, f adt.Feature) (ok bool) {
+	for _, o := range n.fields {
+		if o.env.CloseID != closeID {
+			continue
+		}
+
+		if len(o.additional) > 0 || o.isOpen {
+			return true
+		}
+
+		for _, g := range o.fields {
+			if f == g.label {
+				return true
+			}
+		}
+
+		for _, b := range o.bulk {
+			if b.check.Match(ctx, f) {
+				return true
+			}
+		}
+	}
+	return false
+}
diff --git a/internal/core/eval/closed_test.go b/internal/core/eval/closed_test.go
new file mode 100644
index 0000000..144ecce
--- /dev/null
+++ b/internal/core/eval/closed_test.go
@@ -0,0 +1,135 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package eval
+
+import (
+	"testing"
+
+	"github.com/google/go-cmp/cmp"
+)
+
+func TestRewriteClosed(t *testing.T) {
+	testCases := []struct {
+		desc    string
+		close   *CloseDef
+		replace map[uint32]*CloseDef
+		want    *CloseDef
+	}{{
+		desc: "a: #A & #B",
+		close: &CloseDef{
+			ID: 1,
+		},
+		replace: map[uint32]*CloseDef{
+			1: {ID: 1, IsAnd: true, List: []*CloseDef{{ID: 2}, {ID: 3}}},
+		},
+		want: &CloseDef{
+			ID:    0x01,
+			IsAnd: true,
+			List:  []*CloseDef{{ID: 2}, {ID: 3}},
+		},
+	}, {
+		// Eliminate an embedding for which there are no more entries.
+		// 	desc: "eliminateOneEmbedding",
+		// 	close: &CloseDef{
+		// 		ID: 0,
+		// 		List: []*CloseDef{
+		// 			{ID: 2},
+		// 			{ID: 3},
+		// 		},
+		// 	},
+		// 	replace: map[uint32]*CloseDef{2: nil},
+		// 	want:    &CloseDef{ID: 2},
+		// }, {
+		// Do not eliminate an embedding that has a replacement.
+		desc: "eliminateOneEmbeddingByMultiple",
+		close: &CloseDef{
+			ID: 0,
+			List: []*CloseDef{
+				{ID: 2},
+				{ID: 3},
+			},
+		},
+		replace: map[uint32]*CloseDef{
+			2: nil,
+			3: {ID: 3, IsAnd: true, List: []*CloseDef{{ID: 4}, {ID: 5}}},
+		},
+		want: &CloseDef{
+			ID: 0x00,
+			List: []*CloseDef{
+				{ID: 2},
+				{ID: 3, IsAnd: true, List: []*CloseDef{{ID: 4}, {ID: 5}}},
+			},
+		},
+	}, {
+		// Select b within a
+		// a: {      // ID: 0
+		//     #A    // ID: 1
+		//     #B    // ID: 2
+		//     b: #C // ID: 0
+		// }
+		// #C: {
+		//     b: #D // ID: 3
+		// }
+		//
+		desc: "embeddingOverruledByField",
+		close: &CloseDef{
+			ID: 0,
+			List: []*CloseDef{
+				{ID: 1},
+				{ID: 2},
+				{ID: 0},
+			},
+		},
+		replace: map[uint32]*CloseDef{0: {ID: 3}},
+		want:    &CloseDef{ID: 3},
+	}, {
+		// Select b within a
+		// a: {      // ID: 0
+		//     #A    // ID: 1
+		//     #B    // ID: 2
+		//     b: #C // ID: 0
+		// }
+		// #C: {
+		//     b: #D & #E // ID: 3 & 4
+		// }
+		//
+		desc: "embeddingOverruledByMultiple",
+		close: &CloseDef{
+			ID: 0,
+			List: []*CloseDef{
+				{ID: 1},
+				{ID: 2},
+				{ID: 0},
+			},
+		},
+		replace: map[uint32]*CloseDef{
+			0: {IsAnd: true, List: []*CloseDef{{ID: 3}, {ID: 4}}},
+		},
+		want: &CloseDef{
+			ID:    0,
+			IsAnd: true,
+			List:  []*CloseDef{{ID: 3}, {ID: 4}},
+		},
+	}}
+
+	for _, tc := range testCases {
+		t.Run(tc.desc, func(t *testing.T) {
+			got := updateClosed(tc.close, tc.replace)
+			if !cmp.Equal(got, tc.want) {
+				t.Error(cmp.Diff(got, tc.want))
+			}
+		})
+	}
+}
diff --git a/internal/core/eval/disjunct.go b/internal/core/eval/disjunct.go
new file mode 100644
index 0000000..db5df3c
--- /dev/null
+++ b/internal/core/eval/disjunct.go
@@ -0,0 +1,376 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package eval
+
+import (
+	"sort"
+
+	"cuelang.org/go/cue/errors"
+	"cuelang.org/go/cue/token"
+	"cuelang.org/go/internal/core/adt"
+)
+
+// Nodes may not reenter a disjunction.
+//
+// Copy one layer deep; throw away items on failure.
+
+// DISJUNCTION ALGORITHM
+//
+// The basic concept of the algorithm is to use backtracking to find valid
+// disjunctions. The algorithm can stop if two matching disjuncts are found
+// where one does not subsume the other.
+//
+// At a later point, we can introduce a filter step to filter out possible
+// disjuncts based on, say, discriminator fields or field exclusivity (oneOf
+// fields in Protobuf).
+//
+// To understand the details of the algorithm, it is important to understand
+// some properties of disjunction.
+//
+//
+// EVALUATION OF A DISJUNCTION IS SELF CONTAINED
+//
+// In other words, fields outside of a disjunction cannot bind to values within
+// a disjunction whilst evaluating that disjunction. This allows the computation
+// of disjunctions to be isolated from side effects.
+//
+// The intuition behind this is as follows: as a disjunction is not a concrete
+// value, it is not possible to lookup a field within a disjunction if it has
+// not yet been evaluated. So if a reference within a disjunction that is needed
+// to disambiguate that disjunction refers to a field outside the scope of the
+// disjunction which, in turn, refers to a field within the disjunction, this
+// results in a cycle error. We achieve this by not removing the cycle marker of
+// the Vertex of the disjunction until the disjunction is resolved.
+//
+// Note that the following disjunct is still allowed:
+//
+//    a: 1
+//    b: a
+//
+// Even though `a` refers to the root of the disjunction, it does not _select
+// into_ the disjunction. Implementation-wise, it also doesn't have to, as the
+// respective vertex is available within the Environment. Referencing a node
+// outside the disjunction that in turn selects the disjunction root, however,
+// will result in a detected cycle.
+//
+// As usual, a detected cycle should be interpreted as incomplete, so that
+// the referring node will not be fixed to an error prematurely.
+//
+//
+// SUBSUMPTION OF AMBIGUOUS DISJUNCTS
+//
+// A disjunction can be evaluated to a concrete value if only one disjunct
+// remains. Aside from disambiguating through unification failure, disjuncts
+// may also be disambiguated by taking the least specific of two disjuncts.
+// For instance, if a subsumes b, then the result of disjunction may be a.
+//
+//   NEW ALGORITHM NO LONGER VERIFIES SUBSUMPTION. SUBSUMPTION IS INHERENTLY
+//   IMPRECISE (DUE TO BULK OPTIONAL FIELDS). OTHER THAN THAT, FOR SCALAR VALUES
+//   IT JUST MEANS THERE IS AMBIGUITY, AND FOR STRUCTS IT CAN LEAD TO STRANGE
+//   CONSEQUENCES.
+//
+//   USE EQUALITY INSTEAD:
+//     - Undefined == error for optional fields.
+//     - So only need to check exact labels for vertices.
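+//
+// As an illustration of disambiguation, in
+//
+//    a: *1 | 2 | 3
+//
+// the marked default 1 disambiguates the disjunction: a concrete value can be
+// selected even though multiple disjuncts remain valid.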
+
+type envDisjunct struct {
+	env         *adt.Environment
+	values      []disjunct
+	numDefaults int
+	cloneID     uint32
+	isEmbed     bool
+}
+
+type disjunct struct {
+	expr      adt.Expr
+	isDefault bool
+}
+
+func (n *nodeContext) addDisjunction(env *adt.Environment, x *adt.DisjunctionExpr, cloneID uint32, isEmbed bool) {
+	a := []disjunct{}
+
+	numDefaults := 0
+	for _, v := range x.Values {
+		isDef := v.Default // || n.hasDefaults(env, v.Val)
+		if isDef {
+			numDefaults++
+		}
+		a = append(a, disjunct{v.Val, isDef})
+	}
+
+	sort.SliceStable(a, func(i, j int) bool {
+		return !a[j].isDefault && a[i].isDefault != a[j].isDefault
+	})
+
+	n.disjunctions = append(n.disjunctions,
+		envDisjunct{env, a, numDefaults, cloneID, isEmbed})
+}
+
+func (n *nodeContext) addDisjunctionValue(env *adt.Environment, x *adt.Disjunction, cloneID uint32, isEmbed bool) {
+	a := []disjunct{}
+
+	for i, v := range x.Values {
+		a = append(a, disjunct{v, i < x.NumDefaults})
+	}
+
+	n.disjunctions = append(n.disjunctions,
+		envDisjunct{env, a, x.NumDefaults, cloneID, isEmbed})
+}
+
+func (n *nodeContext) updateResult() (isFinal bool) {
+	n.postDisjunct()
+
+	if n.hasErr() {
+		return n.isFinal
+	}
+
+	d := n.nodeShared.disjunct
+	if d == nil {
+		d = &adt.Disjunction{}
+		n.nodeShared.disjunct = d
+	}
+
+	result := *n.node
+	if result.Value == nil {
+		result.Value = n.getValidators()
+	}
+
+	for _, v := range d.Values {
+		if Equal(n.ctx, v, &result) {
+			return isFinal
+		}
+	}
+
+	p := &result
+	d.Values = append(d.Values, p)
+	if n.defaultMode == isDefault {
+		// Keep defaults sorted first.
+		i := d.NumDefaults
+		j := i + 1
+		copy(d.Values[j:], d.Values[i:])
+		d.Values[i] = p
+		d.NumDefaults = j
+	}
+
+	// return n.isFinal
+
+	switch {
+	case !n.nodeShared.hasResult():
+
+	case n.nodeShared.isDefault() && n.defaultMode != isDefault:
+		return n.isFinal
+
+	case !n.nodeShared.isDefault() && n.defaultMode == isDefault:
+
+	default:
+		if Equal(n.ctx, n.node, n.result()) {
+			return n.isFinal
+		}
+
+		// TODO: Compute fancy error message.
+		n.nodeShared.resultNode = n
+		// n.nodeShared.result.AddErr(n.ctx, &adt.Bottom{
+		// 	Code: adt.IncompleteError,
+		// 	Err:  errors.Newf(n.ctx.Pos(), "ambiguous disjunction"),
+		// })
+		n.nodeShared.result_.Arcs = nil
+		n.nodeShared.result_.Structs = nil
+		return n.isFinal // n.defaultMode == isDefault
+	}
+
+	n.nodeShared.resultNode = n
+	n.nodeShared.setResult(n.node)
+
+	return n.isFinal
+}
+
+func (n *nodeContext) tryDisjuncts() (finished bool) {
+	if !n.insertDisjuncts() || !n.updateResult() {
+		if !n.isFinal {
+			return false // More iterations to do.
+		}
+	}
+
+	if n.nodeShared.hasResult() {
+		return true // found something
+	}
+
+	if len(n.disjunctions) > 0 {
+		b := &adt.Bottom{
+			// TODO(errors): we should not make this error worse by discarding
+			// the type or error. Using IncompleteError is a compromise. But
+			// really we should keep track of the errors and return a more
+			// accurate result here.
+			Code: adt.IncompleteError,
+			Err:  errors.Newf(token.NoPos, "empty disjunction"),
+		}
+		n.node.AddErr(n.ctx, b)
+	}
+	return true
+}
+
+// TODO: add proper conjuncts for the ones used by the disjunctions to replace
+// the original source.
+//
+func (n *nodeContext) insertDisjuncts() (inserted bool) {
+	p := 0
+	inserted = true
+
+	disjunctions := []envDisjunct{}
+
+	// fmt.Println("----", debug.NodeString(n.ctx, n.node, nil))
+	for _, d := range n.disjunctions {
+		disjunctions = append(disjunctions, d)
+
+		sub := len(n.disjunctions)
+		defMode, ok := n.insertSingleDisjunct(p, d, false)
+		p++
+		if !ok {
+			inserted = false
+			break
+		}
+
+		subMode := []defaultMode{}
+		for ; sub < len(n.disjunctions); sub++ {
+			d := n.disjunctions[sub]
+			disjunctions = append(disjunctions, d)
+			mode, ok := n.insertSingleDisjunct(p, d, true)
+			p++
+			if !ok {
+				inserted = false
+				break
+			}
+			subMode = append(subMode, mode)
+		}
+		for i := len(subMode) - 1; i >= 0; i-- {
+			defMode = combineSubDefault(defMode, subMode[i])
+		}
+
+		// fmt.Println("RESMODE", defMode, combineDefault(n.defaultMode, defMode))
+
+		n.defaultMode = combineDefault(n.defaultMode, defMode)
+	}
+
+	// Find last disjunction at which there is no overflow.
+	for ; p > 0 && n.stack[p-1]+1 >= len(disjunctions[p-1].values); p-- {
+	}
+	if p > 0 {
+		// Increment a valid position and set all subsequent entries to 0.
+		n.stack[p-1]++
+		n.stack = n.stack[:p]
+	}
+	return inserted
+}
+
+func (n *nodeContext) insertSingleDisjunct(p int, d envDisjunct, isSub bool) (mode defaultMode, ok bool) {
+	if p >= len(n.stack) {
+		n.stack = append(n.stack, 0)
+	}
+
+	k := n.stack[p]
+	v := d.values[k]
+	n.isFinal = n.isFinal && k == len(d.values)-1
+	c := adt.MakeConjunct(d.env, v.expr)
+	n.addExprConjunct(c, d.cloneID, d.isEmbed)
+
+	for n.expandOne() {
+	}
+
+	switch {
+	case d.numDefaults == 0:
+		mode = maybeDefault
+	case v.isDefault:
+		mode = isDefault
+	default:
+		mode = notDefault
+	}
+
+	return mode, !n.hasErr()
+}
+
+// Default rules from spec:
+//
+// U1: (v1, d1) & v2       => (v1&v2, d1&v2)
+// U2: (v1, d1) & (v2, d2) => (v1&v2, d1&d2)
+//
+// D1: (v1, d1) | v2       => (v1|v2, d1)
+// D2: (v1, d1) | (v2, d2) => (v1|v2, d1|d2)
+//
+// M1: *v        => (v, v)
+// M2: *(v1, d1) => (v1, d1)
+// or
+// M2: *(v1, d1) => (v1, v1)
+// or
+// M2: *(v1, d1) => v1 if d1 == _|_
+// M2:              d1 otherwise
+//
+// def + maybe -> def
+// not + maybe -> def
+// not + def   -> def
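+//
+// For example, combining M1, D1, and U1 (illustrative):
+//
+//    *1 | int          =>  (1|int, 1)         (M1, D1)
+//    (1|int, 1) & 2    =>  ((1|int)&2, 1&2)   (U1)
+//                      =>  (2, _|_)           no default remains, so 2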
+
+type defaultMode int
+
+const (
+	maybeDefault defaultMode = iota
+	notDefault
+	isDefault
+)
+
+func combineSubDefault(a, b defaultMode) defaultMode {
+	switch {
+	case a == maybeDefault && b == maybeDefault:
+		return maybeDefault
+	case a == maybeDefault && b == notDefault:
+		return notDefault
+	case a == maybeDefault && b == isDefault:
+		return isDefault
+	case a == notDefault && b == maybeDefault:
+		return notDefault
+	case a == notDefault && b == notDefault:
+		return notDefault
+	case a == notDefault && b == isDefault:
+		return isDefault
+	case a == isDefault && b == maybeDefault:
+		return isDefault
+	case a == isDefault && b == notDefault:
+		return notDefault
+	case a == isDefault && b == isDefault:
+		return isDefault
+	default:
+		panic("unreachable")
+	}
+}
+
+func combineDefault(a, b defaultMode) defaultMode {
+	if a > b {
+		a, b = b, a
+	}
+	switch {
+	case a == maybeDefault && b == maybeDefault:
+		return maybeDefault
+	case a == maybeDefault && b == notDefault:
+		return notDefault
+	case a == maybeDefault && b == isDefault:
+		return isDefault
+	case a == notDefault && b == notDefault:
+		return notDefault
+	case a == notDefault && b == isDefault:
+		return notDefault
+	case a == isDefault && b == isDefault:
+		return isDefault
+	default:
+		panic("unreachable")
+	}
+}
diff --git a/internal/core/eval/equality.go b/internal/core/eval/equality.go
new file mode 100644
index 0000000..cb9f9da
--- /dev/null
+++ b/internal/core/eval/equality.go
@@ -0,0 +1,134 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package eval
+
+import "cuelang.org/go/internal/core/adt"
+
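+// Equal reports whether v and w represent the same value. It is used, for
+// instance, to deduplicate disjuncts. Vertices are compared structurally by
+// their arcs and terminal values; scalars are compared with the EqualOp
+// binary operation.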
+func Equal(ctx *adt.OpContext, v, w adt.Value) bool {
+	if x, ok := v.(*adt.Vertex); ok {
+		return equalVertex(ctx, x, w)
+	}
+	if y, ok := w.(*adt.Vertex); ok {
+		return equalVertex(ctx, y, v)
+	}
+	return equalTerminal(ctx, v, w)
+}
+
+func equalVertex(ctx *adt.OpContext, x *adt.Vertex, v adt.Value) bool {
+	y, ok := v.(*adt.Vertex)
+	if !ok {
+		return false
+	}
+	if x == y {
+		return true
+	}
+	if len(x.Arcs) != len(y.Arcs) {
+		return false
+	}
+	if len(x.Arcs) == 0 && len(y.Arcs) == 0 {
+		return equalTerminal(ctx, x.Value, y.Value)
+	}
+
+loop1:
+	for _, a := range x.Arcs {
+		for _, b := range y.Arcs {
+			if a.Label == b.Label {
+				if !Equal(ctx, a, b) {
+					return false
+				}
+				continue loop1
+			}
+		}
+		return false
+	}
+
+	// We do not need to do the following check, because of the pigeon-hole principle.
+	// loop2:
+	// 	for _, b := range y.Arcs {
+	// 		for _, a := range x.Arcs {
+	// 			if a.Label == b.Label {
+	// 				continue loop2
+	// 			}
+	// 		}
+	// 		return false
+	// 	}
+
+	return equalTerminal(ctx, x.Value, y.Value)
+}
+
+func equalTerminal(ctx *adt.OpContext, v, w adt.Value) bool {
+	if v == w {
+		return true
+	}
+	switch x := v.(type) {
+	case *adt.Num, *adt.String, *adt.Bool, *adt.Bytes:
+		if b, ok := adt.BinOp(ctx, adt.EqualOp, v, w).(*adt.Bool); ok {
+			return b.B
+		}
+		return false
+
+	// TODO: for the remainder we are dealing with non-concrete values, so we
+	// could also just not bother.
+
+	case *adt.BoundValue:
+		if y, ok := w.(*adt.BoundValue); ok {
+			return x.Op == y.Op && Equal(ctx, x.Value, y.Value)
+		}
+
+	case *adt.BasicType:
+		if y, ok := w.(*adt.BasicType); ok {
+			return x.K == y.K
+		}
+
+	case *adt.Conjunction:
+		y, ok := w.(*adt.Conjunction)
+		if !ok || len(x.Values) != len(y.Values) {
+			return false
+		}
+		// always ordered the same
+		for i, xe := range x.Values {
+			if !Equal(ctx, xe, y.Values[i]) {
+				return false
+			}
+		}
+		return true
+
+	case *adt.Disjunction:
+		// The best way to compute this is with subsumption, but even that won't
+		// be too accurate. Assume structural equivalence for now.
+		y, ok := w.(*adt.Disjunction)
+		if !ok || len(x.Values) != len(y.Values) {
+			return false
+		}
+		for i, xe := range x.Values {
+			if !Equal(ctx, xe, y.Values[i]) {
+				return false
+			}
+		}
+		return true
+
+	case *adt.ListMarker:
+		_, ok := w.(*adt.ListMarker)
+		return ok
+
+	case *adt.StructMarker:
+		_, ok := w.(*adt.StructMarker)
+		return ok
+
+	case *adt.BuiltinValidator:
+	}
+
+	return false
+}
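As an aside, a minimal hypothetical driver (illustration only) showing how Equal can be used together with the entry points added in eval.go below; it assumes it is compiled within the cuelang.org/go module, since the packages involved are internal:

package main

import (
	"fmt"

	"cuelang.org/go/cue/parser"
	"cuelang.org/go/internal/core/compile"
	"cuelang.org/go/internal/core/eval"
	"cuelang.org/go/internal/core/runtime"
)

func main() {
	const src = `
a: {x: 1}
b: {x: 1}
`
	file, err := parser.ParseFile("example.cue", src)
	if err != nil {
		panic(err)
	}
	r := runtime.New()
	v, err := compile.Files(nil, r, file)
	if err != nil {
		panic(err)
	}
	eval.Evaluate(r, v) // finalize the whole configuration

	ctx := eval.NewContext(r, v)
	a, b := v.Arcs[0], v.Arcs[1]       // fields a and b, in declaration order
	fmt.Println(eval.Equal(ctx, a, b)) // true: structurally equal
}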
diff --git a/internal/core/eval/eval.go b/internal/core/eval/eval.go
new file mode 100644
index 0000000..28fe99b
--- /dev/null
+++ b/internal/core/eval/eval.go
@@ -0,0 +1,1504 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package eval contains the high level CUE evaluation strategy.
+//
+// CUE allows for a significant amount of freedom in order of evaluation due to
+// the commutativity of the unification operation. This package implements one
+// of the possible strategies.
+package eval
+
+// TODO:
+//   - result should be nodeContext: this allows optionals info to be extracted
+//     and computed.
+//
+
+import (
+	"fmt"
+
+	"cuelang.org/go/cue/ast"
+	"cuelang.org/go/cue/errors"
+	"cuelang.org/go/cue/token"
+	"cuelang.org/go/internal/core/adt"
+	"cuelang.org/go/internal/core/debug"
+)
+
+func Evaluate(r adt.Runtime, v *adt.Vertex) {
+	format := func(n adt.Node) string {
+		return debug.NodeString(r, n, printConfig)
+	}
+	e := New(r)
+	c := adt.New(v, &adt.Config{
+		Runtime: r,
+		Unifier: e,
+		Format:  format,
+	})
+	e.Unify(c, v, adt.Finalized)
+}
+
+func New(r adt.Runtime) *Evaluator {
+	return &Evaluator{r: r, index: r}
+}
+
+// TODO: Note: NewContext takes essentially a cue.Value. By making this
+// type more central, we can perhaps avoid context creation.
+
+func NewContext(r adt.Runtime, v *adt.Vertex) *adt.OpContext {
+	e := New(r)
+	return e.NewContext(v)
+}
+
+var printConfig = &debug.Config{Compact: true}
+
+func (e *Evaluator) NewContext(v *adt.Vertex) *adt.OpContext {
+	format := func(n adt.Node) string {
+		return debug.NodeString(e.r, n, printConfig)
+	}
+	return adt.New(v, &adt.Config{
+		Runtime: e.r,
+		Unifier: e,
+		Format:  format,
+	})
+}
+
+var structSentinel = &adt.StructMarker{}
+
+var incompleteSentinel = &adt.Bottom{
+	Code: adt.IncompleteError,
+	Err:  errors.Newf(token.NoPos, "incomplete"),
+}
+
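+// An Evaluator computes ADT values. It implements the adt.Unifier interface
+// through its Unify method and hands out evaluation contexts via NewContext.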
+type Evaluator struct {
+	r       adt.Runtime
+	index   adt.StringIndexer
+	closeID uint32
+}
+
+func (e *Evaluator) nextID() uint32 {
+	e.closeID++
+	return e.closeID
+}
+
+func (e *Evaluator) Eval(v *adt.Vertex) errors.Error {
+	if v.Value == nil {
+		ctx := adt.NewContext(e.r, e, v)
+		e.Unify(ctx, v, adt.Finalized)
+	}
+
+	// extract error if needed.
+	return nil
+}
+
+// Evaluate is used to evaluate a subexpression while evaluating a Vertex
+// with Unify. It may or may not return the original Vertex. It may also
+// terminate evaluation early if it has enough evidence that a certain value
+// can be the only value in a valid configuration. This means that an error
+// may go undetected at this point, as long as it is caught later.
+//
+func (e *Evaluator) Evaluate(c *adt.OpContext, v *adt.Vertex) adt.Value {
+	var resultValue adt.Value
+	if v.Value == nil {
+		save := *v
+		// Use node itself to allow for cycle detection.
+		s := e.evalVertex(c, v, adt.Partial)
+
+		if d := s.disjunct; d != nil && len(d.Values) > 1 && d.NumDefaults != 1 {
+			v.Value = d
+			v.Arcs = nil
+			v.Structs = nil // TODO: maybe not do this.
+			// The conjuncts will have too much information. Better have no
+			// information than incorrect information.
+			for _, d := range d.Values {
+				d.Conjuncts = nil
+			}
+		}
+
+		resultValue = v.Value
+
+		result := s.result()
+		*v = save
+
+		if result.Value != nil {
+			*v = *result
+			resultValue = result.Value
+		}
+
+		// TODO: this seems unnecessary as long as we have a better way
+		// to handle incomplete, and perhaps referenced, nodes.
+		if c.IsTentative() && isStruct(v) {
+			// TODO(perf): do something more efficient perhaps? This discards
+			// the computed arcs so far. Instead, we could have a separate
+			// marker to accumulate results. As this only happens within
+			// comprehensions, the effect is likely minimal, though.
+			arcs := v.Arcs
+			*v = save
+			return &adt.Vertex{
+				Parent: v.Parent,
+				Value:  &adt.StructMarker{},
+				Arcs:   arcs,
+			}
+		}
+		// *v = save // DO NOT ADD.
+		err, _ := resultValue.(*adt.Bottom)
+		// BEFORE RESTORING, copy the value to return one
+		// with the temporary arcs.
+		if !s.done() && (err == nil || err.IsIncomplete()) {
+			// Clear values afterwards
+			*v = save
+		}
+		if !s.done() && s.hasDisjunction() {
+			return &adt.Bottom{Code: adt.IncompleteError}
+		}
+		if s.hasResult() {
+			if b, _ := v.Value.(*adt.Bottom); b != nil {
+				*v = save
+				return b
+			}
+			// TODO: Only use result when not a cycle.
+			v = result
+		}
+		// TODO: Store if concrete and fully resolved.
+
+	} else {
+		b, _ := v.Value.(*adt.Bottom)
+		if b != nil {
+			return b
+		}
+	}
+
+	switch v.Value.(type) {
+	case nil:
+		// Error saved in result.
+		return resultValue // incomplete
+
+	case *adt.ListMarker, *adt.StructMarker:
+		return v
+
+	default:
+		return v.Value
+	}
+}
+
+// Unify implements adt.Unifier.
+//
+// It may not evaluate the entire value, but just enough to be able to compute.
+//
+// Phase one: record everything concrete
+// Phase two: record incomplete
+// Phase three: record cycle.
+func (e *Evaluator) Unify(c *adt.OpContext, v *adt.Vertex, state adt.VertexStatus) {
+	// defer c.PopVertex(c.PushVertex(v))
+
+	if state <= v.Status()+1 {
+		return
+	}
+
+	if x := v.Value; x != nil {
+		// if state == adt.Partial || x == cycle {
+		// 	return
+		// }
+		return
+	}
+
+	n := e.evalVertex(c, v, state)
+
+	switch d := n.disjunct; {
+	case d != nil && len(d.Values) == 1:
+		*v = *(d.Values[0])
+
+	case d != nil && len(d.Values) > 0:
+		v.Value = d
+		v.Arcs = nil
+		v.Structs = nil
+		// The conjuncts will have too much information. Better have no
+		// information than incorrect information.
+		for _, d := range d.Values {
+			d.Conjuncts = nil
+		}
+
+	default:
+		if r := n.result(); r.Value != nil {
+			*v = *r
+		}
+	}
+
+	// Else set it to something.
+
+	if v.Value == nil {
+		panic("errer")
+	}
+
+	// Check whether result is done.
+}
+
+// evalVertex computes the vertex results. The state indicates the minimum
+// status to which this vertex should be evaluated. It should be either
+// adt.Finalized or adt.Partial.
+func (e *Evaluator) evalVertex(c *adt.OpContext, v *adt.Vertex, state adt.VertexStatus) *nodeShared {
+	// fmt.Println(debug.NodeString(c.StringIndexer, v, nil))
+	shared := &nodeShared{
+		ctx:   c,
+		eval:  e,
+		node:  v,
+		stack: nil, // silence linter
+	}
+	saved := *v
+
+	for i := 0; ; i++ {
+
+		// Clear any remaining error.
+		if err := c.Err(); err != nil {
+			panic("uncaught error")
+		}
+
+		// Set the cache to a cycle error to ensure a cyclic reference will result
+		// in an error if applicable. A cyclic error may be ignored for
+		// non-expression references. The cycle error may also be removed as soon
+		// as there is evidence of what a correct value must be, but before all
+		// validation has taken place.
+		*v = saved
+		v.Value = cycle
+		v.UpdateStatus(adt.Evaluating)
+
+		// If the result is a struct, it needs to be closed if:
+		//   1) this node introduces a definition
+		//   2) this node is a child of a node that introduces a definition,
+		//      recursively.
+		//   3) this node embeds a closed struct.
+		needClose := v.Label.IsDef()
+
+		n := &nodeContext{
+			kind:       adt.TopKind,
+			nodeShared: shared,
+			needClose:  needClose,
+
+			// These get cleared upon proof to the contrary.
+			// isDefault: true,
+			isFinal: true,
+		}
+
+		closeID := uint32(0)
+
+		for _, x := range v.Conjuncts {
+			closeID := closeID
+			// TODO: needed for reentrancy. Investigate usefulness for cycle
+			// detection.
+			if x.Env != nil && x.Env.CloseID != 0 {
+				closeID = x.Env.CloseID
+			}
+			n.addExprConjunct(x, closeID, true)
+		}
+
+		if i == 0 {
+			// Use maybeSetCache for cycle breaking
+			for n.maybeSetCache(); n.expandOne(); n.maybeSetCache() {
+			}
+			if v.Status() > adt.Evaluating && state <= adt.Partial {
+				// We have found a partial result. There may still be errors
+				// down the line which may result from further evaluating this
+				// field, but that will be caught when evaluating this field
+				// for real.
+				shared.setResult(v)
+				return shared
+			}
+			if !n.done() && len(n.disjunctions) > 0 && isEvaluating(v) {
+				// We disallow entering computations of disjunctions with
+				// incomplete data.
+				b := c.NewErrf("incomplete cause disjunction")
+				b.Code = adt.IncompleteError
+				v.SetValue(n.ctx, adt.Finalized, b)
+				shared.setResult(v)
+				return shared
+			}
+		}
+
+		// Handle disjunctions. If there are no disjunctions, this call is
+		// equivalent to calling n.postDisjunct.
+		if n.tryDisjuncts() {
+			if v.Value == nil {
+				v.Value = n.getValidators()
+			}
+
+			break
+		}
+	}
+
+	return shared
+}
+
+func isStruct(v *adt.Vertex) bool {
+	_, ok := v.Value.(*adt.StructMarker)
+	return ok
+}
+
+func (n *nodeContext) postDisjunct() {
+	ctx := n.ctx
+
+	// Use maybeSetCache for cycle breaking
+	for n.maybeSetCache(); n.expandOne(); n.maybeSetCache() {
+	}
+
+	// TODO: preparation for association lists:
+	// We assume that association types may not be created dynamically for now.
+	// So we add lists here, after the fixpoint expansion.
+	n.addLists(ctx)
+
+	switch err := n.getErr(); {
+	case err != nil:
+		n.node.Value = err
+		n.errs = nil
+
+	default:
+		if isEvaluating(n.node) {
+			// TODO: this does not yet validate all values.
+
+			if !n.done() { // && !ctx.IsTentative() {
+				// collect incomplete errors.
+				// 	len(n.ifClauses) == 0 &&
+				// 	len(n.forClauses) == 0 &&
+				var err *adt.Bottom // n.incomplete
+				// err = n.incomplete
+				for _, d := range n.dynamicFields {
+					x, _ := ctx.Concrete(d.env, d.field.Key, "dynamic field")
+					b, _ := x.(*adt.Bottom)
+					err = adt.CombineErrors(nil, err, b)
+				}
+				for _, c := range n.forClauses {
+					f := func(env *adt.Environment, st *adt.StructLit) {}
+					err = adt.CombineErrors(nil, err, ctx.Yield(c.env, c.yield, f))
+				}
+				for _, x := range n.exprs {
+					x, _ := ctx.Evaluate(x.Env, x.Expr())
+					b, _ := x.(*adt.Bottom)
+					err = adt.CombineErrors(nil, err, b)
+				}
+				if err == nil {
+					// safeguard.
+					err = incompleteSentinel
+				}
+				n.node.Value = err
+			} else {
+				n.node.Value = nil
+			}
+		}
+
+		// We are no longer evaluating.
+		n.node.UpdateStatus(adt.Partial)
+
+		// Either set to Conjunction or error.
+		var v adt.Value = n.node.Value
+		kind := n.kind
+		markStruct := false
+		if n.isStruct {
+			if kind != 0 && kind&adt.StructKind == 0 {
+				n.node.Value = &adt.Bottom{
+					Err: errors.Newf(token.NoPos,
+						"conflicting values struct and %s", n.kind),
+				}
+			}
+			markStruct = true
+		} else if len(n.node.Structs) > 0 {
+			markStruct = kind&adt.StructKind != 0 && !n.hasTop
+		}
+		if v == nil && markStruct {
+			kind = adt.StructKind
+			n.node.Value = &adt.StructMarker{}
+			v = n.node
+		}
+		if v != nil && adt.IsConcrete(v) {
+			if n.scalar != nil {
+				kind = n.scalar.Kind()
+			}
+			if v.Kind()&^kind != 0 {
+				p := token.NoPos
+				if src := v.Source(); src != nil {
+					p = src.Pos()
+				}
+				n.addErr(errors.Newf(p,
+					// TODO(err): position of all value types.
+					"conflicting types",
+				))
+			}
+			if n.lowerBound != nil {
+				if b := ctx.Validate(n.lowerBound, v); b != nil {
+					n.addBottom(b)
+				}
+			}
+			if n.upperBound != nil {
+				if b := ctx.Validate(n.upperBound, v); b != nil {
+					n.addBottom(b)
+				}
+			}
+			for _, v := range n.checks {
+				if b := ctx.Validate(v, n.node); b != nil {
+					n.addBottom(b)
+				}
+			}
+
+		} else if !ctx.IsTentative() {
+			n.node.Value = n.getValidators()
+		}
+		// else if v == nil {
+		// 	n.node.Value = incompleteSentinel
+		// }
+
+		if v == nil {
+			break
+		}
+
+		switch {
+		case v.Kind() == adt.ListKind:
+			for _, a := range n.node.Arcs {
+				if a.Label.Typ() == adt.StringLabel {
+					n.addErr(errors.Newf(token.NoPos,
+						// TODO(err): add positions for list and arc definitions.
+						"list may not have regular fields"))
+				}
+			}
+
+			// case !isStruct(n.node) && v.Kind() != adt.BottomKind:
+			// 	for _, a := range n.node.Arcs {
+			// 		if a.Label.IsRegular() {
+			// 			n.addErr(errors.Newf(token.NoPos,
+			// 				// TODO(err): add positions of non-struct values and arcs.
+			// 				"cannot combine scalar values with arcs"))
+			// 		}
+			// 	}
+		}
+	}
+
+	var c *CloseDef
+	if a, _ := n.node.Closed.(*acceptor); a != nil {
+		c = a.tree
+		n.needClose = n.needClose || a.isClosed
+	}
+
+	updated := updateClosed(c, n.replace)
+	if updated == nil && n.needClose {
+		updated = &CloseDef{}
+	}
+
+	// TODO retrieve from env.
+
+	if err := n.getErr(); err != nil {
+		if b, _ := n.node.Value.(*adt.Bottom); b != nil {
+			err = adt.CombineErrors(nil, b, err)
+		}
+		n.node.Value = err
+		// TODO: add return: if evaluation of arcs is important it can be done
+		// later. Logically we're done.
+	}
+
+	m := &acceptor{
+		tree:     updated,
+		fields:   n.optionals,
+		isClosed: n.needClose,
+		openList: n.openList,
+		isList:   n.node.IsList(),
+	}
+	if updated != nil || len(n.optionals) > 0 {
+		n.node.Closed = m
+	}
+
+	// Visit arcs recursively to validate and compute error.
+	for _, a := range n.node.Arcs {
+		if updated != nil {
+			a.Closed = m
+		}
+		if updated != nil && m.isClosed {
+			if err := m.verifyArcAllowed(n.ctx, a.Label); err != nil {
+				n.node.Value = err
+			}
+			// TODO: use continue to not process already failed fields,
+			// or at least don't record recursive error.
+			// continue
+		}
+		// Call UpdateStatus here to be absolutely sure the status is set
+		// correctly and that we are not regressing.
+		n.node.UpdateStatus(adt.EvaluatingArcs)
+		n.eval.Unify(ctx, a, adt.Finalized)
+		if err, _ := a.Value.(*adt.Bottom); err != nil {
+			n.node.AddChildError(err)
+		}
+	}
+
+	n.node.UpdateStatus(adt.Finalized)
+}
+
+// TODO: this is now a sentinel. Use a user-facing error that traces where
+// the cycle originates.
+var cycle = &adt.Bottom{
+	Err:  errors.Newf(token.NoPos, "cycle error"),
+	Code: adt.CycleError,
+}
+
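+// isEvaluating reports whether v is currently being evaluated, which is the
+// case exactly when its cached value is the cycle sentinel. It panics if the
+// status and the cached value disagree.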
+func isEvaluating(v *adt.Vertex) bool {
+	isCycle := v.Status() == adt.Evaluating
+	if isCycle != (v.Value == cycle) {
+		panic(fmt.Sprintf("cycle data of sync %d vs %#v", v.Status(), v.Value))
+	}
+	return isCycle
+}
+
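+// nodeShared holds the state of a vertex that is shared between the
+// nodeContexts created while processing its disjuncts, such as the final
+// result and disjunction bookkeeping.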
+type nodeShared struct {
+	eval *Evaluator
+	ctx  *adt.OpContext
+	sub  []*adt.Environment // Environment cache
+	node *adt.Vertex
+
+	// Disjunction handling
+	disjunct   *adt.Disjunction
+	resultNode *nodeContext
+	result_    adt.Vertex
+	stack      []int
+}
+
+func (n *nodeShared) result() *adt.Vertex {
+	return &n.result_
+}
+
+func (n *nodeShared) setResult(v *adt.Vertex) {
+	n.result_ = *v
+}
+
+func (n *nodeShared) hasResult() bool {
+	return n.resultNode != nil //|| n.hasResult_
+	// return n.resultNode != nil || n.hasResult_
+}
+
+func (n *nodeShared) done() bool {
+	// if d := n.disjunct; d == nil || len(n.disjunct.Values) == 0 {
+	// 	return false
+	// }
+	if n.resultNode == nil {
+		return false
+	}
+	return n.resultNode.done()
+}
+
+func (n *nodeShared) hasDisjunction() bool {
+	if n.resultNode == nil {
+		return false
+	}
+	return len(n.resultNode.disjunctions) > 0
+}
+
+func (n *nodeShared) isDefault() bool {
+	if n.resultNode == nil {
+		return false
+	}
+	return n.resultNode.defaultMode == isDefault
+}
+
+// A nodeContext is used to collate all conjuncts of a value to facilitate
+// unification. Conceptually order of unification does not matter. However,
+// order has relevance when performing checks of non-monotic properities. Such
+// checks should only be performed once the full value is known.
+type nodeContext struct {
+	*nodeShared
+
+	// TODO:
+	// filter *adt.Vertex: a subset of the composite with concrete fields, for
+	// bloom-like filtering of disjuncts. We should first verify, however,
+	// whether some breadth-first search gives sufficient performance, as this
+	// should already ensure a quick-fail for struct disjunctions with
+	// discriminators.
+
+	// Current value (may be under construction)
+	scalar adt.Value // TODO: use Value in node.
+
+	// Concrete conjuncts
+	kind       adt.Kind
+	lowerBound *adt.BoundValue // > or >=
+	upperBound *adt.BoundValue // < or <=
+	checks     []adt.Validator // BuiltinValidator, other bound values.
+	errs       *adt.Bottom
+	incomplete *adt.Bottom
+
+	// Struct information
+	dynamicFields []envDynamic
+	ifClauses     []envYield
+	forClauses    []envYield
+	optionals     []fieldSet // env + field
+	// NeedClose:
+	// - node starts definition
+	// - embeds a definition
+	// - parent node is closing
+	needClose bool
+	openList  bool
+	isStruct  bool
+	hasTop    bool
+	newClose  *CloseDef
+	// closeID   uint32 // from parent, or if not exist, new if introducing a def.
+	replace map[uint32]*CloseDef
+
+	// Expression conjuncts
+	lists  []envList
+	vLists []*adt.Vertex
+	exprs  []conjunct
+
+	// Disjunction handling
+	disjunctions []envDisjunct
+	defaultMode  defaultMode
+	isFinal      bool
+}
+
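+// done reports whether all dynamic fields, comprehensions, and queued
+// expressions of this node have been resolved.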
+func (n *nodeContext) done() bool {
+	return len(n.dynamicFields) == 0 &&
+		len(n.ifClauses) == 0 &&
+		len(n.forClauses) == 0 &&
+		len(n.exprs) == 0
+}
+
+// hasErr is used to determine if an evaluation path, for instance a single
+// path after expanding all disjunctions, has an error.
+func (n *nodeContext) hasErr() bool {
+	if n.node.ChildErrors != nil {
+		return true
+	}
+	if n.node.Status() > adt.Evaluating && n.node.IsErr() {
+		return true
+	}
+	return n.ctx.HasErr() || n.errs != nil
+}
+
+func (n *nodeContext) getErr() *adt.Bottom {
+	n.errs = adt.CombineErrors(nil, n.errs, n.ctx.Err())
+	return n.errs
+}
+
+// getValidators returns the value to set for the vertex in case there is no
+// concrete value.
+func (n *nodeContext) getValidators() adt.Value {
+	ctx := n.ctx
+
+	a := []adt.Value{}
+	// if n.node.Value != nil {
+	// 	a = append(a, n.node.Value)
+	// }
+	kind := adt.TopKind
+	if n.lowerBound != nil {
+		a = append(a, n.lowerBound)
+		kind &= n.lowerBound.Kind()
+	}
+	if n.upperBound != nil {
+		a = append(a, n.upperBound)
+		kind &= n.upperBound.Kind()
+	}
+	for _, c := range n.checks {
+		// Drop !=x if x is out of bounds with another bound.
+		if b, _ := c.(*adt.BoundValue); b != nil && b.Op == adt.NotEqualOp {
+			if n.upperBound != nil &&
+				adt.SimplifyBounds(ctx, n.kind, n.upperBound, b) != nil {
+				continue
+			}
+			if n.lowerBound != nil &&
+				adt.SimplifyBounds(ctx, n.kind, n.lowerBound, b) != nil {
+				continue
+			}
+		}
+		a = append(a, c)
+		kind &= c.Kind()
+	}
+	if kind&^n.kind != 0 {
+		a = append(a, &adt.BasicType{K: n.kind})
+	}
+
+	var v adt.Value
+	switch len(a) {
+	case 0:
+		// Src is the combined input.
+		v = &adt.BasicType{K: n.kind}
+
+		// TODO: Change to isStruct?
+		if len(n.node.Structs) > 0 {
+			// n.isStruct = true
+			v = structSentinel
+
+		}
+
+	case 1:
+		v = a[0].(adt.Value)
+
+	default:
+		v = &adt.Conjunction{Values: a}
+	}
+
+	return v
+}
+
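+// maybeSetCache publishes the scalar or error collected so far as a partial
+// value, so that cyclic references can observe it while this node is still
+// being evaluated.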
+func (n *nodeContext) maybeSetCache() {
+	if n.node.Status() > adt.Evaluating { // n.node.Value != nil
+		return
+	}
+	if n.scalar != nil {
+		n.node.SetValue(n.ctx, adt.Partial, n.scalar)
+	}
+	if n.errs != nil {
+		n.node.SetValue(n.ctx, adt.Partial, n.errs)
+	}
+}
+
+type conjunct struct {
+	adt.Conjunct
+	closeID uint32
+	top     bool
+}
+
+type envDynamic struct {
+	env   *adt.Environment
+	field *adt.DynamicField
+}
+
+type envYield struct {
+	env   *adt.Environment
+	yield adt.Yielder
+}
+
+type envList struct {
+	env     *adt.Environment
+	list    *adt.ListLit
+	n       int64 // recorded length after evaluation
+	elipsis *adt.Ellipsis
+}
+
+func (n *nodeContext) addBottom(b *adt.Bottom) {
+	n.errs = adt.CombineErrors(nil, n.errs, b)
+}
+
+func (n *nodeContext) addErr(err errors.Error) {
+	if err != nil {
+		n.errs = adt.CombineErrors(nil, n.errs, &adt.Bottom{
+			Err: err,
+		})
+	}
+}
+
+// addExprConjunct attempts to evaluate an adt.Expr and insert the value
+// into the nodeContext if successful, or queues it for later evaluation if it
+// is incomplete or not yet a value.
+func (n *nodeContext) addExprConjunct(v adt.Conjunct, def uint32, top bool) {
+	env := v.Env
+	if env != nil && env.CloseID != def {
+		e := *env
+		e.CloseID = def
+		env = &e
+	}
+	switch x := v.Expr().(type) {
+	case adt.Value:
+		n.addValueConjunct(env, x)
+
+	case *adt.BinaryExpr:
+		if x.Op == adt.AndOp {
+			n.addExprConjunct(adt.MakeConjunct(env, x.X), def, false)
+			n.addExprConjunct(adt.MakeConjunct(env, x.Y), def, false)
+		} else {
+			n.evalExpr(v, def, top)
+		}
+
+	case *adt.StructLit:
+		n.addStruct(env, x, def, top)
+
+	case *adt.ListLit:
+		n.lists = append(n.lists, envList{env: env, list: x})
+
+	case *adt.DisjunctionExpr:
+		if n.disjunctions != nil {
+			_ = n.disjunctions
+		}
+		n.addDisjunction(env, x, def, top)
+
+	default:
+		// Must be Resolver or Evaluator.
+		n.evalExpr(v, def, top)
+	}
+
+	if top {
+		n.updateReplace(v.Env)
+	}
+}
+
+// evalExpr is only called by addExprConjunct.
+func (n *nodeContext) evalExpr(v adt.Conjunct, closeID uint32, top bool) {
+	// Require an Environment.
+	ctx := n.ctx
+
+	switch x := v.Expr().(type) {
+	case adt.Resolver:
+		arc, err := ctx.Resolve(v.Env, x)
+		if err != nil {
+			if err.IsIncomplete() {
+				n.incomplete = adt.CombineErrors(nil, n.incomplete, err)
+			} else {
+				n.addBottom(err)
+				break
+			}
+		}
+		if arc == nil {
+			n.exprs = append(n.exprs, conjunct{v, closeID, top})
+			break
+		}
+
+		// If this is a cycle error, we have reached a fixed point and adding
+		// conjuncts at this point will not change the value. Also, continuing
+		// to pursue this value will result in an infinite loop.
+		//
+		// TODO: add a mechanism so that the computation will only have to be
+		// done once?
+		if isEvaluating(arc) {
+			break
+		}
+
+		// TODO: detect structural cycles here. A structural cycle can occur
+		// if it is not a reference cycle, but refers to a parent node.
+		// This should only be allowed if it is unified with a finite structure.
+
+		if arc.Label.IsDef() {
+			n.insertClosed(arc)
+		} else {
+			for _, a := range arc.Conjuncts {
+				n.addExprConjunct(a, closeID, top)
+			}
+		}
+
+	case adt.Evaluator:
+		// adt.Interpolation, adt.UnaryExpr, adt.BinaryExpr, adt.CallExpr
+		val, complete := ctx.Evaluate(v.Env, v.Expr())
+		if !complete {
+			n.exprs = append(n.exprs, conjunct{v, closeID, top})
+			break
+		}
+
+		if v, ok := val.(*adt.Vertex); ok {
+			// Handle generated disjunctions (as in the 'or' builtin).
+			// These come as a Vertex, but should not be added as a value.
+			b, ok := v.Value.(*adt.Bottom)
+			if ok && b.IsIncomplete() && len(v.Conjuncts) > 0 {
+				for _, c := range v.Conjuncts {
+					n.addExprConjunct(c, closeID, top)
+				}
+				break
+			}
+		}
+
+		// TODO: insert in vertex as well
+		n.addValueConjunct(v.Env, val)
+
+	default:
+		panic(fmt.Sprintf("unknown expression of type %T", x))
+	}
+}
+
+func (n *nodeContext) insertClosed(arc *adt.Vertex) {
+	id := n.eval.nextID()
+	n.needClose = true
+
+	current := n.newClose
+	n.newClose = nil
+
+	for _, a := range arc.Conjuncts {
+		n.addExprConjunct(a, id, false)
+	}
+
+	current, n.newClose = n.newClose, current
+
+	if current == nil {
+		current = &CloseDef{ID: id}
+	}
+	n.addAnd(current)
+}
+
+func (n *nodeContext) addValueConjunct(env *adt.Environment, v adt.Value) {
+	if x, ok := v.(*adt.Vertex); ok {
+		needClose := false
+		if isStruct(x) {
+			n.isStruct = true
+			// TODO: find better way to mark as struct.
+			// For instance, we may want to add a faux
+			// Structlit for topological sort.
+			// return
+
+			if x.IsClosed(n.ctx) {
+				needClose = true
+			}
+
+			n.node.AddStructs(x.Structs...)
+		}
+
+		if len(x.Conjuncts) > 0 {
+			if needClose {
+				n.insertClosed(x)
+				return
+			}
+			for _, c := range x.Conjuncts {
+				n.addExprConjunct(c, 0, false) // Pass from eval
+			}
+			return
+		}
+
+		if x.IsList() {
+			n.vLists = append(n.vLists, x)
+			return
+		}
+
+		// TODO: evaluate value?
+		switch v := x.Value.(type) {
+		case *adt.ListMarker:
+			panic("unreachable")
+
+		case *adt.StructMarker:
+			for _, a := range x.Arcs {
+				// TODO, insert here as
+				n.insertField(a.Label, adt.MakeConjunct(nil, a))
+				// sub, _ := n.node.GetArc(a.Label)
+				// sub.Add(a)
+			}
+
+		default:
+			n.addValueConjunct(env, v)
+
+			for _, a := range x.Arcs {
+				// TODO, insert here as
+				n.insertField(a.Label, adt.MakeConjunct(nil, a))
+				// sub, _ := n.node.GetArc(a.Label)
+				// sub.Add(a)
+			}
+		}
+
+		return
+		// TODO: Use the Closer to close other fields as well?
+	}
+
+	if b, ok := v.(*adt.Bottom); ok {
+		n.addBottom(b)
+		return
+	}
+
+	ctx := n.ctx
+	kind := n.kind & v.Kind()
+	if kind == adt.BottomKind {
+		// TODO: how to get other conflicting values?
+		n.addErr(errors.Newf(token.NoPos,
+			"invalid value %s (mismatched types %s and %s)",
+			ctx.Str(v), v.Kind(), n.kind))
+		return
+	}
+	n.kind = kind
+
+	switch x := v.(type) {
+	case *adt.Disjunction:
+		n.addDisjunctionValue(env, x, 0, true)
+
+	case *adt.Conjunction:
+		for _, x := range x.Values {
+			n.addValueConjunct(env, x)
+		}
+
+	case *adt.Top:
+		n.hasTop = true
+		// TODO: Is this correct? Needed for ellipsis, but not sure for others.
+		n.optionals = append(n.optionals, fieldSet{env: env, isOpen: true})
+
+	case *adt.BasicType:
+
+	case *adt.BoundValue:
+		switch x.Op {
+		case adt.LessThanOp, adt.LessEqualOp:
+			if y := n.upperBound; y != nil {
+				n.upperBound = nil
+				n.addValueConjunct(env, adt.SimplifyBounds(ctx, n.kind, x, y))
+				return
+			}
+			n.upperBound = x
+
+		case adt.GreaterThanOp, adt.GreaterEqualOp:
+			if y := n.lowerBound; y != nil {
+				n.lowerBound = nil
+				n.addValueConjunct(env, adt.SimplifyBounds(ctx, n.kind, x, y))
+				return
+			}
+			n.lowerBound = x
+
+		case adt.EqualOp, adt.NotEqualOp, adt.MatchOp, adt.NotMatchOp:
+			n.checks = append(n.checks, x)
+			return
+		}
+
+	case adt.Validator:
+		n.checks = append(n.checks, x)
+
+	case *adt.Vertex:
+	// handled above.
+
+	case adt.Value: // *NullLit, *BoolLit, *NumLit, *StringLit, *BytesLit
+		if y := n.scalar; y != nil {
+			if b, ok := adt.BinOp(ctx, adt.EqualOp, x, y).(*adt.Bool); !ok || !b.B {
+				n.addErr(errors.Newf(ctx.Pos(), "incompatible values %s and %s", ctx.Str(x), ctx.Str(y)))
+			}
+			// TODO: do we need to explicitly add again?
+			// n.scalar = nil
+			// n.addValueConjunct(c, adt.BinOp(c, adt.EqualOp, x, y))
+			break
+		}
+		n.scalar = x
+
+	default:
+		panic(fmt.Sprintf("unknown value type %T", x))
+	}
+
+	if n.lowerBound != nil && n.upperBound != nil {
+		if u := adt.SimplifyBounds(ctx, n.kind, n.lowerBound, n.upperBound); u != nil {
+			n.lowerBound = nil
+			n.upperBound = nil
+			n.addValueConjunct(env, u)
+		}
+	}
+}
+
+// addStruct collates the declarations of a struct.
+//
+// addStruct fulfills two additional pivotal functions:
+//   1) Implement vertex unification (this happens through De Bruijn indices
+//      combined with a proper setup of Environments).
+//   2) Implied closedness for definitions.
+//
+func (n *nodeContext) addStruct(
+	env *adt.Environment,
+	s *adt.StructLit,
+	newDef uint32,
+	top bool) {
+
+	ctx := n.ctx
+	n.node.AddStructs(s)
+
+	// Inherit closeID from environment, unless this is a new definition.
+	closeID := newDef
+	if closeID == 0 && env != nil {
+		closeID = env.CloseID
+	}
+
+	// NOTE: This is a crucial point in the code:
+// Unification dereferencing happens here. The child nodes are set to
+	// an Environment linked to the current node. Together with the De Bruijn
+	// indices, this determines to which Vertex a reference resolves.
+
+	// TODO(perf): consider using environment cache:
+	// var childEnv *adt.Environment
+	// for _, s := range n.nodeCache.sub {
+	// 	if s.Up == env {
+	// 		childEnv = s
+	// 	}
+	// }
+	childEnv := &adt.Environment{
+		Up:      env,
+		Vertex:  n.node,
+		CloseID: closeID,
+	}
+
+	var hasOther, hasBulk adt.Node
+
+	opt := fieldSet{env: childEnv}
+
+	for _, d := range s.Decls {
+		switch x := d.(type) {
+		case *adt.Field:
+			opt.MarkField(ctx, x)
+			// handle in next iteration.
+
+		case *adt.OptionalField:
+			opt.AddOptional(ctx, x)
+
+		case *adt.DynamicField:
+			hasOther = x
+			n.dynamicFields = append(n.dynamicFields, envDynamic{childEnv, x})
+			opt.AddDynamic(ctx, childEnv, x)
+
+		case *adt.ForClause:
+			hasOther = x
+			n.forClauses = append(n.forClauses, envYield{childEnv, x})
+
+		case adt.Yielder:
+			hasOther = x
+			n.ifClauses = append(n.ifClauses, envYield{childEnv, x})
+
+		case adt.Expr:
+			// push and pop embedding type.
+			id := n.eval.nextID()
+
+			current := n.newClose
+			n.newClose = nil
+
+			hasOther = x
+			n.addExprConjunct(adt.MakeConjunct(childEnv, x), id, false)
+
+			current, n.newClose = n.newClose, current
+
+			if current == nil {
+				current = &CloseDef{ID: id} // TODO: isClosed?
+			} else {
+				// n.needClose = true
+			}
+			n.addOr(closeID, current)
+
+		case *adt.BulkOptionalField:
+			hasBulk = x
+			opt.AddBulk(ctx, x)
+
+		case *adt.Ellipsis:
+			hasBulk = x
+			opt.AddEllipsis(ctx, x)
+
+		default:
+			panic("unreachable")
+		}
+	}
+
+	if hasBulk != nil && hasOther != nil {
+		n.addErr(errors.Newf(token.NoPos, "cannot mix bulk optional fields with dynamic fields, embeddings, or comprehensions within the same struct"))
+	}
+
+	// Apply existing fields
+	for _, arc := range n.node.Arcs {
+		// Reuse adt.Acceptor interface.
+		opt.MatchAndInsert(ctx, arc)
+	}
+
+	n.optionals = append(n.optionals, opt)
+
+	for _, d := range s.Decls {
+		switch x := d.(type) {
+		case *adt.Field:
+			n.insertField(x.Label, adt.MakeConjunct(childEnv, x))
+		}
+	}
+}
+
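+// insertField adds the conjunct x to the arc for label f, creating the arc if
+// it does not exist yet. Optional constraints collected so far are applied to
+// newly created arcs, and a string label marks the node as a struct.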
+func (n *nodeContext) insertField(f adt.Feature, x adt.Conjunct) *adt.Vertex {
+	ctx := n.ctx
+	arc, isNew := n.node.GetArc(f)
+
+	if f.IsString() {
+		n.isStruct = true
+	}
+
+	// TODO: disallow adding conjuncts when cache set?
+	arc.AddConjunct(x)
+
+	if isNew {
+		for _, o := range n.optionals {
+			o.MatchAndInsert(ctx, arc)
+		}
+	}
+	return arc
+}
+
+// expandOne adds dynamic fields to a node until a fixed point is reached.
+// On each iteration, dynamic fields that cannot resolve due to incomplete
+// values are skipped. They will be retried on the next iteration until no
+// progress can be made. Note that a dynamic field may add more dynamic fields.
+//
+// forClauses are processed after all other clauses. A struct may be referenced
+// before it is complete, meaning that fields added by other forms of injection
+// may influence the result of a for clause _after_ it has already been
+// processed. We could instead detect such insertion and feed it to the
+// ForClause to generate another entry or have the for clause be recomputed.
+// This seems too complicated and would lead to iffy edge cases.
+// TODO(error): detect when a field is added to a struct that is already used
+// in a for clause.
+func (n *nodeContext) expandOne() (done bool) {
+	if n.done() {
+		return false
+	}
+
+	var progress bool
+
+	if progress = n.injectDynamic(); progress {
+		return true
+	}
+
+	if n.ifClauses, progress = n.injectEmbedded(n.ifClauses); progress {
+		return true
+	}
+
+	if n.forClauses, progress = n.injectEmbedded(n.forClauses); progress {
+		return true
+	}
+
+	// Do expressions after comprehensions, as comprehensions can never
+	// refer to embedded scalars, whereas expressions may refer to generated
+	// fields if we were to allow attributes to be defined alongside
+	// scalars.
+	exprs := n.exprs
+	n.exprs = n.exprs[:0]
+	for _, x := range exprs {
+		n.addExprConjunct(x.Conjunct, x.closeID, x.top)
+
+		// collect And and Or
+	}
+	if len(n.exprs) < len(exprs) {
+		return true
+	}
+
+	// No progress, report error later if needed: unification with
+	// disjuncts may resolve this later on.
+	return false
+}
+
+// injectDynamic evaluates and inserts dynamic declarations.
+func (n *nodeContext) injectDynamic() (progress bool) {
+	ctx := n.ctx
+	k := 0
+
+	a := n.dynamicFields
+	for _, d := range n.dynamicFields {
+		var f adt.Feature
+		v, complete := ctx.Evaluate(d.env, d.field.Key)
+		if !complete {
+			a[k] = d
+			k++
+			continue
+		}
+		if b, _ := v.(*adt.Bottom); b != nil {
+			n.addValueConjunct(nil, b)
+			continue
+		}
+		f = ctx.Label(v)
+		n.insertField(f, adt.MakeConjunct(d.env, d.field))
+	}
+
+	progress = k < len(n.dynamicFields)
+
+	n.dynamicFields = a[:k]
+
+	return progress
+}
+
+// injectEmbedded evaluates and inserts embeddings. It first evaluates all
+// embeddings before inserting the results to ensure that the order of
+// evaluation does not matter.
+func (n *nodeContext) injectEmbedded(all []envYield) (a []envYield, progress bool) {
+	ctx := n.ctx
+	type envStruct struct {
+		env *adt.Environment
+		s   *adt.StructLit
+	}
+	var sa []envStruct
+	f := func(env *adt.Environment, st *adt.StructLit) {
+		sa = append(sa, envStruct{env, st})
+	}
+
+	k := 0
+	for _, d := range all {
+		sa = sa[:0]
+
+		if err := ctx.Yield(d.env, d.yield, f); err != nil {
+			if err.IsIncomplete() {
+				all[k] = d
+				k++
+			} else {
+				// continue to collect other errors.
+				n.addBottom(err)
+			}
+			continue
+		}
+
+		for _, st := range sa {
+			n.addStruct(st.env, st.s, 0, true)
+		}
+	}
+
+	return all[:k], k < len(all)
+}
+
+// addLists
+//
+// TODO: association arrays:
+// If an association array marker was present in a struct, create a struct node
+// instead of a list node. In either case, a node may only have list fields
+// or struct fields and not both.
+//
+// addLists should be run after the fixpoint expansion:
+//    - it enforces that comprehensions may not refer to the list itself
+//    - there may be no other fields within the list.
+//
+// TODO(embeddedScalars): for embedded scalars, there should be another pass
+// of expression evaluation after expanding lists.
+func (n *nodeContext) addLists(c *adt.OpContext) {
+	if len(n.lists) == 0 && len(n.vLists) == 0 {
+		return
+	}
+
+	for _, a := range n.node.Arcs {
+		if t := a.Label.Typ(); t == adt.StringLabel {
+			n.addErr(errors.Newf(token.NoPos, "conflicting types list and struct"))
+		}
+	}
+
+	// fmt.Println(len(n.lists), "ELNE")
+
+	isOpen := true
+	max := 0
+	var maxNode adt.Expr
+
+	for _, l := range n.vLists {
+		elems := l.Elems()
+		isClosed := l.IsClosed(c)
+
+		switch {
+		case len(elems) < max:
+			if isClosed {
+				n.invalidListLength(len(elems), max, l, maxNode)
+				continue
+			}
+
+		case len(elems) > max:
+			if !isOpen {
+				n.invalidListLength(max, len(elems), maxNode, l)
+				continue
+			}
+			isOpen = !isClosed
+			max = len(elems)
+			maxNode = l
+
+		case isClosed:
+			isOpen = false
+			maxNode = l
+		}
+
+		for _, a := range elems {
+			if a.Conjuncts == nil {
+				n.insertField(a.Label, adt.MakeConjunct(nil, a.Value))
+				continue
+			}
+			for _, c := range a.Conjuncts {
+				n.insertField(a.Label, c)
+			}
+		}
+	}
+
+outer:
+	for i, l := range n.lists {
+		index := int64(0)
+		hasComprehension := false
+		for j, elem := range l.list.Elems {
+			switch x := elem.(type) {
+			case adt.Yielder:
+				err := c.Yield(l.env, x, func(e *adt.Environment, st *adt.StructLit) {
+					label, err := adt.MakeLabel(x.Source(), index, adt.IntLabel)
+					n.addErr(err)
+					index++
+					n.insertField(label, adt.MakeConjunct(e, st))
+				})
+				hasComprehension = true
+				if err.IsIncomplete() {
+
+				}
+
+			case *adt.Ellipsis:
+				if j != len(l.list.Elems)-1 {
+					n.addErr(errors.Newf(token.NoPos,
+						"ellipsis must be last element in list"))
+				}
+
+				n.lists[i].elipsis = x
+
+			default:
+				label, err := adt.MakeLabel(x.Source(), index, adt.IntLabel)
+				n.addErr(err)
+				index++ // TODO: don't use insertField.
+				n.insertField(label, adt.MakeConjunct(l.env, x))
+			}
+
+			// Terminate early in case of runaway comprehension.
+			if !isOpen && int(index) > max {
+				n.invalidListLength(max, int(index), maxNode, l.list)
+				continue outer
+			}
+		}
+
+		switch closed := n.lists[i].elipsis == nil; {
+		case int(index) < max:
+			if closed {
+				n.invalidListLength(int(index), max, l.list, maxNode)
+				continue
+			}
+
+		case int(index) > max,
+			closed && isOpen,
+			(!closed == isOpen) && !hasComprehension:
+			max = int(index)
+			maxNode = l.list
+			isOpen = !closed
+		}
+
+		n.lists[i].n = index
+	}
+
+	// add additionalItem values to list and construct optionals.
+	elems := n.node.Elems()
+	for _, l := range n.vLists {
+		a, _ := l.Closed.(*acceptor)
+		if a == nil {
+			continue
+		}
+
+		newElems := l.Elems()
+		if len(newElems) >= len(elems) {
+			continue // error generated earlier, if applicable.
+		}
+
+		n.optionals = append(n.optionals, a.fields...)
+
+		for _, arc := range elems[len(newElems):] {
+			l.MatchAndInsert(c, arc)
+		}
+	}
+
+	for _, l := range n.lists {
+		if l.elipsis == nil {
+			continue
+		}
+
+		f := fieldSet{env: l.env}
+		f.AddEllipsis(c, l.elipsis)
+
+		n.optionals = append(n.optionals, f)
+
+		for _, arc := range elems[l.n:] {
+			f.MatchAndInsert(c, arc)
+		}
+	}
+
+	sources := []ast.Expr{}
+	// Add conjuncts for additional items.
+	for _, l := range n.lists {
+		if l.elipsis == nil {
+			continue
+		}
+		if src, _ := l.elipsis.Source().(ast.Expr); src != nil {
+			sources = append(sources, src)
+		}
+	}
+
+	n.openList = isOpen
+
+	n.node.SetValue(c, adt.Partial, &adt.ListMarker{
+		Src:    ast.NewBinExpr(token.AND, sources...),
+		IsOpen: isOpen,
+	})
+}
+
+func (n *nodeContext) invalidListLength(na, nb int, a, b adt.Expr) {
+	n.addErr(errors.Newf(n.ctx.Pos(),
+		"incompatible list lengths (%d and %d)", na, nb))
+}
diff --git a/internal/core/eval/eval_test.go b/internal/core/eval/eval_test.go
new file mode 100644
index 0000000..9dea9a5
--- /dev/null
+++ b/internal/core/eval/eval_test.go
@@ -0,0 +1,156 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package eval
+
+import (
+	"flag"
+	"fmt"
+	"testing"
+
+	"cuelang.org/go/cue/format"
+	"cuelang.org/go/cue/parser"
+	"cuelang.org/go/internal/core/adt"
+	"cuelang.org/go/internal/core/compile"
+	"cuelang.org/go/internal/core/debug"
+	"cuelang.org/go/internal/core/runtime"
+	"cuelang.org/go/internal/cuetxtar"
+	"cuelang.org/go/pkg/strings"
+)
+
+var (
+	update = flag.Bool("update", false, "update the test files")
+	todo   = flag.Bool("todo", false, "run tests marked with #todo-compile")
+)
+
+func TestEval(t *testing.T) {
+	test := cuetxtar.TxTarTest{
+		Root:   "../../../cue/testdata",
+		Name:   "eval",
+		Update: *update,
+		Skip:   alwaysSkip,
+		ToDo:   needFix,
+	}
+
+	if *todo {
+		test.ToDo = nil
+	}
+
+	r := runtime.New()
+
+	test.Run(t, func(t *cuetxtar.Test) {
+		a := t.ValidInstances()
+
+		v, err := compile.Files(nil, r, a[0].Files...)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		e := Evaluator{
+			r:     r,
+			index: r,
+		}
+
+		err = e.Eval(v)
+		t.WriteErrors(err)
+
+		if v == nil {
+			return
+		}
+
+		debug.WriteNode(t, r, v, &debug.Config{Cwd: t.Dir})
+		fmt.Fprintln(t)
+	})
+}
+
+var alwaysSkip = map[string]string{
+	"compile/erralias": "compile error",
+}
+
+var needFix = map[string]string{
+	"fulleval/048_dont_pass_incomplete_values_to_builtins": "import",
+	"fulleval/050_json_Marshaling_detects_incomplete":      "import",
+	"fulleval/051_detectIncompleteYAML":                    "import",
+	"fulleval/052_detectIncompleteJSON":                    "import",
+	"fulleval/056_issue314":                                "import",
+	"resolve/013_custom_validators":                        "import",
+
+	"export/027": "cycle",
+	"export/028": "cycle",
+	"export/030": "cycle",
+
+	"cycle/025_cannot_resolve_references_that_would_be_ambiguous": "cycle",
+
+	"export/020":                  "builtin",
+	"resolve/034_closing_structs": "builtin",
+	"resolve/048_builtins":        "builtin",
+
+	"fulleval/027_len_of_incomplete_types": "builtin",
+
+	"fulleval/032_or_builtin_should_not_fail_on_non-concrete_empty_list": "builtin",
+
+	"fulleval/049_alias_reuse_in_nested_scope": "builtin",
+	"fulleval/053_issue312":                    "builtin",
+}
+
+// TestX is for debugging. Do not delete.
+func TestX(t *testing.T) {
+	t.Skip()
+	in := `
+	// max: >99 | *((5|*1) & 5)
+	// *( 5 | *_|_ )
+	// 1 | *((5|*1) & 5)
+
+
+	max: >= (num+0) | * (num+0)
+	res: !=4 | * 1
+	num:  *(1+(res+0)) | >(res+0)
+
+    // (1 | *2 | 3) & (1 | 2 | *3)
+
+	// m1: (*1 | (*2 | 3)) & (>=2 & <=3)
+	// m2: (*1 | (*2 | 3)) & (2 | 3)
+	// m3: (*1 | *(*2 | 3)) & (2 | 3)
+	// b: (*"a" | "b") | "c"
+	// {a: 1} | {b: 2}
+	`
+
+	if strings.TrimSpace(in) == "" {
+		t.Skip()
+	}
+
+	file, err := parser.ParseFile("TestX", in)
+	if err != nil {
+		t.Fatal(err)
+	}
+	r := runtime.New()
+
+	b, err := format.Node(file)
+	_, _ = b, err
+	// fmt.Println(string(b), err)
+
+	v, err := compile.Files(nil, r, file)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	ctx := NewContext(r, v)
+
+	ctx.Unify(ctx, v, adt.Finalized)
+	// if err != nil {
+	// 	t.Fatal(err)
+	// }
+
+	t.Error(debug.NodeString(r, v, nil))
+}
diff --git a/internal/core/eval/optionals.go b/internal/core/eval/optionals.go
new file mode 100644
index 0000000..1dbd376
--- /dev/null
+++ b/internal/core/eval/optionals.go
@@ -0,0 +1,236 @@
+// Copyright 2020 CUE Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package eval
+
+// TODO: rename this file to fieldset.go
+
+import "cuelang.org/go/internal/core/adt"
+
+// fieldSet represents the fields for a single struct literal, along with
+// the constraints on fields that may be added.
+type fieldSet struct {
+	// TODO: look at consecutive identical environments to figure out
+	// what belongs to same definition?
+	env *adt.Environment
+
+	// fields records the optional conjuncts of all explicit fields.
+	// Required fields are marked with an empty list of optional conjuncts.
+	fields []field
+
+	// literal map[adt.Feature][]adt.Node
+
+	// excluded are all literal fields that already exist.
+	bulk       []bulkField
+	additional []adt.Expr
+	isOpen     bool // has a ...
+}
+
+type field struct {
+	label    adt.Feature
+	optional []adt.Node
+}
+
+type bulkField struct {
+	check fieldMatcher
+	expr  adt.Node // *adt.BulkOptionalField // Conjunct
+}
+
+func (o *fieldSet) Accept(c *adt.OpContext, f adt.Feature) bool {
+	if len(o.additional) > 0 {
+		return true
+	}
+	if o.fieldIndex(f) >= 0 {
+		return true
+	}
+	for _, b := range o.bulk {
+		if b.check.Match(c, f) {
+			return true
+		}
+	}
+	return false
+}
+
+// MatchAndInsert finds the optional parts matching a given Arc and adds their
+// conjuncts. Bulk fields are only applied if no declared fields match, and
+// additional constraints are only added if neither regular nor bulk fields match.
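+//
+// For illustration, given the declarations
+//
+//	foo?: int        // explicit optional field
+//	[=~"^b"]: string // bulk optional field (pattern constraint)
+//	...              // ellipsis: accept any other field
+//
+// an arc "foo" receives int, an arc "bar" receives string, and any other
+// regular field is merely accepted by the ellipsis.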
+func (o *fieldSet) MatchAndInsert(c *adt.OpContext, arc *adt.Vertex) {
+	env := o.env
+
+	// Match normal fields
+	p := 0
+	for ; p < len(o.fields); p++ {
+		if o.fields[p].label == arc.Label {
+			break
+		}
+	}
+	if p < len(o.fields) {
+		for _, e := range o.fields[p].optional {
+			arc.AddConjunct(adt.MakeConjunct(env, e))
+		}
+		return
+	}
+
+	if !arc.Label.IsRegular() {
+		return
+	}
+
+	bulkEnv := *env
+	bulkEnv.DynamicLabel = arc.Label
+
+	// match bulk optional fields / pattern properties
+	matched := false
+	for _, f := range o.bulk {
+		if f.check.Match(c, arc.Label) {
+			matched = true
+			if f.expr != nil {
+				arc.AddConjunct(adt.MakeConjunct(&bulkEnv, f.expr))
+			}
+		}
+	}
+	if matched {
+		return
+	}
+
+	// match others
+	for _, x := range o.additional {
+		arc.AddConjunct(adt.MakeConjunct(env, x))
+	}
+}
+
+func (o *fieldSet) fieldIndex(f adt.Feature) int {
+	for i := range o.fields {
+		if o.fields[i].label == f {
+			return i
+		}
+	}
+	return -1
+}
+
+func (o *fieldSet) MarkField(c *adt.OpContext, x *adt.Field) {
+	if o.fieldIndex(x.Label) < 0 {
+		o.fields = append(o.fields, field{label: x.Label})
+	}
+}
+
+func (o *fieldSet) AddOptional(c *adt.OpContext, x *adt.OptionalField) {
+	p := o.fieldIndex(x.Label)
+	if p < 0 {
+		p = len(o.fields)
+		o.fields = append(o.fields, field{label: x.Label})
+	}
+	o.fields[p].optional = append(o.fields[p].optional, x)
+}
+
+func (o *fieldSet) AddDynamic(c *adt.OpContext, env *adt.Environment, x *adt.DynamicField) {
+	// not in bulk: count as regular field?
+	o.bulk = append(o.bulk, bulkField{dynamicMatcher{env, x.Key}, nil})
+}
+
+func (o *fieldSet) AddBulk(c *adt.OpContext, x *adt.BulkOptionalField) {
+	v, ok := c.Evaluate(o.env, x.Filter)
+	if !ok {
+		// TODO: handle dynamically
+		return
+	}
+	switch f := v.(type) {
+	case *adt.Num:
+		// Just assert an error. Lists have not been expanded yet at
+		// this point, so there is no need to check for existing
+		// fields.
+		l, err := adt.MakeLabel(x.Src, c.Int64(f), adt.IntLabel)
+		if err != nil {
+			c.AddErr(err)
+			return
+		}
+		o.bulk = append(o.bulk, bulkField{labelMatcher(l), x})
+
+	case *adt.Top:
+		o.bulk = append(o.bulk, bulkField{typeMatcher(adt.TopKind), x})
+
+	case *adt.BasicType:
+		o.bulk = append(o.bulk, bulkField{typeMatcher(f.K), x})
+
+	case *adt.String:
+		l := c.Label(f)
+		o.bulk = append(o.bulk, bulkField{labelMatcher(l), x})
+
+	case adt.Validator:
+		o.bulk = append(o.bulk, bulkField{validateMatcher{f}, x})
+
+	default:
+		// TODO(err): not allowed type
+	}
+}
+
+func (o *fieldSet) AddEllipsis(c *adt.OpContext, x *adt.Ellipsis) {
+	expr := x.Value
+	if x.Value == nil {
+		o.isOpen = true
+		expr = &adt.Top{}
+	}
+	o.additional = append(o.additional, expr)
+}
+
+type fieldMatcher interface {
+	Match(c *adt.OpContext, f adt.Feature) bool
+}
+
+type labelMatcher adt.Feature
+
+func (m labelMatcher) Match(c *adt.OpContext, f adt.Feature) bool {
+	return adt.Feature(m) == f
+}
+
+type typeMatcher adt.Kind
+
+func (m typeMatcher) Match(c *adt.OpContext, f adt.Feature) bool {
+	switch f.Typ() {
+	case adt.StringLabel:
+		return adt.Kind(m)&adt.StringKind != 0
+
+	case adt.IntLabel:
+		return adt.Kind(m)&adt.IntKind != 0
+	}
+	return false
+}
+
+type validateMatcher struct {
+	adt.Validator
+}
+
+func (m validateMatcher) Match(c *adt.OpContext, f adt.Feature) bool {
+	v := f.ToValue(c)
+	return c.Validate(m.Validator, v) == nil
+}
+
+type dynamicMatcher struct {
+	env  *adt.Environment
+	expr adt.Expr
+}
+
+func (m dynamicMatcher) Match(c *adt.OpContext, f adt.Feature) bool {
+	if !f.IsRegular() || !f.IsString() {
+		return false
+	}
+	v, ok := c.Evaluate(m.env, m.expr)
+	if !ok {
+		return false
+	}
+	s, ok := v.(*adt.String)
+	if !ok {
+		return false
+	}
+	return f.SelectorString(c) == s.Str
+}