doc/tutorial/kubernetes: update to new syntax

Change-Id: I3f03e5f7a91fc918ec4e946f5cdae214c35828ce
Reviewed-on: https://cue-review.googlesource.com/c/cue/+/3863
Reviewed-by: Marcel van Lohuizen <mpvl@golang.org>
diff --git a/doc/tutorial/basics/2_types/90_templates.txt b/doc/tutorial/basics/2_types/90_templates.txt
index dac0e95..03d201d 100644
--- a/doc/tutorial/basics/2_types/90_templates.txt
+++ b/doc/tutorial/basics/2_types/90_templates.txt
@@ -6,14 +6,18 @@
 description = ""
 
 -- text.md --
-<!-- jba: this is not in the spec, aside from the TemplateLabel grammar rule. -->
+One of CUE's most powerful features is the ability
+to specify optional fields in bulk.
+This allows one to specify constraints,
+or templates,
+to be unified with each field of a struct.
 
-One of CUE's most powerful features is templates.
-A template defines a value to be unified with each field of a struct.
-
-The template's identifier (in angular brackets) is bound to name of each
-of its sibling fields and is visible within the template value
-that is unified with each of the siblings.
+An optional field set is an expression in square brackets
+that specifies which fields a constraint applies to
+(currently only `string`, which matches all fields, is supported).
+Using an alias in the square brackets binds the
+label of the matched field to the given identifier,
+which can then be used within the template.
 
 -- templates.cue --
 // The following struct is unified with all elements in job.
diff --git a/doc/tutorial/kubernetes/README.md b/doc/tutorial/kubernetes/README.md
index 80ca7b0..551d3cd 100644
--- a/doc/tutorial/kubernetes/README.md
+++ b/doc/tutorial/kubernetes/README.md
@@ -145,7 +145,7 @@
 
 apiVersion: "v1"
 kind:       "ConfigMap"
-metadata name: "prometheus"
+metadata: name: "prometheus"
 data: {
     "alert.rules": """
         groups:
@@ -173,10 +173,10 @@
 
 import "encoding/yaml"
 
-configMap prometheus: {
+configMap: prometheus: {
     apiVersion: "v1"
     kind:       "ConfigMap"
-    metadata name: "prometheus"
+    metadata: name: "prometheus"
     data: {
         "alert.rules": yaml.Marshal(_cue_alert_rules)
         _cue_alert_rules: {
@@ -247,7 +247,7 @@
 $ cat <<EOF > kube.cue
 package kube
 
-service <ID>: {
+service: [ID=_]: {
     apiVersion: "v1"
     kind:       "Service"
     metadata: {
@@ -269,7 +269,7 @@
     }
 }
 
-deployment <ID>: {
+deployment: [ID=_]: {
     apiVersion: "extensions/v1beta1"
     kind:       "Deployment"
     metadata name: ID
@@ -290,8 +290,8 @@
 EOF
 ```
 
-By replacing the service and deployment name with `<ID>` we have changed the
-definition into a template.
+By replacing the service and deployment names with `[ID=_]` we have changed the
+definitions into templates that match any field.
 CUE binds the field name to `ID` as a result.
 During importing we used `metadata.name` as a key for the object names,
 so we can now set this field to `ID`.
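+
+For example (an illustrative snippet, not taken from the tutorial's files), an
+entry unified with this template picks up its name from its own label:
+
+```
+service: waiter: {}    // ID is bound to "waiter", so metadata: name: "waiter"
+```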
@@ -422,42 +422,42 @@
 ```
 $ cat <<EOF >> kube.cue
 
-daemonSet <ID>: _spec & {
+daemonSet: [ID=_]: _spec & {
     apiVersion: "extensions/v1beta1"
     kind:       "DaemonSet"
     Name ::     ID
 }
 
-statefulSet <ID>: _spec & {
+statefulSet: [ID=_]: _spec & {
     apiVersion: "apps/v1beta1"
     kind:       "StatefulSet"
     Name ::     ID
 }
 
-deployment <ID>: _spec & {
+deployment: [ID=_]: _spec & {
     apiVersion: "extensions/v1beta1"
     kind:       "Deployment"
     Name ::     ID
     spec replicas: *1 | int
 }
 
-configMap <ID>: {
-    metadata name: ID
-    metadata labels component: Component
+configMap: [ID=_]: {
+    metadata: name: ID
+    metadata: labels: component: Component
 }
 
 _spec: {
     Name :: string
 
-    metadata name: Name
-    metadata labels component: Component
-    spec template: {
-        metadata labels: {
+    metadata: name: Name
+    metadata: labels: component: Component
+    spec: template: {
+        metadata: labels: {
             app:       Name
             component: Component
             domain:    "prod"
         }
-        spec containers: [{name: Name}]
+        spec: containers: [{name: Name}]
     }
 }
 EOF
@@ -483,17 +483,17 @@
 
 // Define the _export option and set the default to true
 // for all ports defined in all containers.
-_spec spec template spec containers: [...{
+_spec: spec: template: spec: containers: [...{
     ports: [...{
         _export: *true | false // include the port in the service
     }]
 }]
 
 for x in [deployment, daemonSet, statefulSet] for k, v in x {
-    service "\(k)": {
-        spec selector: v.spec.template.metadata.labels
+    service: "\(k)": {
+        spec: selector: v.spec.template.metadata.labels
 
-        spec ports: [ {
+        spec: ports: [ {
             Port = p.containerPort // Port is an alias
             port:       *Port | int
             targetPort: *Port | int
@@ -536,17 +536,17 @@
 ```
 $ cat <<EOF >> infra/events/kube.cue
 
-deployment events spec template spec containers: [{ ports: [{_export: false}, _] }]
+deployment: events: spec: template: spec: containers: [{ ports: [{_export: false}, _] }]
 EOF
 
 $ cat <<EOF >> infra/tasks/kube.cue
 
-deployment tasks spec template spec containers: [{ ports: [{_export: false}, _] }]
+deployment: tasks: spec: template: spec: containers: [{ ports: [{_export: false}, _] }]
 EOF
 
 $ cat <<EOF >> infra/watcher/kube.cue
 
-deployment watcher spec template spec containers: [{ ports: [{_export: false}, _] }]
+deployment: watcher: spec: template: spec: containers: [{ ports: [{_export: false}, _] }]
 EOF
 ```
 In practice it would be better form to add this field in the original
@@ -575,7 +575,7 @@
 $ head frontend/breaddispatcher/kube.cue
 package kube
 
-deployment breaddispatcher: {
+deployment: breaddispatcher: {
     spec: {
         template: {
             metadata: {
@@ -587,8 +587,8 @@
 $ head -7 frontend/breaddispatcher/kube.cue
 package kube
 
-deployment breaddispatcher spec template: {
-    metadata annotations: {
+deployment: breaddispatcher: spec: template: {
+    metadata: annotations: {
         "prometheus.io.scrape": "true"
         "prometheus.io.port":   "7080"
     }
@@ -622,12 +622,12 @@
 ```
 $ cat <<EOF >> frontend/kube.cue
 
-deployment <X> spec template: {
-    metadata annotations: {
+deployment: [string]: spec: template: {
+    metadata: annotations: {
         "prometheus.io.scrape": "true"
         "prometheus.io.port":   "\(spec.containers[0].ports[0].containerPort)"
     }
-    spec containers: [{
+    spec: containers: [{
         ports: [{containerPort: *7080 | int}] // 7080 is the default
     }]
 }
@@ -670,9 +670,9 @@
 ```
 $ cat <<EOF >> kitchen/kube.cue
 
-deployment <Name> spec template: {
-    metadata annotations "prometheus.io.scrape": "true"
-    spec containers: [{
+deployment: [string]: spec: template: {
+    metadata: annotations: "prometheus.io.scrape": "true"
+    spec: containers: [{
         ports: [{
             containerPort: 8080
         }]
@@ -702,18 +702,18 @@
 ```
 $ cat <<EOF >> kitchen/kube.cue
 
-deployment <ID> spec template spec: {
+deployment: [ID=_]: spec: template: spec: {
     hasDisks :: *true | bool
 
     // field comprehension using just "if"
     if hasDisks {
         volumes: [{
             name: *"\(ID)-disk" | string
-            gcePersistentDisk pdName: *"\(ID)-disk" | string
-            gcePersistentDisk fsType: "ext4"
+            gcePersistentDisk: pdName: *"\(ID)-disk" | string
+            gcePersistentDisk: fsType: "ext4"
         }, {
             name: *"secret-\(ID)" | string
-            secret secretName: *"\(ID)-secrets" | string
+            secret: secretName: *"\(ID)-secrets" | string
         }, ...]
 
         containers: [{
@@ -732,7 +732,7 @@
 
 $ cat <<EOF >> kitchen/souschef/kube.cue
 
-deployment souschef spec template spec: {
+deployment: souschef: spec: template: spec: {
     hasDisks :: false
 }
 
@@ -854,11 +854,11 @@
 objects: [ x for v in objectSets for x in v ]
 
 objectSets: [
-    service,
-    deployment,
-    statefulSet,
-    daemonSet,
-    configMap
+	service,
+	deployment,
+	statefulSet,
+	daemonSet,
+	configMap,
 ]
 EOF
 ```
@@ -883,15 +883,15 @@
 	"tool/file"
 )
 
-command ls: {
-	task print: cli.Print & {
+command: ls: {
+	task: print: cli.Print & {
 		text: tabwriter.Write([
 			"\(x.kind)  \t\(x.metadata.labels.component)  \t\(x.metadata.name)"
 			for x in objects
 		])
 	}
 
-	task write: file.Create & {
+	task: write: file.Create & {
 		filename: "foo.txt"
 		contents: task.print.text
 	}
@@ -967,8 +967,8 @@
 	"tool/cli"
 )
 
-command dump: {
-	task print: cli.Print & {
+command: dump: {
+	task: print: cli.Print & {
 		text: yaml.MarshalStream(objects)
 	}
 }
@@ -1001,14 +1001,14 @@
 	"tool/cli"
 )
 
-command create: {
-	task kube: exec.Run & {
+command: create: {
+	task: kube: exec.Run & {
 		cmd:    "kubectl create --dry-run -f -"
 		stdin:  yaml.MarshalStream(objects)
 		stdout: string
 	}
 
-	task display: cli.Print & {
+	task: display: cli.Print & {
 		text: task.kube.stdout
 	}
 }
@@ -1063,10 +1063,10 @@
   apps_v1beta1 "k8s.io/api/apps/v1beta1"
 )
 
-service <Name>: v1.Service
-deployment <Name>: extensions_v1beta1.Deployment
-daemonSet <Name>: extensions_v1beta1.DaemonSet
-statefulSet <Name>: apps_v1beta1.StatefulSet
+service: [string]:     v1.Service
+deployment: [string]:  extensions_v1beta1.Deployment
+daemonSet: [string]:   extensions_v1beta1.DaemonSet
+statefulSet: [string]: apps_v1beta1.StatefulSet
 EOF
 ```
 
@@ -1108,7 +1108,7 @@
 // file cloud.cue
 package cloud
 
-service <Name>: {
+service: [Name=_]: {
     name: *Name | string // the name of the service
 
     ...
@@ -1119,7 +1119,7 @@
     }
 }
 
-deployment <Name>: {
+deployment: [Name=_]: {
     name: *Name | string
    ...
 }
@@ -1149,7 +1149,7 @@
 rather just have a `deployment` allowing different kinds.
 
 ```
-deployment <Name>: _base & {
+deployment: [Name=_]: _base & {
     name:     *Name | string
     ...
 ```
@@ -1162,7 +1162,7 @@
 
 Arguments can be specified as a map.
 ```
-    arg <Key>: string
+    arg: [string]: string
     args: [ "-\(k)=\(v)" for k, v in arg ] | [...string]
 ```
 
@@ -1172,10 +1172,10 @@
 
 ```
     // expose port defines named ports that are exposed in the service
-    expose port <N>: int
+    expose: port: [string]: int
 
     // port defines a named port that is not exposed in the service.
-    port <N>: int
+    port: [string]: int
 ```
 Both maps get defined in the container definition, but only the ports under
 `expose` get included in the service definition.
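+
+For example (a hypothetical `example` deployment, used only for illustration):
+
+```
+deployment: example: {
+    expose: port: http: 8080 // copied into the derived service
+    port: debug: 7080        // defined on the container, but not in the service
+}
+```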
@@ -1188,9 +1188,9 @@
 We define a simple `env` map and an `envSpec` for more elaborate cases:
 
 ```
-    env <Key>: string
+    env: [string]: string
 
-    envSpec <Key>: {}
+    envSpec: [string]: {}
     envSpec: {"\(k)" value: v for k, v in env}
 ```
 The simple map automatically gets mapped into the more elaborate map
@@ -1201,7 +1201,7 @@
 volume spec and volume mount.
 
 ```
-    volume <Name>: {
+    volume: [Name=_]: {
         name:      *Name | string
         mountPath: string
         subPath:   null | string
@@ -1224,13 +1224,15 @@
 
 ```
 // define services implied by deployments
-service "\(k)": {
+service: "\(k)": {
 
     // Copy over all ports exposed from containers.
-    port "\(Name)": {
-        port:       *Port | int
-        targetPort: *Port | int
-    } for Name, Port in spec.expose.port
+    for Name, Port in spec.expose.port {
+        port: "\(Name)": {
+            port:       *Port | int
+            targetPort: *Port | int
+        }
+    }
 
     // Copy over the labels
     label: spec.label
@@ -1256,11 +1258,11 @@
             apiVersion: "v1"
             kind:       "Service"
 
-            metadata name:   x.name
-            metadata labels: x.label
-            spec selector:   x.label
+            metadata: name:   x.name
+            metadata: labels: x.label
+            spec: selector:   x.label
 
-            spec ports: [ p for p in x.port ]
+            spec: ports: [ p for p in x.port ]
         }
     }
 }
diff --git a/doc/tutorial/kubernetes/manual/services/cloud.cue b/doc/tutorial/kubernetes/manual/services/cloud.cue
index 55ffe63..bf86197 100644
--- a/doc/tutorial/kubernetes/manual/services/cloud.cue
+++ b/doc/tutorial/kubernetes/manual/services/cloud.cue
@@ -4,14 +4,14 @@
 _base: {
 	name: string
 
-	label <Key>: string
+	label: [string]: string
 
 	// k8s is a set of Kubernetes-specific settings that will be merged in at
 	// the top-level. The allowed fields are type specific.
 	kubernetes: {}
 }
 
-deployment <Name>: _base & {
+deployment: [Name=_]: _base & {
 	// Allow any string, but take Name by default.
 	name:     string | *Name
 	kind:     *"deployment" | "stateful" | "daemon"
@@ -20,25 +20,25 @@
 	image: string
 
 	// expose port defines named ports that are exposed in the service
-	expose port <N>: int
+	expose: port: [string]: int
 
 	// port defines named ports that are not exposed in the service.
-	port <N>: int
+	port: [string]: int
 
-	arg <Key>: string
+	arg: [string]: string
 	args: [ "-\(k)=\(v)" for k, v in arg ] | [...string]
 
 	// Environment variables
-	env <Key>: string
+	env: [string]: string
 
-	envSpec <Key>: {}
+	envSpec: [string]: {}
 	envSpec: {
 		for k, v in env {
-			"\(k)" value: v
+			"\(k)": value: v
 		}
 	}
 
-	volume <Name>: {
+	volume: [Name=_]: {
 		name:      string | *Name
 		mountPath: string
 		subPath:   string | *null
@@ -47,10 +47,10 @@
 	}
 }
 
-service <Name>: _base & {
+service: [Name=_]: _base & {
 	name: *Name | string
 
-	port <Name>: {
+	port: [Name=_]: {
 		name: string | *Name
 
 		port:     int
@@ -60,16 +60,16 @@
 	kubernetes: {}
 }
 
-configMap <Name>: {
+configMap: [string]: {
 }
 
 // define services implied by deployments
 for k, spec in deployment if len(spec.expose.port) > 0 {
-	service "\(k)": {
+	service: "\(k)": {
 
 		// Copy over all ports exposed from containers.
 		for Name, Port in spec.expose.port {
-			port "\(Name)": {
+			port: "\(Name)": {
 				// Set default external port to Port. targetPort must be
 				// the respective containerPort (Port) if it differs from port.
 				port: int | *Port
diff --git a/doc/tutorial/kubernetes/manual/services/create_tool.cue b/doc/tutorial/kubernetes/manual/services/create_tool.cue
index 1713fa5..011bdc4 100644
--- a/doc/tutorial/kubernetes/manual/services/create_tool.cue
+++ b/doc/tutorial/kubernetes/manual/services/create_tool.cue
@@ -2,15 +2,15 @@
 
 import "encoding/yaml"
 
-command create: {
-    task kube: {
-        kind:   "exec"
-        cmd:    "kubectl create --dry-run -f -"
-        stdin:  yaml.MarshalStream(objects)
-        stdout: string
-    }
-    task display: {
-        kind: "print"
-        text: task.kube.stdout
-    }
+command: create: {
+	task: kube: {
+		kind:   "exec"
+		cmd:    "kubectl create --dry-run -f -"
+		stdin:  yaml.MarshalStream(objects)
+		stdout: string
+	}
+	task: display: {
+		kind: "print"
+		text: task.kube.stdout
+	}
 }
diff --git a/doc/tutorial/kubernetes/manual/services/dump_tool.cue b/doc/tutorial/kubernetes/manual/services/dump_tool.cue
index dd19dc6..573792f 100644
--- a/doc/tutorial/kubernetes/manual/services/dump_tool.cue
+++ b/doc/tutorial/kubernetes/manual/services/dump_tool.cue
@@ -2,9 +2,9 @@
 
 import "encoding/yaml"
 
-command dump: {
-    task print: {
-        kind: "print"
-        text: yaml.MarshalStream(objects)
-    }
+command: dump: {
+	task: print: {
+		kind: "print"
+		text: yaml.MarshalStream(objects)
+	}
 }
diff --git a/doc/tutorial/kubernetes/manual/services/frontend/bartender/kube.cue b/doc/tutorial/kubernetes/manual/services/frontend/bartender/kube.cue
index bb68c15..29bf0ec 100644
--- a/doc/tutorial/kubernetes/manual/services/frontend/bartender/kube.cue
+++ b/doc/tutorial/kubernetes/manual/services/frontend/bartender/kube.cue
@@ -1,3 +1,3 @@
 package kube
 
-deployment bartender image: "gcr.io/myproj/bartender:v0.1.34"
+deployment: bartender: image: "gcr.io/myproj/bartender:v0.1.34"
diff --git a/doc/tutorial/kubernetes/manual/services/frontend/breaddispatcher/kube.cue b/doc/tutorial/kubernetes/manual/services/frontend/breaddispatcher/kube.cue
index ff580a8..e310e1e 100644
--- a/doc/tutorial/kubernetes/manual/services/frontend/breaddispatcher/kube.cue
+++ b/doc/tutorial/kubernetes/manual/services/frontend/breaddispatcher/kube.cue
@@ -1,7 +1,7 @@
 package kube
 
-deployment breaddispatcher: {
+deployment: breaddispatcher: {
 	image: "gcr.io/myproj/breaddispatcher:v0.3.24"
-	arg etcd:           "etcd:2379"
-	arg "event-server": "events:7788"
+	arg: etcd:           "etcd:2379"
+	arg: "event-server": "events:7788"
 }
diff --git a/doc/tutorial/kubernetes/manual/services/frontend/host/kube.cue b/doc/tutorial/kubernetes/manual/services/frontend/host/kube.cue
index 6ed536b..4013bd3 100644
--- a/doc/tutorial/kubernetes/manual/services/frontend/host/kube.cue
+++ b/doc/tutorial/kubernetes/manual/services/frontend/host/kube.cue
@@ -1,6 +1,6 @@
 package kube
 
-deployment host: {
+deployment: host: {
 	replicas: 2
 	image:    "gcr.io/myproj/host:v0.1.10"
 }
diff --git a/doc/tutorial/kubernetes/manual/services/frontend/kube.cue b/doc/tutorial/kubernetes/manual/services/frontend/kube.cue
index c8d47e8..99bcd22 100644
--- a/doc/tutorial/kubernetes/manual/services/frontend/kube.cue
+++ b/doc/tutorial/kubernetes/manual/services/frontend/kube.cue
@@ -1,10 +1,10 @@
 package kube
 
-_base label component: "frontend"
+_base: label: component: "frontend"
 
-deployment <Name>: {
-	expose port http: *7080 | int
-	kubernetes spec template metadata annotations: {
+deployment: [string]: {
+	expose: port: http: *7080 | int
+	kubernetes: spec: template: metadata: annotations: {
 		"prometheus.io.scrape": "true"
 		"prometheus.io.port":   "\(expose.port.http)"
 	}
diff --git a/doc/tutorial/kubernetes/manual/services/frontend/maitred/kube.cue b/doc/tutorial/kubernetes/manual/services/frontend/maitred/kube.cue
index c1a5467..d0c777c 100644
--- a/doc/tutorial/kubernetes/manual/services/frontend/maitred/kube.cue
+++ b/doc/tutorial/kubernetes/manual/services/frontend/maitred/kube.cue
@@ -1,3 +1,3 @@
 package kube
 
-deployment maitred image: "gcr.io/myproj/maitred:v0.0.4"
+deployment: maitred: image: "gcr.io/myproj/maitred:v0.0.4"
diff --git a/doc/tutorial/kubernetes/manual/services/frontend/valeter/kube.cue b/doc/tutorial/kubernetes/manual/services/frontend/valeter/kube.cue
index e0cb979..c1a61f3 100644
--- a/doc/tutorial/kubernetes/manual/services/frontend/valeter/kube.cue
+++ b/doc/tutorial/kubernetes/manual/services/frontend/valeter/kube.cue
@@ -1,8 +1,8 @@
 package kube
 
-deployment valeter: {
+deployment: valeter: {
 	image: "gcr.io/myproj/valeter:v0.0.4"
-	arg http: ":8080"
-	arg etcd: "etcd:2379"
-	expose port http: 8080
+	arg: http: ":8080"
+	arg: etcd: "etcd:2379"
+	expose: port: http: 8080
 }
diff --git a/doc/tutorial/kubernetes/manual/services/frontend/waiter/kube.cue b/doc/tutorial/kubernetes/manual/services/frontend/waiter/kube.cue
index 947b81b..4e76ab1 100644
--- a/doc/tutorial/kubernetes/manual/services/frontend/waiter/kube.cue
+++ b/doc/tutorial/kubernetes/manual/services/frontend/waiter/kube.cue
@@ -1,6 +1,6 @@
 package kube
 
-deployment waiter: {
+deployment: waiter: {
 	image:    "gcr.io/myproj/waiter:v0.3.0"
 	replicas: 5
 }
diff --git a/doc/tutorial/kubernetes/manual/services/frontend/waterdispatcher/kube.cue b/doc/tutorial/kubernetes/manual/services/frontend/waterdispatcher/kube.cue
index bb6d01b..32df8f9 100644
--- a/doc/tutorial/kubernetes/manual/services/frontend/waterdispatcher/kube.cue
+++ b/doc/tutorial/kubernetes/manual/services/frontend/waterdispatcher/kube.cue
@@ -1,7 +1,7 @@
 package kube
 
-deployment waterdispatcher: {
+deployment: waterdispatcher: {
 	image: "gcr.io/myproj/waterdispatcher:v0.0.48"
-	arg http: ":8080"
-	arg etcd: "etcd:2379"
+	arg: http: ":8080"
+	arg: etcd: "etcd:2379"
 }
diff --git a/doc/tutorial/kubernetes/manual/services/infra/download/kube.cue b/doc/tutorial/kubernetes/manual/services/infra/download/kube.cue
index 8654afc..ba33dbd 100644
--- a/doc/tutorial/kubernetes/manual/services/infra/download/kube.cue
+++ b/doc/tutorial/kubernetes/manual/services/infra/download/kube.cue
@@ -1,6 +1,6 @@
 package kube
 
-deployment download: {
+deployment: download: {
 	image: "gcr.io/myproj/download:v0.0.2"
-	expose port client: 7080
+	expose: port: client: 7080
 }
diff --git a/doc/tutorial/kubernetes/manual/services/infra/etcd/kube.cue b/doc/tutorial/kubernetes/manual/services/infra/etcd/kube.cue
index 587fb76..55417f7 100644
--- a/doc/tutorial/kubernetes/manual/services/infra/etcd/kube.cue
+++ b/doc/tutorial/kubernetes/manual/services/infra/etcd/kube.cue
@@ -1,33 +1,33 @@
 package kube
 
-service etcd kubernetes spec clusterIP: "None"
+service: etcd: kubernetes: spec: clusterIP: "None"
 
-deployment etcd: {
+deployment: etcd: {
 	kind:     "stateful"
 	replicas: 3
 
 	image: "quay.io/coreos/etcd:v3.3.10"
 
-	kubernetes spec template spec containers: [{command: ["/usr/local/bin/etcd"]}]
+	kubernetes: spec: template: spec: containers: [{command: ["/usr/local/bin/etcd"]}]
 
-	arg name:                          "$(NAME)"
-	arg "data-dir":                    "/data/etcd3"
-	arg "initial-advertise-peer-urls": "http://$(IP):2380"
-	arg "listen-peer-urls":            "http://$(IP):2380"
-	arg "listen-client-urls":          "http://$(IP):2379,http://127.0.0.1:2379"
-	arg "advertise-client-urls":       "http://$(IP):2379"
-	arg discovery:                     "https://discovery.etcd.io/xxxxxx"
+	arg: name:                          "$(NAME)"
+	arg: "data-dir":                    "/data/etcd3"
+	arg: "initial-advertise-peer-urls": "http://$(IP):2380"
+	arg: "listen-peer-urls":            "http://$(IP):2380"
+	arg: "listen-client-urls":          "http://$(IP):2379,http://127.0.0.1:2379"
+	arg: "advertise-client-urls":       "http://$(IP):2379"
+	arg: discovery:                     "https://discovery.etcd.io/xxxxxx"
 
-	env ETCDCTL_API:                    "3"
-	env ETCD_AUTO_COMPACTION_RETENTION: "4"
+	env: ETCDCTL_API:                    "3"
+	env: ETCD_AUTO_COMPACTION_RETENTION: "4"
 
-	envSpec NAME valueFrom fieldRef fieldPath: "metadata.name"
-	envSpec IP valueFrom fieldRef fieldPath:   "status.podIP"
+	envSpec: NAME: valueFrom: fieldRef: fieldPath: "metadata.name"
+	envSpec: IP: valueFrom: fieldRef: fieldPath:   "status.podIP"
 
-	expose port client: 2379
-	expose port peer:   2380
+	expose: port: client: 2379
+	expose: port: peer:   2380
 
-	kubernetes spec template spec containers: [{
+	kubernetes: spec: template: spec: containers: [{
 		volumeMounts: [{
 			name:      "etcd3"
 			mountPath: "/data"
@@ -41,24 +41,24 @@
 		}
 	}]
 
-	kubernetes spec: {
+	kubernetes: spec: {
 		volumeClaimTemplates: [{
 			metadata: {
 				name: "etcd3"
-				annotations "volume.alpha.kubernetes.io/storage-class": "default"
+				annotations: "volume.alpha.kubernetes.io/storage-class": "default"
 			}
 			spec: {
 				accessModes: ["ReadWriteOnce"]
-				resources requests storage: "10Gi"
+				resources: requests: storage: "10Gi"
 			}
 		}]
 
 		serviceName: "etcd"
-		template metadata annotations "prometheus.io.port":   "2379"
-		template metadata annotations "prometheus.io.scrape": "true"
-		template spec affinity: {
-			podAntiAffinity requiredDuringSchedulingIgnoredDuringExecution: [{
-				labelSelector matchExpressions: [{
+		template: metadata: annotations: "prometheus.io.port":   "2379"
+		template: metadata: annotations: "prometheus.io.scrape": "true"
+		template: spec: affinity: {
+			podAntiAffinity: requiredDuringSchedulingIgnoredDuringExecution: [{
+				labelSelector: matchExpressions: [{
 					key:      "app"
 					operator: "In"
 					values: ["etcd"]
@@ -66,6 +66,6 @@
 				topologyKey: "kubernetes.io/hostname"
 			}]
 		}
-		template spec terminationGracePeriodSeconds: 10
+		template: spec: terminationGracePeriodSeconds: 10
 	}
 }
diff --git a/doc/tutorial/kubernetes/manual/services/infra/events/kube.cue b/doc/tutorial/kubernetes/manual/services/infra/events/kube.cue
index 8f7ea21..1ddce39 100644
--- a/doc/tutorial/kubernetes/manual/services/infra/events/kube.cue
+++ b/doc/tutorial/kubernetes/manual/services/infra/events/kube.cue
@@ -1,28 +1,28 @@
 package kube
 
-deployment events: {
+deployment: events: {
 	replicas: 2
 	image:    "gcr.io/myproj/events:v0.1.31"
 
-	arg cert: "/etc/ssl/server.pem"
-	arg key:  "/etc/ssl/server.key"
-	arg grpc: ":7788"
+	arg: cert: "/etc/ssl/server.pem"
+	arg: key:  "/etc/ssl/server.key"
+	arg: grpc: ":7788"
 
-	port http: 7080
-	expose port grpc: 7788
+	port: http: 7080
+	expose: port: grpc: 7788
 
-	volume "secret-volume": {
+	volume: "secret-volume": {
 		mountPath: "/etc/ssl"
-		spec secret secretName: "biz-secrets"
+		spec: secret: secretName: "biz-secrets"
 	}
 
-	kubernetes spec template metadata annotations: {
+	kubernetes: spec: template: metadata: annotations: {
 		"prometheus.io.port":   "7080"
 		"prometheus.io.scrape": "true"
 	}
 
-	kubernetes spec template spec affinity podAntiAffinity requiredDuringSchedulingIgnoredDuringExecution: [{
-		labelSelector matchExpressions: [{
+	kubernetes: spec: template: spec: affinity: podAntiAffinity: requiredDuringSchedulingIgnoredDuringExecution: [{
+		labelSelector: matchExpressions: [{
 			key:      "app"
 			operator: "In"
 			values: ["events"]
diff --git a/doc/tutorial/kubernetes/manual/services/infra/kube.cue b/doc/tutorial/kubernetes/manual/services/infra/kube.cue
index 52a75ce..43b3344 100644
--- a/doc/tutorial/kubernetes/manual/services/infra/kube.cue
+++ b/doc/tutorial/kubernetes/manual/services/infra/kube.cue
@@ -1,3 +1,3 @@
 package kube
 
-_base label component: "infra"
+_base: label: component: "infra"
diff --git a/doc/tutorial/kubernetes/manual/services/infra/tasks/kube.cue b/doc/tutorial/kubernetes/manual/services/infra/tasks/kube.cue
index e579cd9..fabd7d8 100644
--- a/doc/tutorial/kubernetes/manual/services/infra/tasks/kube.cue
+++ b/doc/tutorial/kubernetes/manual/services/infra/tasks/kube.cue
@@ -1,17 +1,17 @@
 package kube
 
-deployment tasks: {
+deployment: tasks: {
 	image: "gcr.io/myproj/tasks:v0.2.6"
 
-	port http: 7080
-	expose port https: 7443
+	port: http: 7080
+	expose: port: https: 7443
 
-	volume "secret-volume": {
+	volume: "secret-volume": {
 		mountPath: "/etc/ssl"
-		spec secret secretName: "star-example-com-secrets"
+		spec: secret: secretName: "star-example-com-secrets"
 	}
 
-	kubernetes spec template metadata annotations: {
+	kubernetes: spec: template: metadata: annotations: {
 		"prometheus.io.port":   "7080"
 		"prometheus.io.scrape": "true"
 	}
diff --git a/doc/tutorial/kubernetes/manual/services/infra/tasks/service.cue b/doc/tutorial/kubernetes/manual/services/infra/tasks/service.cue
index defd546..3a1050f 100644
--- a/doc/tutorial/kubernetes/manual/services/infra/tasks/service.cue
+++ b/doc/tutorial/kubernetes/manual/services/infra/tasks/service.cue
@@ -1,12 +1,12 @@
 package kube
 
-service tasks: {
-	port https: {
+service: tasks: {
+	port: https: {
 		port:       443
 		targetPort: 7443
 		protocol:   "TCP"
 	}
-	kubernetes spec: {
+	kubernetes: spec: {
 		type:           "LoadBalancer"
 		loadBalancerIP: "1.2.3.4"
 	}
diff --git a/doc/tutorial/kubernetes/manual/services/infra/updater/kube.cue b/doc/tutorial/kubernetes/manual/services/infra/updater/kube.cue
index b7e285b..7ca1ad4 100644
--- a/doc/tutorial/kubernetes/manual/services/infra/updater/kube.cue
+++ b/doc/tutorial/kubernetes/manual/services/infra/updater/kube.cue
@@ -1,12 +1,12 @@
 package kube
 
-deployment updater: {
+deployment: updater: {
 	image: "gcr.io/myproj/updater:v0.1.0"
 	args: ["-key=/etc/certs/updater.pem"]
 
-	expose port http: 8080
-	volume "secret-updater": {
+	expose: port: http: 8080
+	volume: "secret-updater": {
 		mountPath: "/etc/certs"
-		spec secret secretName: "updater-secrets"
+		spec: secret: secretName: "updater-secrets"
 	}
 }
diff --git a/doc/tutorial/kubernetes/manual/services/infra/watcher/kube.cue b/doc/tutorial/kubernetes/manual/services/infra/watcher/kube.cue
index b359d56..d76df18 100644
--- a/doc/tutorial/kubernetes/manual/services/infra/watcher/kube.cue
+++ b/doc/tutorial/kubernetes/manual/services/infra/watcher/kube.cue
@@ -1,12 +1,12 @@
 package kube
 
-deployment watcher: {
+deployment: watcher: {
 	image: "gcr.io/myproj/watcher:v0.1.0"
 
-	volume "secret-volume": {
+	volume: "secret-volume": {
 		mountPath: "/etc/ssl"
-		spec secret secretName: "star-example-com-secrets"
+		spec: secret: secretName: "star-example-com-secrets"
 	}
-	port http: 7080
-	expose port https: 7788
+	port: http: 7080
+	expose: port: https: 7788
 }
diff --git a/doc/tutorial/kubernetes/manual/services/infra/watcher/service.cue b/doc/tutorial/kubernetes/manual/services/infra/watcher/service.cue
index 76a7250..f09ab72 100644
--- a/doc/tutorial/kubernetes/manual/services/infra/watcher/service.cue
+++ b/doc/tutorial/kubernetes/manual/services/infra/watcher/service.cue
@@ -1,11 +1,11 @@
 package kube
 
-service watcher: {
-	kubernetes spec: {
+service: watcher: {
+	kubernetes: spec: {
 		type:           "LoadBalancer"
 		loadBalancerIP: "1.2.3.4" // static ip
 	}
-	ports https: {
+	ports: https: {
 		port:       7788
 		targetPort: 7788
 	}
diff --git a/doc/tutorial/kubernetes/manual/services/k8s.cue b/doc/tutorial/kubernetes/manual/services/k8s.cue
index 74e7228..cefab64 100644
--- a/doc/tutorial/kubernetes/manual/services/k8s.cue
+++ b/doc/tutorial/kubernetes/manual/services/k8s.cue
@@ -1,16 +1,16 @@
 package kube
 
-kubernetes services: {
+kubernetes: services: {
 	for k, x in service {
 		"\(k)": x.kubernetes & {
 			apiVersion: "v1"
 			kind:       "Service"
 
-			metadata name:   x.name
-			metadata labels: x.label
-			spec selector:   x.label
+			metadata: name:   x.name
+			metadata: labels: x.label
+			spec: selector:   x.label
 
-			spec ports: [ p for p in x.port ] // convert struct to list
+			spec: ports: [ p for p in x.port ] // convert struct to list
 		}
 	}
 	// Note that we cannot write
@@ -26,27 +26,27 @@
 // deployments: _k8sSpec(X: x) for x in deployment
 // This would look nicer and would allow for superior type checking.
 
-kubernetes deployments: {
+kubernetes: deployments: {
 	for k, x in deployment if x.kind == "deployment" {
 		"\(k)": (_k8sSpec & {X: x}).X.kubernetes & {
 			apiVersion: "extensions/v1beta1"
 			kind:       "Deployment"
-			spec replicas: x.replicas
+			spec: replicas: x.replicas
 		}
 	}
 }
 
-kubernetes statefulSets: {
+kubernetes: statefulSets: {
 	for k, x in deployment if x.kind == "stateful" {
 		"\(k)": (_k8sSpec & {X: x}).X.kubernetes & {
 			apiVersion: "apps/v1beta1"
 			kind:       "StatefulSet"
-			spec replicas: x.replicas
+			spec: replicas: x.replicas
 		}
 	}
 }
 
-kubernetes daemonSets: {
+kubernetes: daemonSets: {
 	for k, x in deployment if x.kind == "daemon" {
 		"\(k)": (_k8sSpec & {X: x}).X.kubernetes & {
 			apiVersion: "extensions/v1beta1"
@@ -55,14 +55,14 @@
 	}
 }
 
-kubernetes configMaps: {
+kubernetes: configMaps: {
 	for k, v in configMap {
 		"\(k)": {
 			apiVersion: "v1"
 			kind:       "ConfigMap"
 
-			metadata name: k
-			metadata labels component: _base.label.component
+			metadata: name: k
+			metadata: labels: component: _base.label.component
 			data: v
 		}
 	}
@@ -72,14 +72,14 @@
 // Unify the deployment at X and read out kubernetes to obtain
 // the conversion.
 // TODO: use alias
-_k8sSpec X kubernetes: {
-	metadata name: X.name
-	metadata labels component: X.label.component
+_k8sSpec: X: kubernetes: {
+	metadata: name: X.name
+	metadata: labels: component: X.label.component
 
-	spec template: {
-		metadata labels: X.label
+	spec: template: {
+		metadata: labels: X.label
 
-		spec containers: [{
+		spec: containers: [{
 			name:  X.name
 			image: X.image
 			args:  X.args
@@ -95,7 +95,7 @@
 	}
 
 	// Volumes
-	spec template spec: {
+	spec: template: spec: {
 		if len(X.volume) > 0 {
 			volumes: [
 					v.kubernetes & {name: v.name} for v in X.volume
diff --git a/doc/tutorial/kubernetes/manual/services/kitchen/caller/kube.cue b/doc/tutorial/kubernetes/manual/services/kitchen/caller/kube.cue
index 09a8906..8ac92ba 100644
--- a/doc/tutorial/kubernetes/manual/services/kitchen/caller/kube.cue
+++ b/doc/tutorial/kubernetes/manual/services/kitchen/caller/kube.cue
@@ -1,22 +1,22 @@
 package kube
 
-deployment caller: _kitchenDeployment & {
+deployment: caller: _kitchenDeployment & {
 	replicas: 3
 	image:    "gcr.io/myproj/caller:v0.20.14"
 
-	arg key:  "/etc/certs/client.key"
-	arg cert: "/etc/certs/client.pem"
-	arg ca:   "/etc/certs/servfx.ca"
+	arg: key:  "/etc/certs/client.key"
+	arg: cert: "/etc/certs/client.pem"
+	arg: ca:   "/etc/certs/servfx.ca"
 
-	arg "ssh-tunnel-key": "/sslcerts/tunnel-private.pem"
+	arg: "ssh-tunnel-key": "/sslcerts/tunnel-private.pem"
 
-	volume "caller-disk": {
+	volume: "caller-disk": {
 		name: "ssd-caller"
 	}
 
-	volume "secret-ssh-key": {
+	volume: "secret-ssh-key": {
 		mountPath: "/sslcerts"
 		readOnly:  true
-		spec secret secretName: "secrets"
+		spec: secret: secretName: "secrets"
 	}
 }
diff --git a/doc/tutorial/kubernetes/manual/services/kitchen/dishwasher/kube.cue b/doc/tutorial/kubernetes/manual/services/kitchen/dishwasher/kube.cue
index 8308b3c..5f6b97e 100644
--- a/doc/tutorial/kubernetes/manual/services/kitchen/dishwasher/kube.cue
+++ b/doc/tutorial/kubernetes/manual/services/kitchen/dishwasher/kube.cue
@@ -1,12 +1,12 @@
 package kube
 
-deployment dishwasher: _kitchenDeployment & {
+deployment: dishwasher: _kitchenDeployment & {
 	replicas: 5
 	image:    "gcr.io/myproj/dishwasher:v0.2.13"
-	arg "ssh-tunnel-key": "/etc/certs/tunnel-private.pem"
-	volume "secret-ssh-key": {
+	arg: "ssh-tunnel-key": "/etc/certs/tunnel-private.pem"
+	volume: "secret-ssh-key": {
 		mountPath: "/sslcerts"
 		readOnly:  true
-		spec secret secretName: "secrets"
+		spec: secret: secretName: "secrets"
 	}
 }
diff --git a/doc/tutorial/kubernetes/manual/services/kitchen/expiditer/kube.cue b/doc/tutorial/kubernetes/manual/services/kitchen/expiditer/kube.cue
index 2c47da4..5bedf09 100644
--- a/doc/tutorial/kubernetes/manual/services/kitchen/expiditer/kube.cue
+++ b/doc/tutorial/kubernetes/manual/services/kitchen/expiditer/kube.cue
@@ -1,6 +1,6 @@
 package kube
 
-deployment expiditer: _kitchenDeployment & {
+deployment: expiditer: _kitchenDeployment & {
 	image: "gcr.io/myproj/expiditer:v0.5.34"
-	arg "ssh-tunnel-key": "/etc/certs/tunnel-private.pem"
+	arg: "ssh-tunnel-key": "/etc/certs/tunnel-private.pem"
 }
diff --git a/doc/tutorial/kubernetes/manual/services/kitchen/headchef/kube.cue b/doc/tutorial/kubernetes/manual/services/kitchen/headchef/kube.cue
index 61ce60d..0f9c00e 100644
--- a/doc/tutorial/kubernetes/manual/services/kitchen/headchef/kube.cue
+++ b/doc/tutorial/kubernetes/manual/services/kitchen/headchef/kube.cue
@@ -1,6 +1,6 @@
 package kube
 
-deployment headchef: _kitchenDeployment & {
+deployment: headchef: _kitchenDeployment & {
 	image: "gcr.io/myproj/headchef:v0.2.16"
-	volume "secret-headchef" mountPath: "/sslcerts"
+	volume: "secret-headchef": mountPath: "/sslcerts"
 }
diff --git a/doc/tutorial/kubernetes/manual/services/kitchen/kube.cue b/doc/tutorial/kubernetes/manual/services/kitchen/kube.cue
index 40344c0..12d62bf 100644
--- a/doc/tutorial/kubernetes/manual/services/kitchen/kube.cue
+++ b/doc/tutorial/kubernetes/manual/services/kitchen/kube.cue
@@ -1,13 +1,13 @@
 package kube
 
-_base label component: "kitchen"
+_base: label: component: "kitchen"
 
-deployment <Name>: {
-	expose port client: 8080
+deployment: [string]: {
+	expose: port: client: 8080
 
-	kubernetes spec template metadata annotations "prometheus.io.scrape": "true"
+	kubernetes: spec: template: metadata: annotations: "prometheus.io.scrape": "true"
 
-	kubernetes spec template spec containers: [{
+	kubernetes: spec: template: spec: containers: [{
 		livenessProbe: {
 			httpGet: {
 				path: "/debug/health"
@@ -23,23 +23,23 @@
 _kitchenDeployment: {
 	name: string
 
-	arg env:            "prod"
-	arg logdir:         "/logs"
-	arg "event-server": "events:7788"
+	arg: env:            "prod"
+	arg: logdir:         "/logs"
+	arg: "event-server": "events:7788"
 
 	// Volumes
-	volume "\(name)-disk": {
+	volume: "\(name)-disk": {
 		name:      string
 		mountPath: *"/logs" | string
-		spec gcePersistentDisk: {
+		spec: gcePersistentDisk: {
 			pdName: *name | string
 			fsType: "ext4"
 		}
 	}
 
-	volume "secret-\(name)": {
+	volume: "secret-\(name)": {
 		mountPath: *"/etc/certs" | string
 		readOnly:  true
-		spec secret secretName: *"\(name)-secrets" | string
+		spec: secret: secretName: *"\(name)-secrets" | string
 	}
 }
diff --git a/doc/tutorial/kubernetes/manual/services/kitchen/linecook/kube.cue b/doc/tutorial/kubernetes/manual/services/kitchen/linecook/kube.cue
index 233fe9e..28189a4 100644
--- a/doc/tutorial/kubernetes/manual/services/kitchen/linecook/kube.cue
+++ b/doc/tutorial/kubernetes/manual/services/kitchen/linecook/kube.cue
@@ -1,11 +1,11 @@
 package kube
 
-deployment linecook: _kitchenDeployment & {
+deployment: linecook: _kitchenDeployment & {
 	image: "gcr.io/myproj/linecook:v0.1.42"
-	volume "secret-linecook" name: "secret-kitchen"
+	volume: "secret-linecook": name: "secret-kitchen"
 
-	arg name:                "linecook"
-	arg etcd:                "etcd:2379"
-	arg "reconnect-delay":   "1h"
-	arg "-recovery-overlap": "100000"
+	arg: name:                "linecook"
+	arg: etcd:                "etcd:2379"
+	arg: "reconnect-delay":   "1h"
+	arg: "-recovery-overlap": "100000"
 }
diff --git a/doc/tutorial/kubernetes/manual/services/kitchen/pastrychef/kube.cue b/doc/tutorial/kubernetes/manual/services/kitchen/pastrychef/kube.cue
index 6af3f1a..4587705 100644
--- a/doc/tutorial/kubernetes/manual/services/kitchen/pastrychef/kube.cue
+++ b/doc/tutorial/kubernetes/manual/services/kitchen/pastrychef/kube.cue
@@ -1,15 +1,15 @@
 package kube
 
-deployment pastrychef: _kitchenDeployment & {
+deployment: pastrychef: _kitchenDeployment & {
 	image: "gcr.io/myproj/pastrychef:v0.1.15"
 
-	volume "secret-pastrychef": {
+	volume: "secret-pastrychef": {
 		name: "secret-ssh-key"
-		spec secret secretName: "secrets"
+		spec: secret: secretName: "secrets"
 	}
 
-	arg "ssh-tunnel-key":   "/etc/certs/tunnel-private.pem"
-	arg "reconnect-delay":  "1m"
-	arg etcd:               "etcd:2379"
-	arg "recovery-overlap": "10000"
+	arg: "ssh-tunnel-key":   "/etc/certs/tunnel-private.pem"
+	arg: "reconnect-delay":  "1m"
+	arg: etcd:               "etcd:2379"
+	arg: "recovery-overlap": "10000"
 }
diff --git a/doc/tutorial/kubernetes/manual/services/kitchen/souschef/kube.cue b/doc/tutorial/kubernetes/manual/services/kitchen/souschef/kube.cue
index c6e6c0a..21974d6 100644
--- a/doc/tutorial/kubernetes/manual/services/kitchen/souschef/kube.cue
+++ b/doc/tutorial/kubernetes/manual/services/kitchen/souschef/kube.cue
@@ -1,3 +1,3 @@
 package kube
 
-deployment souschef image: "gcr.io/myproj/souschef:v0.5.3"
+deployment: souschef: image: "gcr.io/myproj/souschef:v0.5.3"
diff --git a/doc/tutorial/kubernetes/manual/services/ls_tool.cue b/doc/tutorial/kubernetes/manual/services/ls_tool.cue
index 0bd4499..d82e2af 100644
--- a/doc/tutorial/kubernetes/manual/services/ls_tool.cue
+++ b/doc/tutorial/kubernetes/manual/services/ls_tool.cue
@@ -2,12 +2,12 @@
 
 import "strings"
 
-command ls: {
-    task print: {
-        kind: "print"
-        Lines = [
-            "\(x.kind)  \t\(x.metadata.labels.component)   \t\(x.metadata.name)"
-            for x in objects ]
-        text: strings.Join(Lines, "\n")
-    }
+command: ls: {
+	task: print: {
+		kind: "print"
+		Lines = [
+			"\(x.kind)  \t\(x.metadata.labels.component)   \t\(x.metadata.name)"
+			for x in objects ]
+		text: strings.Join(Lines, "\n")
+	}
 }
diff --git a/doc/tutorial/kubernetes/manual/services/mon/alertmanager/configmap.cue b/doc/tutorial/kubernetes/manual/services/mon/alertmanager/configmap.cue
index 067a636..c834813 100644
--- a/doc/tutorial/kubernetes/manual/services/mon/alertmanager/configmap.cue
+++ b/doc/tutorial/kubernetes/manual/services/mon/alertmanager/configmap.cue
@@ -2,7 +2,7 @@
 
 import "encoding/yaml"
 
-configMap alertmanager: {
+configMap: alertmanager: {
 	"alerts.yaml": yaml.Marshal(alerts_yaml)
 	alerts_yaml = {
 		receivers: [{
diff --git a/doc/tutorial/kubernetes/manual/services/mon/alertmanager/kube.cue b/doc/tutorial/kubernetes/manual/services/mon/alertmanager/kube.cue
index ccb7c17..c3b6f8a 100644
--- a/doc/tutorial/kubernetes/manual/services/mon/alertmanager/kube.cue
+++ b/doc/tutorial/kubernetes/manual/services/mon/alertmanager/kube.cue
@@ -1,18 +1,18 @@
 package kube
 
-service alertmanager: {
-	label name: "alertmanager"
+service: alertmanager: {
+	label: name: "alertmanager"
 
-	port alertmanager name: "main"
+	port: alertmanager: name: "main"
 
-	kubernetes metadata: {
-		annotations "prometheus.io/scrape": "true"
-		annotations "prometheus.io/path":   "/metrics"
+	kubernetes: metadata: {
+		annotations: "prometheus.io/scrape": "true"
+		annotations: "prometheus.io/path":   "/metrics"
 	}
 }
 
-deployment alertmanager: {
-	kubernetes spec selector matchLabels app: "alertmanager"
+deployment: alertmanager: {
+	kubernetes: spec: selector: matchLabels: app: "alertmanager"
 
 	image: "prom/alertmanager:v0.15.2"
 
@@ -24,14 +24,14 @@
 
 	// XXX: adding another label causes an error at the wrong position:
 	// expose port alertmanager configMap
-	expose port alertmanager: 9093
+	expose: port: alertmanager: 9093
 
-	volume "config-volume": {
+	volume: "config-volume": {
 		mountPath: "/etc/alertmanager"
-		spec configMap name: "alertmanager"
+		spec: configMap: name: "alertmanager"
 	}
-	volume alertmanager: {
+	volume: alertmanager: {
 		mountPath: "/alertmanager"
-		spec emptyDir: {}
+		spec: emptyDir: {}
 	}
 }
diff --git a/doc/tutorial/kubernetes/manual/services/mon/grafana/kube.cue b/doc/tutorial/kubernetes/manual/services/mon/grafana/kube.cue
index b188792..42ae606 100644
--- a/doc/tutorial/kubernetes/manual/services/mon/grafana/kube.cue
+++ b/doc/tutorial/kubernetes/manual/services/mon/grafana/kube.cue
@@ -1,14 +1,14 @@
 package kube
 
-deployment grafana: {
+deployment: grafana: {
 	image: "grafana/grafana:4.5.2"
 
-	expose port grafana: 3000
-	port web: 8080
+	expose: port: grafana: 3000
+	port: web: 8080
 
-	volume "grafana-volume": {
+	volume: "grafana-volume": {
 		mountPath: "/var/lib/grafana"
-		spec gcePersistentDisk: {
+		spec: gcePersistentDisk: {
 			pdName: "grafana-volume"
 			fsType: "ext4"
 		}
@@ -19,17 +19,17 @@
 	// the kubernetes api-server proxy. On production clusters, we recommend
 	// removing these env variables, setting up auth for grafana, and exposing the grafana
 	// service using a LoadBalancer or a public IP.
-	env GF_AUTH_BASIC_ENABLED:      "false"
-	env GF_AUTH_ANONYMOUS_ENABLED:  "true"
-	env GF_AUTH_ANONYMOUS_ORG_ROLE: "admin"
+	env: GF_AUTH_BASIC_ENABLED:      "false"
+	env: GF_AUTH_ANONYMOUS_ENABLED:  "true"
+	env: GF_AUTH_ANONYMOUS_ORG_ROLE: "admin"
 
-	kubernetes spec template spec containers: [{
+	kubernetes: spec: template: spec: containers: [{
 		// keep request = limit to keep this container in guaranteed class
-		resources limits: {
+		resources: limits: {
 			cpu:    "100m"
 			memory: "100Mi"
 		}
-		resources requests: {
+		resources: requests: {
 			cpu:    "100m"
 			memory: "100Mi"
 		}
diff --git a/doc/tutorial/kubernetes/manual/services/mon/kube.cue b/doc/tutorial/kubernetes/manual/services/mon/kube.cue
index 4932e10..343a16f 100644
--- a/doc/tutorial/kubernetes/manual/services/mon/kube.cue
+++ b/doc/tutorial/kubernetes/manual/services/mon/kube.cue
@@ -1,3 +1,3 @@
 package kube
 
-_base label component: "mon"
+_base: label: component: "mon"
diff --git a/doc/tutorial/kubernetes/manual/services/mon/nodeexporter/kube.cue b/doc/tutorial/kubernetes/manual/services/mon/nodeexporter/kube.cue
index 2c0f9a9..4b7019d 100644
--- a/doc/tutorial/kubernetes/manual/services/mon/nodeexporter/kube.cue
+++ b/doc/tutorial/kubernetes/manual/services/mon/nodeexporter/kube.cue
@@ -1,43 +1,43 @@
 package kube
 
-service "node-exporter": {
-	port scrape name: "metrics"
+service: "node-exporter": {
+	port: scrape: name: "metrics"
 
-	kubernetes metadata annotations "prometheus.io/scrape": "true"
-	kubernetes spec type:      "ClusterIP"
-	kubernetes spec clusterIP: "None"
+	kubernetes: metadata: annotations: "prometheus.io/scrape": "true"
+	kubernetes: spec: type:      "ClusterIP"
+	kubernetes: spec: clusterIP: "None"
 }
 
-deployment "node-exporter": {
+deployment: "node-exporter": {
 	kind: "daemon"
 
 	image: "quay.io/prometheus/node-exporter:v0.16.0"
 
-	expose port scrape: 9100
+	expose: port: scrape: 9100
 	args: ["--path.procfs=/host/proc", "--path.sysfs=/host/sys"]
 
-	volume proc: {
+	volume: proc: {
 		mountPath: "/host/proc"
 		readOnly:  true
-		spec hostPath path: "/proc"
+		spec: hostPath: path: "/proc"
 	}
-	volume sys: {
+	volume: sys: {
 		mountPath: "/host/sys"
 		readOnly:  true
-		spec hostPath path: "/sys"
+		spec: hostPath: path: "/sys"
 	}
 
-	kubernetes spec template spec: {
+	kubernetes: spec: template: spec: {
 		hostNetwork: true
 		hostPID:     true
 
 		containers: [{
 			ports: [{hostPort: 9100}]
-			resources requests: {
+			resources: requests: {
 				memory: "30Mi"
 				cpu:    "100m"
 			}
-			resources limits: {
+			resources: limits: {
 				memory: "50Mi"
 				cpu:    "200m"
 			}
diff --git a/doc/tutorial/kubernetes/manual/services/mon/prometheus/configmap.cue b/doc/tutorial/kubernetes/manual/services/mon/prometheus/configmap.cue
index ba65cf4..6434221 100644
--- a/doc/tutorial/kubernetes/manual/services/mon/prometheus/configmap.cue
+++ b/doc/tutorial/kubernetes/manual/services/mon/prometheus/configmap.cue
@@ -2,7 +2,7 @@
 
 import "encoding/yaml"
 
-configMap prometheus: {
+configMap: prometheus: {
 	"alert.rules": yaml.Marshal(alert_rules)
 	alert_rules = {
 		groups: [{
@@ -11,7 +11,7 @@
 				alert: "InstanceDown"
 				expr:  "up == 0"
 				for:   "30s"
-				labels severity: "page"
+				labels: severity: "page"
 				annotations: {
 					description: "{{$labels.app}} of job {{ $labels.job }} has been down for more than 30 seconds."
 					summary:     "Instance {{$labels.app}} down"
@@ -20,7 +20,7 @@
 				alert: "InsufficientPeers"
 				expr:  "count(up{job=\"etcd\"} == 0) > (count(up{job=\"etcd\"}) / 2 - 1)"
 				for:   "3m"
-				labels severity: "page"
+				labels: severity: "page"
 				annotations: {
 					description: "If one more etcd peer goes down the cluster will be unavailable"
 					summary:     "etcd cluster small"
@@ -29,13 +29,13 @@
 				alert: "EtcdNoMaster"
 				expr:  "sum(etcd_server_has_leader{app=\"etcd\"}) == 0"
 				for:   "1s"
-				labels severity:     "page"
-				annotations summary: "No ETCD master elected."
+				labels: severity:     "page"
+				annotations: summary: "No ETCD master elected."
 			}, {
 				alert: "PodRestart"
 				expr:  "(max_over_time(pod_container_status_restarts_total[5m]) - min_over_time(pod_container_status_restarts_total[5m])) > 2"
 				for:   "1m"
-				labels severity: "page"
+				labels: severity: "page"
 				annotations: {
 					description: "{{$labels.app}} {{ $labels.container }} resturted {{ $value }} times in 5m."
 					summary:     "Pod for {{$labels.container}} restarts too often"
@@ -45,9 +45,9 @@
 	}
 	"prometheus.yml": yaml.Marshal(prometheus_yml)
 	prometheus_yml = {
-		global scrape_interval: "15s"
+		global: scrape_interval: "15s"
 		rule_files: ["/etc/prometheus/alert.rules"]
-		alerting alertmanagers: [{
+		alerting: alertmanagers: [{
 			scheme: "http"
 			static_configs: [{
 				targets: ["alertmanager:9093"]
@@ -67,7 +67,7 @@
 			// Prometheus. The discovery auth config is automatic if Prometheus runs inside
 			// the cluster. Otherwise, more config options have to be provided within the
 			// <kubernetes_sd_config>.
-			tls_config ca_file: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
+			tls_config: ca_file: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
 			// If your node certificates are self-signed or use a different CA to the
 			// master CA, then disable certificate verification below. Note that
 			// certificate verification is an integral part of a secure infrastructure
@@ -101,7 +101,7 @@
 			// Prometheus. The discovery auth config is automatic if Prometheus runs inside
 			// the cluster. Otherwise, more config options have to be provided within the
 			// <kubernetes_sd_config>.
-			tls_config ca_file: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
+			tls_config: ca_file: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
 			bearer_token_file: "/var/run/secrets/kubernetes.io/serviceaccount/token"
 			kubernetes_sd_configs: [{
 				role: "node"
@@ -143,7 +143,7 @@
 			// Prometheus. The discovery auth config is automatic if Prometheus runs inside
 			// the cluster. Otherwise, more config options have to be provided within the
 			// <kubernetes_sd_config>.
-			tls_config ca_file: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
+			tls_config: ca_file: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
 			bearer_token_file: "/var/run/secrets/kubernetes.io/serviceaccount/token"
 			kubernetes_sd_configs: [{
 				role: "node"
@@ -217,7 +217,7 @@
 			// * `prometheus.io/probe`: Only probe services that have a value of `true`
 			job_name:     "kubernetes-services"
 			metrics_path: "/probe"
-			params module: ["http_2xx"]
+			params: module: ["http_2xx"]
 			kubernetes_sd_configs: [{
 				role: "service"
 			}]
@@ -253,7 +253,7 @@
 			// * `prometheus.io/probe`: Only probe services that have a value of `true`
 			job_name:     "kubernetes-ingresses"
 			metrics_path: "/probe"
-			params module: ["http_2xx"]
+			params: module: ["http_2xx"]
 			kubernetes_sd_configs: [{
 				role: "ingress"
 			}]
diff --git a/doc/tutorial/kubernetes/manual/services/mon/prometheus/kube.cue b/doc/tutorial/kubernetes/manual/services/mon/prometheus/kube.cue
index 1458a8f..f37d956 100644
--- a/doc/tutorial/kubernetes/manual/services/mon/prometheus/kube.cue
+++ b/doc/tutorial/kubernetes/manual/services/mon/prometheus/kube.cue
@@ -1,37 +1,37 @@
 package kube
 
-service prometheus: {
-	label name: "prometheus"
-	port web: {
+service: prometheus: {
+	label: name: "prometheus"
+	port: web: {
 		name:     "main"
 		nodePort: 30900
 	}
-	kubernetes metadata annotations "prometheus.io/scrape": "true"
-	kubernetes spec type: "NodePort"
+	kubernetes: metadata: annotations: "prometheus.io/scrape": "true"
+	kubernetes: spec: type: "NodePort"
 }
 
-deployment prometheus: {
+deployment: prometheus: {
 	image: "prom/prometheus:v2.4.3"
 	args: [
 		"--config.file=/etc/prometheus/prometheus.yml",
 		"--web.external-url=https://prometheus.example.com",
 	]
 
-	expose port web: 9090
+	expose: port: web: 9090
 
-	volume "config-volume": {
+	volume: "config-volume": {
 		mountPath: "/etc/prometheus"
-		spec configMap name: "prometheus"
+		spec: configMap: name: "prometheus"
 	}
 
-	kubernetes spec selector matchLabels app: "prometheus"
+	kubernetes: spec: selector: matchLabels: app: "prometheus"
 
-	kubernetes spec strategy: {
+	kubernetes: spec: strategy: {
 		type: "RollingUpdate"
 		rollingUpdate: {
 			maxSurge:       0
 			maxUnavailable: 1
 		}
 	}
-	kubernetes spec template metadata annotations "prometheus.io.scrape": "true"
+	kubernetes: spec: template: metadata: annotations: "prometheus.io.scrape": "true"
 }
diff --git a/doc/tutorial/kubernetes/manual/services/proxy/authproxy/configmap.cue b/doc/tutorial/kubernetes/manual/services/proxy/authproxy/configmap.cue
index c7ab94c..51840d4 100644
--- a/doc/tutorial/kubernetes/manual/services/proxy/authproxy/configmap.cue
+++ b/doc/tutorial/kubernetes/manual/services/proxy/authproxy/configmap.cue
@@ -4,7 +4,7 @@
 // kubectl apply -f configmap.yaml
 // kubectl scale --replicas=0 deployment/proxy
 // kubectl scale --replicas=1 deployment/proxy
-configMap authproxy "authproxy.cfg": """
+configMap: authproxy: "authproxy.cfg": """
 		# Google Auth Proxy Config File
 		## https://github.com/bitly/google_auth_proxy
 
diff --git a/doc/tutorial/kubernetes/manual/services/proxy/authproxy/kube.cue b/doc/tutorial/kubernetes/manual/services/proxy/authproxy/kube.cue
index 44ac71a..66614cb 100644
--- a/doc/tutorial/kubernetes/manual/services/proxy/authproxy/kube.cue
+++ b/doc/tutorial/kubernetes/manual/services/proxy/authproxy/kube.cue
@@ -1,13 +1,13 @@
 package kube
 
-deployment authproxy: {
+deployment: authproxy: {
 	image: "skippy/oauth2_proxy:2.0.1"
 	args: ["--config=/etc/authproxy/authproxy.cfg"]
 
-	expose port client: 4180
+	expose: port: client: 4180
 
-	volume "config-volume": {
+	volume: "config-volume": {
 		mountPath: "/etc/authproxy"
-		spec configMap name: "authproxy"
+		spec: configMap: name: "authproxy"
 	}
 }
diff --git a/doc/tutorial/kubernetes/manual/services/proxy/goget/kube.cue b/doc/tutorial/kubernetes/manual/services/proxy/goget/kube.cue
index c999c0d..88cd92c 100644
--- a/doc/tutorial/kubernetes/manual/services/proxy/goget/kube.cue
+++ b/doc/tutorial/kubernetes/manual/services/proxy/goget/kube.cue
@@ -1,12 +1,12 @@
 package kube
 
-deployment goget: {
+deployment: goget: {
 	image: "gcr.io/myproj/goget:v0.5.1"
 
-	expose port https: 7443
+	expose: port: https: 7443
 
-	volume "secret-volume": {
+	volume: "secret-volume": {
 		mountPath: "/etc/ssl"
-		spec secret secretName: "goget-secrets"
+		spec: secret: secretName: "goget-secrets"
 	}
 }
diff --git a/doc/tutorial/kubernetes/manual/services/proxy/goget/service.cue b/doc/tutorial/kubernetes/manual/services/proxy/goget/service.cue
index 9f57889..187873b 100644
--- a/doc/tutorial/kubernetes/manual/services/proxy/goget/service.cue
+++ b/doc/tutorial/kubernetes/manual/services/proxy/goget/service.cue
@@ -1,9 +1,9 @@
 package kube
 
-service goget: {
-	port http: {port: 443}
+service: goget: {
+	port: http: {port: 443}
 
-	kubernetes spec: {
+	kubernetes: spec: {
 		type:           "LoadBalancer"
 		loadBalancerIP: "1.3.5.7" // static ip
 	}
diff --git a/doc/tutorial/kubernetes/manual/services/proxy/kube.cue b/doc/tutorial/kubernetes/manual/services/proxy/kube.cue
index 5584b3f..10e805c 100644
--- a/doc/tutorial/kubernetes/manual/services/proxy/kube.cue
+++ b/doc/tutorial/kubernetes/manual/services/proxy/kube.cue
@@ -1,3 +1,3 @@
 package kube
 
-_base label component: "proxy"
+_base: label: component: "proxy"
diff --git a/doc/tutorial/kubernetes/manual/services/proxy/nginx/configmap.cue b/doc/tutorial/kubernetes/manual/services/proxy/nginx/configmap.cue
index 7bc1610..e3d6d55 100644
--- a/doc/tutorial/kubernetes/manual/services/proxy/nginx/configmap.cue
+++ b/doc/tutorial/kubernetes/manual/services/proxy/nginx/configmap.cue
@@ -1,6 +1,6 @@
 package kube
 
-configMap nginx "nginx.conf": """
+configMap: nginx: "nginx.conf": """
 		events {
 		    worker_connections 768;
 		}
diff --git a/doc/tutorial/kubernetes/manual/services/proxy/nginx/kube.cue b/doc/tutorial/kubernetes/manual/services/proxy/nginx/kube.cue
index 6cfed5d..46153c0 100644
--- a/doc/tutorial/kubernetes/manual/services/proxy/nginx/kube.cue
+++ b/doc/tutorial/kubernetes/manual/services/proxy/nginx/kube.cue
@@ -1,19 +1,19 @@
 package kube
 
-deployment nginx: {
+deployment: nginx: {
 	image: "nginx:1.11.10-alpine"
 
-	expose port http:  80
-	expose port https: 443
+	expose: port: http:  80
+	expose: port: https: 443
 
-	volume "secret-volume": {
+	volume: "secret-volume": {
 		mountPath: "/etc/ssl"
-		spec secret secretName: "proxy-secrets"
+		spec: secret: secretName: "proxy-secrets"
 	}
 
-	volume "config-volume": {
+	volume: "config-volume": {
 		mountPath: "/etc/nginx/nginx.conf"
 		subPath:   "nginx.conf"
-		spec configMap name: "nginx"
+		spec: configMap: name: "nginx"
 	}
 }
diff --git a/doc/tutorial/kubernetes/manual/services/proxy/nginx/service.cue b/doc/tutorial/kubernetes/manual/services/proxy/nginx/service.cue
index 0488a18..1f80550 100644
--- a/doc/tutorial/kubernetes/manual/services/proxy/nginx/service.cue
+++ b/doc/tutorial/kubernetes/manual/services/proxy/nginx/service.cue
@@ -1,6 +1,6 @@
 package kube
 
-service nginx kubernetes spec: {
+service: nginx: kubernetes: spec: {
 	type:           "LoadBalancer"
 	loadBalancerIP: "1.3.4.5"
 }
diff --git a/doc/tutorial/kubernetes/quick/services/create_tool.cue b/doc/tutorial/kubernetes/quick/services/create_tool.cue
index 573890e..76a491f 100644
--- a/doc/tutorial/kubernetes/quick/services/create_tool.cue
+++ b/doc/tutorial/kubernetes/quick/services/create_tool.cue
@@ -6,14 +6,14 @@
 	"tool/cli"
 )
 
-command create: {
-	task kube: exec.Run & {
+command: create: {
+	task: kube: exec.Run & {
 		cmd:    "kubectl create --dry-run -f -"
 		stdin:  yaml.MarshalStream(objects)
 		stdout: string
 	}
 
-	task display: cli.Print & {
+	task: display: cli.Print & {
 		text: task.kube.stdout
 	}
 }
diff --git a/doc/tutorial/kubernetes/quick/services/dump_tool.cue b/doc/tutorial/kubernetes/quick/services/dump_tool.cue
index 5567771..b5301ec 100644
--- a/doc/tutorial/kubernetes/quick/services/dump_tool.cue
+++ b/doc/tutorial/kubernetes/quick/services/dump_tool.cue
@@ -5,8 +5,8 @@
 	"tool/cli"
 )
 
-command dump: {
-	task print: cli.Print & {
+command: dump: {
+	task: print: cli.Print & {
 		text: yaml.MarshalStream(objects)
 	}
 }
diff --git a/doc/tutorial/kubernetes/quick/services/k8s_defs.cue b/doc/tutorial/kubernetes/quick/services/k8s_defs.cue
index 06b27eb..10aafd4 100644
--- a/doc/tutorial/kubernetes/quick/services/k8s_defs.cue
+++ b/doc/tutorial/kubernetes/quick/services/k8s_defs.cue
@@ -6,7 +6,7 @@
   apps_v1beta1 "k8s.io/api/apps/v1beta1"
 )
 
-service <Name>: v1.Service
-deployment <Name>: extensions_v1beta1.Deployment
-daemonSet <Name>: extensions_v1beta1.DaemonSet
-statefulSet <Name>: apps_v1beta1.StatefulSet
+service: [string]:     v1.Service
+deployment: [string]:  extensions_v1beta1.Deployment
+daemonSet: [string]:   extensions_v1beta1.DaemonSet
+statefulSet: [string]: apps_v1beta1.StatefulSet
diff --git a/doc/tutorial/kubernetes/quick/services/kube_tool.cue b/doc/tutorial/kubernetes/quick/services/kube_tool.cue
index 43cf557..929ad34 100644
--- a/doc/tutorial/kubernetes/quick/services/kube_tool.cue
+++ b/doc/tutorial/kubernetes/quick/services/kube_tool.cue
@@ -3,9 +3,9 @@
 objects: [ x for v in objectSets for x in v ]
 
 objectSets: [
-    service,
-    deployment,
-    statefulSet,
-    daemonSet,
-    configMap
+	service,
+	deployment,
+	statefulSet,
+	daemonSet,
+	configMap,
 ]
diff --git a/doc/tutorial/kubernetes/quick/services/ls_tool.cue b/doc/tutorial/kubernetes/quick/services/ls_tool.cue
index b0d4718..1c73beb 100644
--- a/doc/tutorial/kubernetes/quick/services/ls_tool.cue
+++ b/doc/tutorial/kubernetes/quick/services/ls_tool.cue
@@ -6,15 +6,15 @@
 	"tool/file"
 )
 
-command ls: {
-	task print: cli.Print & {
+command: ls: {
+	task: print: cli.Print & {
 		text: tabwriter.Write([
 			"\(x.kind)  \t\(x.metadata.labels.component)  \t\(x.metadata.name)"
 			for x in objects
 		])
 	}
 
-	task write: file.Create & {
+	task: write: file.Create & {
 		filename: "foo.txt"
 		contents: task.print.text
 	}