deployment: {}
service: {}
configMap: {}
kubernetes: {
services: {}
deployments: {}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {}
service: {}
configMap: {}
kubernetes: {
services: {}
deployments: {}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
bartender: {
name: *"bartender" | string
kind: *"deployment" | "stateful" | "daemon"
replicas: *1 | int
image: "gcr.io/myproj/bartender:v0.1.34"
expose: {
port: {
http: *7080 | int
}
}
port: {}
arg: {}
args: []
env: {}
label: {
app: *"bartender" | string
domain: "prod"
component: "frontend"
}
kubernetes: {
spec: {
template: {
metadata: {
annotations: {
"prometheus.io.scrape": "true"
"prometheus.io.port": "7080"
}
}
}
}
}
envSpec: {}
volume: {}
}
}
service: {
bartender: {
name: *"bartender" | string
port: {
http: {
name: *"http" | string
port: 7080
protocol: *"TCP" | "UDP"
}
}
label: {
app: *"bartender" | string
domain: "prod"
component: "frontend"
}
kubernetes: {}
}
}
configMap: {}
kubernetes: {
services: {
bartender: {
apiVersion: "v1"
kind: "Service"
metadata: {
name: *"bartender" | string
labels: {
app: *"bartender" | string
domain: "prod"
component: "frontend"
}
}
spec: {
selector: {
app: *"bartender" | string
domain: "prod"
component: "frontend"
}
ports: [{
name: *"http" | string
port: 7080
protocol: *"TCP" | "UDP"
}]
}
}
}
deployments: {
bartender: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: *"bartender" | string
labels: {
component: "frontend"
}
}
spec: {
template: {
metadata: {
labels: {
app: *"bartender" | string
domain: "prod"
component: "frontend"
}
annotations: {
"prometheus.io.scrape": "true"
"prometheus.io.port": "7080"
}
}
spec: {
containers: [{
name: *"bartender" | string
image: "gcr.io/myproj/bartender:v0.1.34"
args: []
ports: [{
name: "http"
containerPort: *7080 | int
}]
}]
}
}
replicas: *1 | int
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
breaddispatcher: {
name: *"breaddispatcher" | string
kind: *"deployment" | "stateful" | "daemon"
replicas: *1 | int
image: "gcr.io/myproj/breaddispatcher:v0.3.24"
expose: {
port: {
http: *7080 | int
}
}
port: {}
arg: {
etcd: "etcd:2379"
"event-server": "events:7788"
}
args: ["-etcd=etcd:2379", "-event-server=events:7788"] | []
env: {}
label: {
app: *"breaddispatcher" | string
domain: "prod"
component: "frontend"
}
kubernetes: {
spec: {
template: {
metadata: {
annotations: {
"prometheus.io.scrape": "true"
"prometheus.io.port": "7080"
}
}
}
}
}
envSpec: {}
volume: {}
}
}
service: {
breaddispatcher: {
name: *"breaddispatcher" | string
port: {
http: {
name: *"http" | string
port: 7080
protocol: *"TCP" | "UDP"
}
}
label: {
app: *"breaddispatcher" | string
domain: "prod"
component: "frontend"
}
kubernetes: {}
}
}
configMap: {}
kubernetes: {
services: {
breaddispatcher: {
apiVersion: "v1"
kind: "Service"
metadata: {
name: *"breaddispatcher" | string
labels: {
app: *"breaddispatcher" | string
domain: "prod"
component: "frontend"
}
}
spec: {
selector: {
app: *"breaddispatcher" | string
domain: "prod"
component: "frontend"
}
ports: [{
name: *"http" | string
port: 7080
protocol: *"TCP" | "UDP"
}]
}
}
}
deployments: {
breaddispatcher: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: *"breaddispatcher" | string
labels: {
component: "frontend"
}
}
spec: {
template: {
metadata: {
labels: {
app: *"breaddispatcher" | string
domain: "prod"
component: "frontend"
}
annotations: {
"prometheus.io.scrape": "true"
"prometheus.io.port": "7080"
}
}
spec: {
containers: [{
name: *"breaddispatcher" | string
image: "gcr.io/myproj/breaddispatcher:v0.3.24"
args: ["-etcd=etcd:2379", "-event-server=events:7788"] | []
ports: [{
name: "http"
containerPort: *7080 | int
}]
}]
}
}
replicas: *1 | int
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
host: {
name: *"host" | string
kind: *"deployment" | "stateful" | "daemon"
replicas: 2
image: "gcr.io/myproj/host:v0.1.10"
expose: {
port: {
http: *7080 | int
}
}
port: {}
arg: {}
args: []
env: {}
label: {
app: *"host" | string
domain: "prod"
component: "frontend"
}
kubernetes: {
spec: {
template: {
metadata: {
annotations: {
"prometheus.io.scrape": "true"
"prometheus.io.port": "7080"
}
}
}
}
}
envSpec: {}
volume: {}
}
}
service: {
host: {
name: *"host" | string
port: {
http: {
name: *"http" | string
port: 7080
protocol: *"TCP" | "UDP"
}
}
label: {
app: *"host" | string
domain: "prod"
component: "frontend"
}
kubernetes: {}
}
}
configMap: {}
kubernetes: {
services: {
host: {
apiVersion: "v1"
kind: "Service"
metadata: {
name: *"host" | string
labels: {
app: *"host" | string
domain: "prod"
component: "frontend"
}
}
spec: {
selector: {
app: *"host" | string
domain: "prod"
component: "frontend"
}
ports: [{
name: *"http" | string
port: 7080
protocol: *"TCP" | "UDP"
}]
}
}
}
deployments: {
host: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: *"host" | string
labels: {
component: "frontend"
}
}
spec: {
template: {
metadata: {
labels: {
app: *"host" | string
domain: "prod"
component: "frontend"
}
annotations: {
"prometheus.io.scrape": "true"
"prometheus.io.port": "7080"
}
}
spec: {
containers: [{
name: *"host" | string
image: "gcr.io/myproj/host:v0.1.10"
args: []
ports: [{
name: "http"
containerPort: *7080 | int
}]
}]
}
}
replicas: 2
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
maitred: {
name: *"maitred" | string
kind: *"deployment" | "stateful" | "daemon"
replicas: *1 | int
image: "gcr.io/myproj/maitred:v0.0.4"
expose: {
port: {
http: *7080 | int
}
}
port: {}
arg: {}
args: []
env: {}
label: {
app: *"maitred" | string
domain: "prod"
component: "frontend"
}
kubernetes: {
spec: {
template: {
metadata: {
annotations: {
"prometheus.io.scrape": "true"
"prometheus.io.port": "7080"
}
}
}
}
}
envSpec: {}
volume: {}
}
}
service: {
maitred: {
name: *"maitred" | string
port: {
http: {
name: *"http" | string
port: 7080
protocol: *"TCP" | "UDP"
}
}
label: {
app: *"maitred" | string
domain: "prod"
component: "frontend"
}
kubernetes: {}
}
}
configMap: {}
kubernetes: {
services: {
maitred: {
apiVersion: "v1"
kind: "Service"
metadata: {
name: *"maitred" | string
labels: {
app: *"maitred" | string
domain: "prod"
component: "frontend"
}
}
spec: {
selector: {
app: *"maitred" | string
domain: "prod"
component: "frontend"
}
ports: [{
name: *"http" | string
port: 7080
protocol: *"TCP" | "UDP"
}]
}
}
}
deployments: {
maitred: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: *"maitred" | string
labels: {
component: "frontend"
}
}
spec: {
template: {
metadata: {
labels: {
app: *"maitred" | string
domain: "prod"
component: "frontend"
}
annotations: {
"prometheus.io.scrape": "true"
"prometheus.io.port": "7080"
}
}
spec: {
containers: [{
name: *"maitred" | string
image: "gcr.io/myproj/maitred:v0.0.4"
args: []
ports: [{
name: "http"
containerPort: *7080 | int
}]
}]
}
}
replicas: *1 | int
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
valeter: {
name: *"valeter" | string
kind: *"deployment" | "stateful" | "daemon"
replicas: *1 | int
image: "gcr.io/myproj/valeter:v0.0.4"
arg: {
http: ":8080"
etcd: "etcd:2379"
}
expose: {
port: {
http: 8080
}
}
port: {}
args: ["-http=:8080", "-etcd=etcd:2379"] | []
env: {}
label: {
app: *"valeter" | string
domain: "prod"
component: "frontend"
}
kubernetes: {
spec: {
template: {
metadata: {
annotations: {
"prometheus.io.scrape": "true"
"prometheus.io.port": "8080"
}
}
}
}
}
envSpec: {}
volume: {}
}
}
service: {
valeter: {
name: *"valeter" | string
port: {
http: {
name: *"http" | string
port: 8080
protocol: *"TCP" | "UDP"
}
}
label: {
app: *"valeter" | string
domain: "prod"
component: "frontend"
}
kubernetes: {}
}
}
configMap: {}
kubernetes: {
services: {
valeter: {
apiVersion: "v1"
kind: "Service"
metadata: {
name: *"valeter" | string
labels: {
app: *"valeter" | string
domain: "prod"
component: "frontend"
}
}
spec: {
selector: {
app: *"valeter" | string
domain: "prod"
component: "frontend"
}
ports: [{
name: *"http" | string
port: 8080
protocol: *"TCP" | "UDP"
}]
}
}
}
deployments: {
valeter: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: *"valeter" | string
labels: {
component: "frontend"
}
}
spec: {
template: {
metadata: {
labels: {
app: *"valeter" | string
domain: "prod"
component: "frontend"
}
annotations: {
"prometheus.io.scrape": "true"
"prometheus.io.port": "8080"
}
}
spec: {
containers: [{
name: *"valeter" | string
image: "gcr.io/myproj/valeter:v0.0.4"
args: ["-http=:8080", "-etcd=etcd:2379"] | []
ports: [{
name: "http"
containerPort: 8080
}]
}]
}
}
replicas: *1 | int
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
waiter: {
name: *"waiter" | string
kind: *"deployment" | "stateful" | "daemon"
image: "gcr.io/myproj/waiter:v0.3.0"
replicas: 5
expose: {
port: {
http: *7080 | int
}
}
port: {}
arg: {}
args: []
env: {}
label: {
app: *"waiter" | string
domain: "prod"
component: "frontend"
}
kubernetes: {
spec: {
template: {
metadata: {
annotations: {
"prometheus.io.scrape": "true"
"prometheus.io.port": "7080"
}
}
}
}
}
envSpec: {}
volume: {}
}
}
service: {
waiter: {
name: *"waiter" | string
port: {
http: {
name: *"http" | string
port: 7080
protocol: *"TCP" | "UDP"
}
}
label: {
app: *"waiter" | string
domain: "prod"
component: "frontend"
}
kubernetes: {}
}
}
configMap: {}
kubernetes: {
services: {
waiter: {
apiVersion: "v1"
kind: "Service"
metadata: {
name: *"waiter" | string
labels: {
app: *"waiter" | string
domain: "prod"
component: "frontend"
}
}
spec: {
selector: {
app: *"waiter" | string
domain: "prod"
component: "frontend"
}
ports: [{
name: *"http" | string
port: 7080
protocol: *"TCP" | "UDP"
}]
}
}
}
deployments: {
waiter: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: *"waiter" | string
labels: {
component: "frontend"
}
}
spec: {
template: {
metadata: {
labels: {
app: *"waiter" | string
domain: "prod"
component: "frontend"
}
annotations: {
"prometheus.io.scrape": "true"
"prometheus.io.port": "7080"
}
}
spec: {
containers: [{
name: *"waiter" | string
image: "gcr.io/myproj/waiter:v0.3.0"
args: []
ports: [{
name: "http"
containerPort: *7080 | int
}]
}]
}
}
replicas: 5
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
waterdispatcher: {
name: *"waterdispatcher" | string
kind: *"deployment" | "stateful" | "daemon"
replicas: *1 | int
image: "gcr.io/myproj/waterdispatcher:v0.0.48"
expose: {
port: {
http: *7080 | int
}
}
port: {}
arg: {
http: ":8080"
etcd: "etcd:2379"
}
args: ["-http=:8080", "-etcd=etcd:2379"] | []
env: {}
label: {
app: *"waterdispatcher" | string
domain: "prod"
component: "frontend"
}
kubernetes: {
spec: {
template: {
metadata: {
annotations: {
"prometheus.io.scrape": "true"
"prometheus.io.port": "7080"
}
}
}
}
}
envSpec: {}
volume: {}
}
}
service: {
waterdispatcher: {
name: *"waterdispatcher" | string
port: {
http: {
name: *"http" | string
port: 7080
protocol: *"TCP" | "UDP"
}
}
label: {
app: *"waterdispatcher" | string
domain: "prod"
component: "frontend"
}
kubernetes: {}
}
}
configMap: {}
kubernetes: {
services: {
waterdispatcher: {
apiVersion: "v1"
kind: "Service"
metadata: {
name: *"waterdispatcher" | string
labels: {
app: *"waterdispatcher" | string
domain: "prod"
component: "frontend"
}
}
spec: {
selector: {
app: *"waterdispatcher" | string
domain: "prod"
component: "frontend"
}
ports: [{
name: *"http" | string
port: 7080
protocol: *"TCP" | "UDP"
}]
}
}
}
deployments: {
waterdispatcher: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: *"waterdispatcher" | string
labels: {
component: "frontend"
}
}
spec: {
template: {
metadata: {
labels: {
app: *"waterdispatcher" | string
domain: "prod"
component: "frontend"
}
annotations: {
"prometheus.io.scrape": "true"
"prometheus.io.port": "7080"
}
}
spec: {
containers: [{
name: *"waterdispatcher" | string
image: "gcr.io/myproj/waterdispatcher:v0.0.48"
args: ["-http=:8080", "-etcd=etcd:2379"] | []
ports: [{
name: "http"
containerPort: *7080 | int
}]
}]
}
}
replicas: *1 | int
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {}
service: {}
configMap: {}
kubernetes: {
services: {}
deployments: {}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
download: {
name: *"download" | string
kind: *"deployment" | "stateful" | "daemon"
replicas: *1 | int
image: "gcr.io/myproj/download:v0.0.2"
expose: {
port: {
client: 7080
}
}
port: {}
arg: {}
args: []
env: {}
label: {
app: *"download" | string
domain: "prod"
component: "infra"
}
kubernetes: {}
envSpec: {}
volume: {}
}
}
service: {
download: {
name: *"download" | string
port: {
client: {
name: *"client" | string
port: 7080
protocol: *"TCP" | "UDP"
}
}
label: {
app: *"download" | string
domain: "prod"
component: "infra"
}
kubernetes: {}
}
}
configMap: {}
kubernetes: {
services: {
download: {
apiVersion: "v1"
kind: "Service"
metadata: {
name: *"download" | string
labels: {
app: *"download" | string
domain: "prod"
component: "infra"
}
}
spec: {
selector: {
app: *"download" | string
domain: "prod"
component: "infra"
}
ports: [{
name: *"client" | string
port: 7080
protocol: *"TCP" | "UDP"
}]
}
}
}
deployments: {
download: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: *"download" | string
labels: {
component: "infra"
}
}
spec: {
template: {
metadata: {
labels: {
app: *"download" | string
domain: "prod"
component: "infra"
}
}
spec: {
containers: [{
name: *"download" | string
image: "gcr.io/myproj/download:v0.0.2"
args: []
ports: [{
name: "client"
containerPort: 7080
}]
}]
}
}
replicas: *1 | int
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
etcd: {
name: *"etcd" | string
kind: "stateful"
replicas: 3
image: "quay.io/coreos/etcd:v3.3.10"
kubernetes: {
spec: {
volumeClaimTemplates: [{
metadata: {
name: "etcd3"
annotations: {
"volume.alpha.kubernetes.io/storage-class": "default"
}
}
spec: {
accessModes: ["ReadWriteOnce"]
resources: {
requests: {
storage: "10Gi"
}
}
}
}]
serviceName: "etcd"
template: {
spec: {
containers: [{
command: ["/usr/local/bin/etcd"]
volumeMounts: [{
name: "etcd3"
mountPath: "/data"
}]
livenessProbe: {
httpGet: {
path: "/health"
port: "client"
}
initialDelaySeconds: 30
}
}]
affinity: {
podAntiAffinity: {
requiredDuringSchedulingIgnoredDuringExecution: [{
labelSelector: {
matchExpressions: [{
key: "app"
operator: "In"
values: ["etcd"]
}]
}
topologyKey: "kubernetes.io/hostname"
}]
}
}
terminationGracePeriodSeconds: 10
}
metadata: {
annotations: {
"prometheus.io.port": "2379"
"prometheus.io.scrape": "true"
}
}
}
}
}
arg: {
name: "$(NAME)"
"data-dir": "/data/etcd3"
"initial-advertise-peer-urls": "http://$(IP):2380"
"listen-peer-urls": "http://$(IP):2380"
"listen-client-urls": "http://$(IP):2379,http://127.0.0.1:2379"
"advertise-client-urls": "http://$(IP):2379"
discovery: "https://discovery.etcd.io/xxxxxx"
}
env: {
ETCDCTL_API: "3"
ETCD_AUTO_COMPACTION_RETENTION: "4"
}
envSpec: {
NAME: {
valueFrom: {
fieldRef: {
fieldPath: "metadata.name"
}
}
}
IP: {
valueFrom: {
fieldRef: {
fieldPath: "status.podIP"
}
}
}
ETCDCTL_API: {
value: "3"
}
ETCD_AUTO_COMPACTION_RETENTION: {
value: "4"
}
}
expose: {
port: {
client: 2379
peer: 2380
}
}
port: {}
args: ["-name=$(NAME)", "-data-dir=/data/etcd3", "-initial-advertise-peer-urls=http://$(IP):2380", "-listen-peer-urls=http://$(IP):2380", "-listen-client-urls=http://$(IP):2379,http://127.0.0.1:2379", "-advertise-client-urls=http://$(IP):2379", "-discovery=https://discovery.etcd.io/xxxxxx"] | []
label: {
app: *"etcd" | string
domain: "prod"
component: "infra"
}
volume: {}
}
}
service: {
etcd: {
name: *"etcd" | string
port: {
client: {
name: *"client" | string
port: 2379
protocol: *"TCP" | "UDP"
}
peer: {
name: *"peer" | string
port: 2380
protocol: *"TCP" | "UDP"
}
}
kubernetes: {
spec: {
clusterIP: "None"
}
}
label: {
app: *"etcd" | string
domain: "prod"
component: "infra"
}
}
}
configMap: {}
kubernetes: {
services: {
etcd: {
apiVersion: "v1"
kind: "Service"
spec: {
clusterIP: "None"
selector: {
app: *"etcd" | string
domain: "prod"
component: "infra"
}
ports: [{
name: *"client" | string
port: 2379
protocol: *"TCP" | "UDP"
}, {
name: *"peer" | string
port: 2380
protocol: *"TCP" | "UDP"
}]
}
metadata: {
name: *"etcd" | string
labels: {
app: *"etcd" | string
domain: "prod"
component: "infra"
}
}
}
}
deployments: {}
statefulSets: {
etcd: {
apiVersion: "apps/v1beta1"
kind: "StatefulSet"
metadata: {
name: *"etcd" | string
labels: {
component: "infra"
}
}
spec: {
volumeClaimTemplates: [{
metadata: {
name: "etcd3"
annotations: {
"volume.alpha.kubernetes.io/storage-class": "default"
}
}
spec: {
accessModes: ["ReadWriteOnce"]
resources: {
requests: {
storage: "10Gi"
}
}
}
}]
serviceName: "etcd"
replicas: 3
template: {
metadata: {
labels: {
app: *"etcd" | string
domain: "prod"
component: "infra"
}
annotations: {
"prometheus.io.port": "2379"
"prometheus.io.scrape": "true"
}
}
spec: {
containers: [{
name: *"etcd" | string
image: "quay.io/coreos/etcd:v3.3.10"
args: ["-name=$(NAME)", "-data-dir=/data/etcd3", "-initial-advertise-peer-urls=http://$(IP):2380", "-listen-peer-urls=http://$(IP):2380", "-listen-client-urls=http://$(IP):2379,http://127.0.0.1:2379", "-advertise-client-urls=http://$(IP):2379", "-discovery=https://discovery.etcd.io/xxxxxx"] | []
command: ["/usr/local/bin/etcd"]
volumeMounts: [{
name: "etcd3"
mountPath: "/data"
}]
env: [{
name: "NAME"
valueFrom: {
fieldRef: {
fieldPath: "metadata.name"
}
}
}, {
name: "IP"
valueFrom: {
fieldRef: {
fieldPath: "status.podIP"
}
}
}, {
name: "ETCDCTL_API"
value: "3"
}, {
name: "ETCD_AUTO_COMPACTION_RETENTION"
value: "4"
}]
ports: [{
name: "client"
containerPort: 2379
}, {
name: "peer"
containerPort: 2380
}]
livenessProbe: {
httpGet: {
path: "/health"
port: "client"
}
initialDelaySeconds: 30
}
}]
affinity: {
podAntiAffinity: {
requiredDuringSchedulingIgnoredDuringExecution: [{
labelSelector: {
matchExpressions: [{
key: "app"
operator: "In"
values: ["etcd"]
}]
}
topologyKey: "kubernetes.io/hostname"
}]
}
}
terminationGracePeriodSeconds: 10
}
}
}
}
}
daemonSets: {}
configMaps: {}
}
deployment: {
events: {
name: *"events" | string
kind: *"deployment" | "stateful" | "daemon"
replicas: 2
image: "gcr.io/myproj/events:v0.1.31"
arg: {
cert: "/etc/ssl/server.pem"
key: "/etc/ssl/server.key"
grpc: ":7788"
}
port: {
http: 7080
}
expose: {
port: {
grpc: 7788
}
}
args: ["-cert=/etc/ssl/server.pem", "-key=/etc/ssl/server.key", "-grpc=:7788"] | []
env: {}
volume: {
"secret-volume": {
name: *"secret-volume" | string
mountPath: "/etc/ssl"
subPath: *null | string
readOnly: *false | true
spec: {
secret: {
secretName: "biz-secrets"
}
}
kubernetes: {}
}
}
kubernetes: {
spec: {
template: {
metadata: {
annotations: {
"prometheus.io.port": "7080"
"prometheus.io.scrape": "true"
}
}
spec: {
affinity: {
podAntiAffinity: {
requiredDuringSchedulingIgnoredDuringExecution: [{
labelSelector: {
matchExpressions: [{
key: "app"
operator: "In"
values: ["events"]
}]
}
topologyKey: "kubernetes.io/hostname"
}]
}
}
}
}
}
}
label: {
app: *"events" | string
domain: "prod"
component: "infra"
}
envSpec: {}
}
}
service: {
events: {
name: *"events" | string
port: {
grpc: {
name: *"grpc" | string
port: 7788
protocol: *"TCP" | "UDP"
}
}
label: {
app: *"events" | string
domain: "prod"
component: "infra"
}
kubernetes: {}
}
}
configMap: {}
kubernetes: {
services: {
events: {
apiVersion: "v1"
kind: "Service"
metadata: {
name: *"events" | string
labels: {
app: *"events" | string
domain: "prod"
component: "infra"
}
}
spec: {
selector: {
app: *"events" | string
domain: "prod"
component: "infra"
}
ports: [{
name: *"grpc" | string
port: 7788
protocol: *"TCP" | "UDP"
}]
}
}
}
deployments: {
events: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: *"events" | string
labels: {
component: "infra"
}
}
spec: {
template: {
metadata: {
labels: {
app: *"events" | string
domain: "prod"
component: "infra"
}
annotations: {
"prometheus.io.port": "7080"
"prometheus.io.scrape": "true"
}
}
spec: {
containers: [{
name: *"events" | string
image: "gcr.io/myproj/events:v0.1.31"
args: ["-cert=/etc/ssl/server.pem", "-key=/etc/ssl/server.key", "-grpc=:7788"] | []
volumeMounts: [{
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}]
ports: [{
name: "grpc"
containerPort: 7788
}, {
name: "http"
containerPort: 7080
}]
}]
affinity: {
podAntiAffinity: {
requiredDuringSchedulingIgnoredDuringExecution: [{
labelSelector: {
matchExpressions: [{
key: "app"
operator: "In"
values: ["events"]
}]
}
topologyKey: "kubernetes.io/hostname"
}]
}
}
volumes: [{
name: *"secret-volume" | string
}]
}
}
replicas: 2
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
tasks: {
name: *"tasks" | string
kind: *"deployment" | "stateful" | "daemon"
replicas: *1 | int
image: "gcr.io/myproj/tasks:v0.2.6"
port: {
http: 7080
}
expose: {
port: {
https: 7443
}
}
arg: {}
args: []
env: {}
volume: {
"secret-volume": {
name: *"secret-volume" | string
mountPath: "/etc/ssl"
subPath: *null | string
readOnly: *false | true
spec: {
secret: {
secretName: "star-example-com-secrets"
}
}
kubernetes: {}
}
}
kubernetes: {
spec: {
template: {
metadata: {
annotations: {
"prometheus.io.port": "7080"
"prometheus.io.scrape": "true"
}
}
}
}
}
label: {
app: *"tasks" | string
domain: "prod"
component: "infra"
}
envSpec: {}
}
}
service: {
tasks: {
name: *"tasks" | string
port: {
https: {
name: *"https" | string
port: 443
targetPort: 7443
protocol: "TCP"
}
}
kubernetes: {
spec: {
type: "LoadBalancer"
loadBalancerIP: "1.2.3.4"
}
}
label: {
app: *"tasks" | string
domain: "prod"
component: "infra"
}
}
}
configMap: {}
kubernetes: {
services: {
tasks: {
apiVersion: "v1"
kind: "Service"
spec: {
type: "LoadBalancer"
selector: {
app: *"tasks" | string
domain: "prod"
component: "infra"
}
ports: [{
name: *"https" | string
port: 443
targetPort: 7443
protocol: "TCP"
}]
loadBalancerIP: "1.2.3.4"
}
metadata: {
name: *"tasks" | string
labels: {
app: *"tasks" | string
domain: "prod"
component: "infra"
}
}
}
}
deployments: {
tasks: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: *"tasks" | string
labels: {
component: "infra"
}
}
spec: {
template: {
metadata: {
labels: {
app: *"tasks" | string
domain: "prod"
component: "infra"
}
annotations: {
"prometheus.io.port": "7080"
"prometheus.io.scrape": "true"
}
}
spec: {
containers: [{
name: *"tasks" | string
image: "gcr.io/myproj/tasks:v0.2.6"
args: []
volumeMounts: [{
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}]
ports: [{
name: "https"
containerPort: 7443
}, {
name: "http"
containerPort: 7080
}]
}]
volumes: [{
name: *"secret-volume" | string
}]
}
}
replicas: *1 | int
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
updater: {
name: *"updater" | string
kind: *"deployment" | "stateful" | "daemon"
replicas: *1 | int
image: "gcr.io/myproj/updater:v0.1.0"
args: ["-key=/etc/certs/updater.pem"]
expose: {
port: {
http: 8080
}
}
port: {}
arg: {}
env: {}
volume: {
"secret-updater": {
name: *"secret-updater" | string
mountPath: "/etc/certs"
subPath: *null | string
readOnly: *false | true
spec: {
secret: {
secretName: "updater-secrets"
}
}
kubernetes: {}
}
}
label: {
app: *"updater" | string
domain: "prod"
component: "infra"
}
kubernetes: {}
envSpec: {}
}
}
service: {
updater: {
name: *"updater" | string
port: {
http: {
name: *"http" | string
port: 8080
protocol: *"TCP" | "UDP"
}
}
label: {
app: *"updater" | string
domain: "prod"
component: "infra"
}
kubernetes: {}
}
}
configMap: {}
kubernetes: {
services: {
updater: {
apiVersion: "v1"
kind: "Service"
metadata: {
name: *"updater" | string
labels: {
app: *"updater" | string
domain: "prod"
component: "infra"
}
}
spec: {
selector: {
app: *"updater" | string
domain: "prod"
component: "infra"
}
ports: [{
name: *"http" | string
port: 8080
protocol: *"TCP" | "UDP"
}]
}
}
}
deployments: {
updater: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: *"updater" | string
labels: {
component: "infra"
}
}
spec: {
template: {
metadata: {
labels: {
app: *"updater" | string
domain: "prod"
component: "infra"
}
}
spec: {
containers: [{
name: *"updater" | string
image: "gcr.io/myproj/updater:v0.1.0"
args: ["-key=/etc/certs/updater.pem"]
volumeMounts: [{
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}]
ports: [{
name: "http"
containerPort: 8080
}]
}]
volumes: [{
name: *"secret-updater" | string
}]
}
}
replicas: *1 | int
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
watcher: {
name: *"watcher" | string
kind: *"deployment" | "stateful" | "daemon"
replicas: *1 | int
image: "gcr.io/myproj/watcher:v0.1.0"
volume: {
"secret-volume": {
name: *"secret-volume" | string
mountPath: "/etc/ssl"
subPath: *null | string
readOnly: *false | true
spec: {
secret: {
secretName: "star-example-com-secrets"
}
}
kubernetes: {}
}
}
port: {
http: 7080
}
expose: {
port: {
https: 7788
}
}
arg: {}
args: []
env: {}
label: {
app: *"watcher" | string
domain: "prod"
component: "infra"
}
kubernetes: {}
envSpec: {}
}
}
service: {
watcher: {
name: *"watcher" | string
port: {
https: {
name: *"https" | string
port: 7788
protocol: *"TCP" | "UDP"
}
}
kubernetes: {
spec: {
type: "LoadBalancer"
loadBalancerIP: "1.2.3.4"
}
}
ports: {
https: {
port: 7788
targetPort: 7788
}
}
label: {
app: *"watcher" | string
domain: "prod"
component: "infra"
}
}
}
configMap: {}
kubernetes: {
services: {
watcher: {
apiVersion: "v1"
kind: "Service"
spec: {
type: "LoadBalancer"
selector: {
app: *"watcher" | string
domain: "prod"
component: "infra"
}
ports: [{
name: *"https" | string
port: 7788
protocol: *"TCP" | "UDP"
}]
loadBalancerIP: "1.2.3.4"
}
metadata: {
name: *"watcher" | string
labels: {
app: *"watcher" | string
domain: "prod"
component: "infra"
}
}
}
}
deployments: {
watcher: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: *"watcher" | string
labels: {
component: "infra"
}
}
spec: {
template: {
metadata: {
labels: {
app: *"watcher" | string
domain: "prod"
component: "infra"
}
}
spec: {
containers: [{
name: *"watcher" | string
image: "gcr.io/myproj/watcher:v0.1.0"
args: []
volumeMounts: [{
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}]
ports: [{
name: "https"
containerPort: 7788
}, {
name: "http"
containerPort: 7080
}]
}]
volumes: [{
name: *"secret-volume" | string
}]
}
}
replicas: *1 | int
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {}
service: {}
configMap: {}
kubernetes: {
services: {}
deployments: {}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
caller: {
name: *"caller" | string
kind: *"deployment" | "stateful" | "daemon"
replicas: 3
image: "gcr.io/myproj/caller:v0.20.14"
expose: {
port: {
client: 8080
}
}
port: {}
arg: {
env: "prod"
logdir: "/logs"
"event-server": "events:7788"
key: "/etc/certs/client.key"
cert: "/etc/certs/client.pem"
ca: "/etc/certs/servfx.ca"
"ssh-tunnel-key": "/sslcerts/tunnel-private.pem"
}
args: ["-env=prod", "-logdir=/logs", "-event-server=events:7788", "-key=/etc/certs/client.key", "-cert=/etc/certs/client.pem", "-ca=/etc/certs/servfx.ca", "-ssh-tunnel-key=/sslcerts/tunnel-private.pem"] | []
env: {}
volume: {
"caller-disk": {
name: "ssd-caller"
mountPath: *"/logs" | string
subPath: *null | string
readOnly: *false | true
kubernetes: {}
spec: {
gcePersistentDisk: {
pdName: *"ssd-caller" | string
fsType: "ext4"
}
}
}
"secret-ssh-key": {
name: *"secret-ssh-key" | string
mountPath: "/sslcerts"
subPath: *null | string
readOnly: true
spec: {
secret: {
secretName: "secrets"
}
}
kubernetes: {}
}
"secret-caller": {
name: *"secret-caller" | string
mountPath: *"/etc/certs" | string
subPath: *null | string
readOnly: true
spec: {
secret: {
secretName: *"caller-secrets" | string
}
}
kubernetes: {}
}
}
label: {
app: *"caller" | string
domain: "prod"
component: "kitchen"
}
kubernetes: {
spec: {
template: {
metadata: {
annotations: {
"prometheus.io.scrape": "true"
}
}
spec: {
containers: [{
livenessProbe: {
httpGet: {
path: "/debug/health"
port: 8080
}
initialDelaySeconds: 40
periodSeconds: 3
}
}]
}
}
}
}
envSpec: {}
}
}
service: {
caller: {
name: *"caller" | string
port: {
client: {
name: *"client" | string
port: 8080
protocol: *"TCP" | "UDP"
}
}
label: {
app: *"caller" | string
domain: "prod"
component: "kitchen"
}
kubernetes: {}
}
}
configMap: {}
kubernetes: {
services: {
caller: {
apiVersion: "v1"
kind: "Service"
metadata: {
name: *"caller" | string
labels: {
app: *"caller" | string
domain: "prod"
component: "kitchen"
}
}
spec: {
selector: {
app: *"caller" | string
domain: "prod"
component: "kitchen"
}
ports: [{
name: *"client" | string
port: 8080
protocol: *"TCP" | "UDP"
}]
}
}
}
deployments: {
caller: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: *"caller" | string
labels: {
component: "kitchen"
}
}
spec: {
template: {
metadata: {
labels: {
app: *"caller" | string
domain: "prod"
component: "kitchen"
}
annotations: {
"prometheus.io.scrape": "true"
}
}
spec: {
containers: [{
name: *"caller" | string
image: "gcr.io/myproj/caller:v0.20.14"
args: ["-env=prod", "-logdir=/logs", "-event-server=events:7788", "-key=/etc/certs/client.key", "-cert=/etc/certs/client.pem", "-ca=/etc/certs/servfx.ca", "-ssh-tunnel-key=/sslcerts/tunnel-private.pem"] | []
livenessProbe: {
httpGet: {
path: "/debug/health"
port: 8080
}
initialDelaySeconds: 40
periodSeconds: 3
}
volumeMounts: [{
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}, {
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}, {
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}]
ports: [{
name: "client"
containerPort: 8080
}]
}]
volumes: [{
name: "ssd-caller"
}, {
name: *"secret-ssh-key" | string
}, {
name: *"secret-caller" | string
}]
}
}
replicas: 3
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
dishwasher: {
name: *"dishwasher" | string
kind: *"deployment" | "stateful" | "daemon"
replicas: 5
image: "gcr.io/myproj/dishwasher:v0.2.13"
expose: {
port: {
client: 8080
}
}
port: {}
arg: {
env: "prod"
logdir: "/logs"
"event-server": "events:7788"
"ssh-tunnel-key": "/etc/certs/tunnel-private.pem"
}
args: ["-env=prod", "-logdir=/logs", "-event-server=events:7788", "-ssh-tunnel-key=/etc/certs/tunnel-private.pem"] | []
env: {}
volume: {
"secret-ssh-key": {
name: *"secret-ssh-key" | string
mountPath: "/sslcerts"
subPath: *null | string
readOnly: true
spec: {
secret: {
secretName: "secrets"
}
}
kubernetes: {}
}
"dishwasher-disk": {
name: *"dishwasher-disk" | string
mountPath: *"/logs" | string
subPath: *null | string
readOnly: *false | true
spec: {
gcePersistentDisk: {
pdName: *"dishwasher-disk" | string
fsType: "ext4"
}
}
kubernetes: {}
}
"secret-dishwasher": {
name: *"secret-dishwasher" | string
mountPath: *"/etc/certs" | string
subPath: *null | string
readOnly: true
spec: {
secret: {
secretName: *"dishwasher-secrets" | string
}
}
kubernetes: {}
}
}
label: {
app: *"dishwasher" | string
domain: "prod"
component: "kitchen"
}
kubernetes: {
spec: {
template: {
metadata: {
annotations: {
"prometheus.io.scrape": "true"
}
}
spec: {
containers: [{
livenessProbe: {
httpGet: {
path: "/debug/health"
port: 8080
}
initialDelaySeconds: 40
periodSeconds: 3
}
}]
}
}
}
}
envSpec: {}
}
}
service: {
dishwasher: {
name: *"dishwasher" | string
port: {
client: {
name: *"client" | string
port: 8080
protocol: *"TCP" | "UDP"
}
}
label: {
app: *"dishwasher" | string
domain: "prod"
component: "kitchen"
}
kubernetes: {}
}
}
configMap: {}
kubernetes: {
services: {
dishwasher: {
apiVersion: "v1"
kind: "Service"
metadata: {
name: *"dishwasher" | string
labels: {
app: *"dishwasher" | string
domain: "prod"
component: "kitchen"
}
}
spec: {
selector: {
app: *"dishwasher" | string
domain: "prod"
component: "kitchen"
}
ports: [{
name: *"client" | string
port: 8080
protocol: *"TCP" | "UDP"
}]
}
}
}
deployments: {
dishwasher: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: *"dishwasher" | string
labels: {
component: "kitchen"
}
}
spec: {
template: {
metadata: {
labels: {
app: *"dishwasher" | string
domain: "prod"
component: "kitchen"
}
annotations: {
"prometheus.io.scrape": "true"
}
}
spec: {
containers: [{
name: *"dishwasher" | string
image: "gcr.io/myproj/dishwasher:v0.2.13"
args: ["-env=prod", "-logdir=/logs", "-event-server=events:7788", "-ssh-tunnel-key=/etc/certs/tunnel-private.pem"] | []
livenessProbe: {
httpGet: {
path: "/debug/health"
port: 8080
}
initialDelaySeconds: 40
periodSeconds: 3
}
volumeMounts: [{
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}, {
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}, {
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}]
ports: [{
name: "client"
containerPort: 8080
}]
}]
volumes: [{
name: *"secret-ssh-key" | string
}, {
name: *"dishwasher-disk" | string
}, {
name: *"secret-dishwasher" | string
}]
}
}
replicas: 5
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
expiditer: {
name: *"expiditer" | string
kind: *"deployment" | "stateful" | "daemon"
replicas: *1 | int
image: "gcr.io/myproj/expiditer:v0.5.34"
expose: {
port: {
client: 8080
}
}
port: {}
arg: {
env: "prod"
logdir: "/logs"
"event-server": "events:7788"
"ssh-tunnel-key": "/etc/certs/tunnel-private.pem"
}
args: ["-env=prod", "-logdir=/logs", "-event-server=events:7788", "-ssh-tunnel-key=/etc/certs/tunnel-private.pem"] | []
env: {}
volume: {
"expiditer-disk": {
name: *"expiditer-disk" | string
mountPath: *"/logs" | string
subPath: *null | string
readOnly: *false | true
spec: {
gcePersistentDisk: {
pdName: *"expiditer-disk" | string
fsType: "ext4"
}
}
kubernetes: {}
}
"secret-expiditer": {
name: *"secret-expiditer" | string
mountPath: *"/etc/certs" | string
subPath: *null | string
readOnly: true
spec: {
secret: {
secretName: *"expiditer-secrets" | string
}
}
kubernetes: {}
}
}
label: {
app: *"expiditer" | string
domain: "prod"
component: "kitchen"
}
kubernetes: {
spec: {
template: {
metadata: {
annotations: {
"prometheus.io.scrape": "true"
}
}
spec: {
containers: [{
livenessProbe: {
httpGet: {
path: "/debug/health"
port: 8080
}
initialDelaySeconds: 40
periodSeconds: 3
}
}]
}
}
}
}
envSpec: {}
}
}
service: {
expiditer: {
name: *"expiditer" | string
port: {
client: {
name: *"client" | string
port: 8080
protocol: *"TCP" | "UDP"
}
}
label: {
app: *"expiditer" | string
domain: "prod"
component: "kitchen"
}
kubernetes: {}
}
}
configMap: {}
kubernetes: {
services: {
expiditer: {
apiVersion: "v1"
kind: "Service"
metadata: {
name: *"expiditer" | string
labels: {
app: *"expiditer" | string
domain: "prod"
component: "kitchen"
}
}
spec: {
selector: {
app: *"expiditer" | string
domain: "prod"
component: "kitchen"
}
ports: [{
name: *"client" | string
port: 8080
protocol: *"TCP" | "UDP"
}]
}
}
}
deployments: {
expiditer: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: *"expiditer" | string
labels: {
component: "kitchen"
}
}
spec: {
template: {
metadata: {
labels: {
app: *"expiditer" | string
domain: "prod"
component: "kitchen"
}
annotations: {
"prometheus.io.scrape": "true"
}
}
spec: {
containers: [{
name: *"expiditer" | string
image: "gcr.io/myproj/expiditer:v0.5.34"
args: ["-env=prod", "-logdir=/logs", "-event-server=events:7788", "-ssh-tunnel-key=/etc/certs/tunnel-private.pem"] | []
livenessProbe: {
httpGet: {
path: "/debug/health"
port: 8080
}
initialDelaySeconds: 40
periodSeconds: 3
}
volumeMounts: [{
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}, {
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}]
ports: [{
name: "client"
containerPort: 8080
}]
}]
volumes: [{
name: *"expiditer-disk" | string
}, {
name: *"secret-expiditer" | string
}]
}
}
replicas: *1 | int
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
headchef: {
name: *"headchef" | string
kind: *"deployment" | "stateful" | "daemon"
replicas: *1 | int
image: "gcr.io/myproj/headchef:v0.2.16"
expose: {
port: {
client: 8080
}
}
port: {}
arg: {
env: "prod"
logdir: "/logs"
"event-server": "events:7788"
}
args: ["-env=prod", "-logdir=/logs", "-event-server=events:7788"] | []
env: {}
volume: {
"secret-headchef": {
name: *"secret-headchef" | string
mountPath: "/sslcerts"
subPath: *null | string
readOnly: true
kubernetes: {}
spec: {
secret: {
secretName: *"headchef-secrets" | string
}
}
}
"headchef-disk": {
name: *"headchef-disk" | string
mountPath: *"/logs" | string
subPath: *null | string
readOnly: *false | true
spec: {
gcePersistentDisk: {
pdName: *"headchef-disk" | string
fsType: "ext4"
}
}
kubernetes: {}
}
}
label: {
app: *"headchef" | string
domain: "prod"
component: "kitchen"
}
kubernetes: {
spec: {
template: {
metadata: {
annotations: {
"prometheus.io.scrape": "true"
}
}
spec: {
containers: [{
livenessProbe: {
httpGet: {
path: "/debug/health"
port: 8080
}
initialDelaySeconds: 40
periodSeconds: 3
}
}]
}
}
}
}
envSpec: {}
}
}
service: {
headchef: {
name: *"headchef" | string
port: {
client: {
name: *"client" | string
port: 8080
protocol: *"TCP" | "UDP"
}
}
label: {
app: *"headchef" | string
domain: "prod"
component: "kitchen"
}
kubernetes: {}
}
}
configMap: {}
kubernetes: {
services: {
headchef: {
apiVersion: "v1"
kind: "Service"
metadata: {
name: *"headchef" | string
labels: {
app: *"headchef" | string
domain: "prod"
component: "kitchen"
}
}
spec: {
selector: {
app: *"headchef" | string
domain: "prod"
component: "kitchen"
}
ports: [{
name: *"client" | string
port: 8080
protocol: *"TCP" | "UDP"
}]
}
}
}
deployments: {
headchef: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: *"headchef" | string
labels: {
component: "kitchen"
}
}
spec: {
template: {
metadata: {
labels: {
app: *"headchef" | string
domain: "prod"
component: "kitchen"
}
annotations: {
"prometheus.io.scrape": "true"
}
}
spec: {
containers: [{
name: *"headchef" | string
image: "gcr.io/myproj/headchef:v0.2.16"
args: ["-env=prod", "-logdir=/logs", "-event-server=events:7788"] | []
livenessProbe: {
httpGet: {
path: "/debug/health"
port: 8080
}
initialDelaySeconds: 40
periodSeconds: 3
}
volumeMounts: [{
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}, {
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}]
ports: [{
name: "client"
containerPort: 8080
}]
}]
volumes: [{
name: *"secret-headchef" | string
}, {
name: *"headchef-disk" | string
}]
}
}
replicas: *1 | int
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
linecook: {
name: *"linecook" | string
kind: *"deployment" | "stateful" | "daemon"
replicas: *1 | int
image: "gcr.io/myproj/linecook:v0.1.42"
expose: {
port: {
client: 8080
}
}
port: {}
arg: {
env: "prod"
logdir: "/logs"
"event-server": "events:7788"
name: "linecook"
etcd: "etcd:2379"
"reconnect-delay": "1h"
"-recovery-overlap": "100000"
}
args: ["-env=prod", "-logdir=/logs", "-event-server=events:7788", "-name=linecook", "-etcd=etcd:2379", "-reconnect-delay=1h", "--recovery-overlap=100000"] | []
env: {}
volume: {
"secret-linecook": {
name: "secret-kitchen"
mountPath: *"/etc/certs" | string
subPath: *null | string
readOnly: true
kubernetes: {}
spec: {
secret: {
secretName: *"linecook-secrets" | string
}
}
}
"linecook-disk": {
name: *"linecook-disk" | string
mountPath: *"/logs" | string
subPath: *null | string
readOnly: *false | true
spec: {
gcePersistentDisk: {
pdName: *"linecook-disk" | string
fsType: "ext4"
}
}
kubernetes: {}
}
}
label: {
app: *"linecook" | string
domain: "prod"
component: "kitchen"
}
kubernetes: {
spec: {
template: {
metadata: {
annotations: {
"prometheus.io.scrape": "true"
}
}
spec: {
containers: [{
livenessProbe: {
httpGet: {
path: "/debug/health"
port: 8080
}
initialDelaySeconds: 40
periodSeconds: 3
}
}]
}
}
}
}
envSpec: {}
}
}
service: {
linecook: {
name: *"linecook" | string
port: {
client: {
name: *"client" | string
port: 8080
protocol: *"TCP" | "UDP"
}
}
label: {
app: *"linecook" | string
domain: "prod"
component: "kitchen"
}
kubernetes: {}
}
}
configMap: {}
kubernetes: {
services: {
linecook: {
apiVersion: "v1"
kind: "Service"
metadata: {
name: *"linecook" | string
labels: {
app: *"linecook" | string
domain: "prod"
component: "kitchen"
}
}
spec: {
selector: {
app: *"linecook" | string
domain: "prod"
component: "kitchen"
}
ports: [{
name: *"client" | string
port: 8080
protocol: *"TCP" | "UDP"
}]
}
}
}
deployments: {
linecook: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: *"linecook" | string
labels: {
component: "kitchen"
}
}
spec: {
template: {
metadata: {
labels: {
app: *"linecook" | string
domain: "prod"
component: "kitchen"
}
annotations: {
"prometheus.io.scrape": "true"
}
}
spec: {
containers: [{
name: *"linecook" | string
image: "gcr.io/myproj/linecook:v0.1.42"
args: ["-env=prod", "-logdir=/logs", "-event-server=events:7788", "-name=linecook", "-etcd=etcd:2379", "-reconnect-delay=1h", "--recovery-overlap=100000"] | []
livenessProbe: {
httpGet: {
path: "/debug/health"
port: 8080
}
initialDelaySeconds: 40
periodSeconds: 3
}
volumeMounts: [{
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}, {
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}]
ports: [{
name: "client"
containerPort: 8080
}]
}]
volumes: [{
name: "secret-kitchen"
}, {
name: *"linecook-disk" | string
}]
}
}
replicas: *1 | int
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
pastrychef: {
name: *"pastrychef" | string
kind: *"deployment" | "stateful" | "daemon"
replicas: *1 | int
image: "gcr.io/myproj/pastrychef:v0.1.15"
expose: {
port: {
client: 8080
}
}
port: {}
arg: {
env: "prod"
logdir: "/logs"
"event-server": "events:7788"
"ssh-tunnel-key": "/etc/certs/tunnel-private.pem"
"reconnect-delay": "1m"
etcd: "etcd:2379"
"recovery-overlap": "10000"
}
args: ["-env=prod", "-logdir=/logs", "-event-server=events:7788", "-ssh-tunnel-key=/etc/certs/tunnel-private.pem", "-reconnect-delay=1m", "-etcd=etcd:2379", "-recovery-overlap=10000"] | []
env: {}
volume: {
"secret-pastrychef": {
name: "secret-ssh-key"
mountPath: *"/etc/certs" | string
subPath: *null | string
readOnly: true
spec: {
secret: {
secretName: "secrets"
}
}
kubernetes: {}
}
"pastrychef-disk": {
name: *"pastrychef-disk" | string
mountPath: *"/logs" | string
subPath: *null | string
readOnly: *false | true
spec: {
gcePersistentDisk: {
pdName: *"pastrychef-disk" | string
fsType: "ext4"
}
}
kubernetes: {}
}
}
label: {
app: *"pastrychef" | string
domain: "prod"
component: "kitchen"
}
kubernetes: {
spec: {
template: {
metadata: {
annotations: {
"prometheus.io.scrape": "true"
}
}
spec: {
containers: [{
livenessProbe: {
httpGet: {
path: "/debug/health"
port: 8080
}
initialDelaySeconds: 40
periodSeconds: 3
}
}]
}
}
}
}
envSpec: {}
}
}
service: {
pastrychef: {
name: *"pastrychef" | string
port: {
client: {
name: *"client" | string
port: 8080
protocol: *"TCP" | "UDP"
}
}
label: {
app: *"pastrychef" | string
domain: "prod"
component: "kitchen"
}
kubernetes: {}
}
}
configMap: {}
kubernetes: {
services: {
pastrychef: {
apiVersion: "v1"
kind: "Service"
metadata: {
name: *"pastrychef" | string
labels: {
app: *"pastrychef" | string
domain: "prod"
component: "kitchen"
}
}
spec: {
selector: {
app: *"pastrychef" | string
domain: "prod"
component: "kitchen"
}
ports: [{
name: *"client" | string
port: 8080
protocol: *"TCP" | "UDP"
}]
}
}
}
deployments: {
pastrychef: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: *"pastrychef" | string
labels: {
component: "kitchen"
}
}
spec: {
template: {
metadata: {
labels: {
app: *"pastrychef" | string
domain: "prod"
component: "kitchen"
}
annotations: {
"prometheus.io.scrape": "true"
}
}
spec: {
containers: [{
name: *"pastrychef" | string
image: "gcr.io/myproj/pastrychef:v0.1.15"
args: ["-env=prod", "-logdir=/logs", "-event-server=events:7788", "-ssh-tunnel-key=/etc/certs/tunnel-private.pem", "-reconnect-delay=1m", "-etcd=etcd:2379", "-recovery-overlap=10000"] | []
livenessProbe: {
httpGet: {
path: "/debug/health"
port: 8080
}
initialDelaySeconds: 40
periodSeconds: 3
}
volumeMounts: [{
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}, {
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}]
ports: [{
name: "client"
containerPort: 8080
}]
}]
volumes: [{
name: "secret-ssh-key"
}, {
name: *"pastrychef-disk" | string
}]
}
}
replicas: *1 | int
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
souschef: {
name: *"souschef" | string
kind: *"deployment" | "stateful" | "daemon"
replicas: *1 | int
image: "gcr.io/myproj/souschef:v0.5.3"
expose: {
port: {
client: 8080
}
}
port: {}
arg: {}
args: []
env: {}
label: {
app: *"souschef" | string
domain: "prod"
component: "kitchen"
}
kubernetes: {
spec: {
template: {
metadata: {
annotations: {
"prometheus.io.scrape": "true"
}
}
spec: {
containers: [{
livenessProbe: {
httpGet: {
path: "/debug/health"
port: 8080
}
initialDelaySeconds: 40
periodSeconds: 3
}
}]
}
}
}
}
envSpec: {}
volume: {}
}
}
service: {
souschef: {
name: *"souschef" | string
port: {
client: {
name: *"client" | string
port: 8080
protocol: *"TCP" | "UDP"
}
}
label: {
app: *"souschef" | string
domain: "prod"
component: "kitchen"
}
kubernetes: {}
}
}
configMap: {}
kubernetes: {
services: {
souschef: {
apiVersion: "v1"
kind: "Service"
metadata: {
name: *"souschef" | string
labels: {
app: *"souschef" | string
domain: "prod"
component: "kitchen"
}
}
spec: {
selector: {
app: *"souschef" | string
domain: "prod"
component: "kitchen"
}
ports: [{
name: *"client" | string
port: 8080
protocol: *"TCP" | "UDP"
}]
}
}
}
deployments: {
souschef: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: *"souschef" | string
labels: {
component: "kitchen"
}
}
spec: {
template: {
metadata: {
labels: {
app: *"souschef" | string
domain: "prod"
component: "kitchen"
}
annotations: {
"prometheus.io.scrape": "true"
}
}
spec: {
containers: [{
name: *"souschef" | string
image: "gcr.io/myproj/souschef:v0.5.3"
args: []
livenessProbe: {
httpGet: {
path: "/debug/health"
port: 8080
}
initialDelaySeconds: 40
periodSeconds: 3
}
ports: [{
name: "client"
containerPort: 8080
}]
}]
}
}
replicas: *1 | int
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {}
service: {}
configMap: {}
kubernetes: {
services: {}
deployments: {}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
alertmanager: {
name: *"alertmanager" | string
kind: *"deployment" | "stateful" | "daemon"
replicas: *1 | int
kubernetes: {
spec: {
selector: {
matchLabels: {
app: "alertmanager"
}
}
}
}
image: "prom/alertmanager:v0.15.2"
args: ["--config.file=/etc/alertmanager/alerts.yaml", "--storage.path=/alertmanager", "--web.external-url=https://alertmanager.example.com"]
expose: {
port: {
alertmanager: 9093
}
}
port: {}
arg: {}
env: {}
volume: {
"config-volume": {
name: *"config-volume" | string
mountPath: "/etc/alertmanager"
subPath: *null | string
readOnly: *false | true
spec: {
configMap: {
name: "alertmanager"
}
}
kubernetes: {}
}
alertmanager: {
name: *"alertmanager" | string
mountPath: "/alertmanager"
subPath: *null | string
readOnly: *false | true
spec: {
emptyDir: {}
}
kubernetes: {}
}
}
label: {
app: *"alertmanager" | string
domain: "prod"
component: "mon"
}
envSpec: {}
}
}
service: {
alertmanager: {
name: *"alertmanager" | string
label: {
name: "alertmanager"
app: *"alertmanager" | string
domain: "prod"
component: "mon"
}
port: {
alertmanager: {
name: "main"
port: 9093
protocol: *"TCP" | "UDP"
}
}
kubernetes: {
metadata: {
annotations: {
"prometheus.io/scrape": "true"
"prometheus.io/path": "/metrics"
}
}
}
}
}
configMap: {
alertmanager: {
"alerts.yaml": """
receivers:
- name: pager
slack_configs:
- channel: '#cloudmon'
text: |-
{{ range .Alerts }}{{ .Annotations.description }}
{{ end }}
send_resolved: true
route:
receiver: pager
group_by:
- alertname
- cluster
"""
}
}
kubernetes: {
services: {
alertmanager: {
apiVersion: "v1"
kind: "Service"
metadata: {
name: *"alertmanager" | string
labels: {
name: "alertmanager"
app: *"alertmanager" | string
domain: "prod"
component: "mon"
}
annotations: {
"prometheus.io/scrape": "true"
"prometheus.io/path": "/metrics"
}
}
spec: {
selector: {
name: "alertmanager"
app: *"alertmanager" | string
domain: "prod"
component: "mon"
}
ports: [{
name: "main"
port: 9093
protocol: *"TCP" | "UDP"
}]
}
}
}
deployments: {
alertmanager: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: *"alertmanager" | string
labels: {
component: "mon"
}
}
spec: {
template: {
metadata: {
labels: {
app: *"alertmanager" | string
domain: "prod"
component: "mon"
}
}
spec: {
containers: [{
name: *"alertmanager" | string
image: "prom/alertmanager:v0.15.2"
args: ["--config.file=/etc/alertmanager/alerts.yaml", "--storage.path=/alertmanager", "--web.external-url=https://alertmanager.example.com"]
volumeMounts: [{
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}, {
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}]
ports: [{
name: "alertmanager"
containerPort: 9093
}]
}]
volumes: [{
name: *"config-volume" | string
}, {
name: *"alertmanager" | string
}]
}
}
selector: {
matchLabels: {
app: "alertmanager"
}
}
replicas: *1 | int
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {
alertmanager: {
apiVersion: "v1"
kind: "ConfigMap"
metadata: {
name: "alertmanager"
labels: {
component: "mon"
}
}
data: {
"alerts.yaml": """
receivers:
- name: pager
slack_configs:
- channel: '#cloudmon'
text: |-
{{ range .Alerts }}{{ .Annotations.description }}
{{ end }}
send_resolved: true
route:
receiver: pager
group_by:
- alertname
- cluster
"""
}
}
}
}
deployment: {
grafana: {
name: *"grafana" | string
kind: *"deployment" | "stateful" | "daemon"
replicas: *1 | int
image: "grafana/grafana:4.5.2"
expose: {
port: {
grafana: 3000
}
}
port: {
web: 8080
}
arg: {}
args: []
volume: {
"grafana-volume": {
name: *"grafana-volume" | string
mountPath: "/var/lib/grafana"
subPath: *null | string
readOnly: *false | true
spec: {
gcePersistentDisk: {
pdName: "grafana-volume"
fsType: "ext4"
}
}
kubernetes: {}
}
}
env: {
GF_AUTH_BASIC_ENABLED: "false"
GF_AUTH_ANONYMOUS_ENABLED: "true"
GF_AUTH_ANONYMOUS_ORG_ROLE: "admin"
}
kubernetes: {
spec: {
template: {
spec: {
containers: [{
resources: {
limits: {
cpu: "100m"
memory: "100Mi"
}
requests: {
cpu: "100m"
memory: "100Mi"
}
}
}]
}
}
}
}
label: {
app: *"grafana" | string
domain: "prod"
component: "mon"
}
envSpec: {
GF_AUTH_BASIC_ENABLED: {
value: "false"
}
GF_AUTH_ANONYMOUS_ENABLED: {
value: "true"
}
GF_AUTH_ANONYMOUS_ORG_ROLE: {
value: "admin"
}
}
}
}
service: {
grafana: {
name: *"grafana" | string
port: {
grafana: {
name: *"grafana" | string
port: 3000
protocol: *"TCP" | "UDP"
}
}
label: {
app: *"grafana" | string
domain: "prod"
component: "mon"
}
kubernetes: {}
}
}
configMap: {}
kubernetes: {
services: {
grafana: {
apiVersion: "v1"
kind: "Service"
metadata: {
name: *"grafana" | string
labels: {
app: *"grafana" | string
domain: "prod"
component: "mon"
}
}
spec: {
selector: {
app: *"grafana" | string
domain: "prod"
component: "mon"
}
ports: [{
name: *"grafana" | string
port: 3000
protocol: *"TCP" | "UDP"
}]
}
}
}
deployments: {
grafana: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: *"grafana" | string
labels: {
component: "mon"
}
}
spec: {
template: {
metadata: {
labels: {
app: *"grafana" | string
domain: "prod"
component: "mon"
}
}
spec: {
containers: [{
name: *"grafana" | string
image: "grafana/grafana:4.5.2"
args: []
env: [{
name: "GF_AUTH_BASIC_ENABLED"
value: "false"
}, {
name: "GF_AUTH_ANONYMOUS_ENABLED"
value: "true"
}, {
name: "GF_AUTH_ANONYMOUS_ORG_ROLE"
value: "admin"
}]
volumeMounts: [{
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}]
ports: [{
name: "grafana"
containerPort: 3000
}, {
name: "web"
containerPort: 8080
}]
resources: {
limits: {
cpu: "100m"
memory: "100Mi"
}
requests: {
cpu: "100m"
memory: "100Mi"
}
}
}]
volumes: [{
name: *"grafana-volume" | string
}]
}
}
replicas: *1 | int
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
"node-exporter": {
name: *"node-exporter" | string
kind: "daemon"
replicas: *1 | int
image: "quay.io/prometheus/node-exporter:v0.16.0"
expose: {
port: {
scrape: 9100
}
}
port: {}
arg: {}
args: ["--path.procfs=/host/proc", "--path.sysfs=/host/sys"]
env: {}
volume: {
proc: {
name: *"proc" | string
mountPath: "/host/proc"
subPath: *null | string
readOnly: true
spec: {
hostPath: {
path: "/proc"
}
}
kubernetes: {}
}
sys: {
name: *"sys" | string
mountPath: "/host/sys"
subPath: *null | string
readOnly: true
spec: {
hostPath: {
path: "/sys"
}
}
kubernetes: {}
}
}
kubernetes: {
spec: {
template: {
spec: {
hostNetwork: true
hostPID: true
containers: [{
ports: [{
hostPort: 9100
}]
resources: {
requests: {
memory: "30Mi"
cpu: "100m"
}
limits: {
memory: "50Mi"
cpu: "200m"
}
}
}]
}
}
}
}
label: {
app: *"node-exporter" | string
domain: "prod"
component: "mon"
}
envSpec: {}
}
}
service: {
"node-exporter": {
name: *"node-exporter" | string
port: {
scrape: {
name: "metrics"
port: 9100
protocol: *"TCP" | "UDP"
}
}
kubernetes: {
metadata: {
annotations: {
"prometheus.io/scrape": "true"
}
}
spec: {
type: "ClusterIP"
clusterIP: "None"
}
}
label: {
app: *"node-exporter" | string
domain: "prod"
component: "mon"
}
}
}
configMap: {}
kubernetes: {
services: {
"node-exporter": {
apiVersion: "v1"
kind: "Service"
metadata: {
annotations: {
"prometheus.io/scrape": "true"
}
name: *"node-exporter" | string
labels: {
app: *"node-exporter" | string
domain: "prod"
component: "mon"
}
}
spec: {
type: "ClusterIP"
clusterIP: "None"
selector: {
app: *"node-exporter" | string
domain: "prod"
component: "mon"
}
ports: [{
name: "metrics"
port: 9100
protocol: *"TCP" | "UDP"
}]
}
}
}
deployments: {}
statefulSets: {}
daemonSets: {
"node-exporter": {
apiVersion: "extensions/v1beta1"
metadata: {
name: *"node-exporter" | string
labels: {
component: "mon"
}
}
spec: {
template: {
metadata: {
labels: {
app: *"node-exporter" | string
domain: "prod"
component: "mon"
}
}
spec: {
hostNetwork: true
hostPID: true
volumes: [{
name: *"proc" | string
}, {
name: *"sys" | string
}]
containers: [{
name: *"node-exporter" | string
image: "quay.io/prometheus/node-exporter:v0.16.0"
args: ["--path.procfs=/host/proc", "--path.sysfs=/host/sys"]
volumeMounts: [{
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}, {
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}]
ports: [{
name: "scrape"
hostPort: 9100
containerPort: 9100
}]
resources: {
requests: {
memory: "30Mi"
cpu: "100m"
}
limits: {
memory: "50Mi"
cpu: "200m"
}
}
}]
}
}
}
kind: "DaemonSet"
}
}
configMaps: {}
}
deployment: {
prometheus: {
name: *"prometheus" | string
kind: *"deployment" | "stateful" | "daemon"
replicas: *1 | int
image: "prom/prometheus:v2.4.3"
args: ["--config.file=/etc/prometheus/prometheus.yml", "--web.external-url=https://prometheus.example.com"]
expose: {
port: {
web: 9090
}
}
port: {}
arg: {}
env: {}
volume: {
"config-volume": {
name: *"config-volume" | string
mountPath: "/etc/prometheus"
subPath: *null | string
readOnly: *false | true
spec: {
configMap: {
name: "prometheus"
}
}
kubernetes: {}
}
}
kubernetes: {
spec: {
selector: {
matchLabels: {
app: "prometheus"
}
}
strategy: {
type: "RollingUpdate"
rollingUpdate: {
maxSurge: 0
maxUnavailable: 1
}
}
template: {
metadata: {
annotations: {
"prometheus.io.scrape": "true"
}
}
}
}
}
label: {
app: *"prometheus" | string
domain: "prod"
component: "mon"
}
envSpec: {}
}
}
service: {
prometheus: {
name: *"prometheus" | string
label: {
name: "prometheus"
app: *"prometheus" | string
domain: "prod"
component: "mon"
}
port: {
web: {
name: "main"
port: 9090
nodePort: 30900
protocol: *"TCP" | "UDP"
}
}
kubernetes: {
metadata: {
annotations: {
"prometheus.io/scrape": "true"
}
}
spec: {
type: "NodePort"
}
}
}
}
configMap: {
prometheus: {
"alert.rules": """
groups:
- name: rules.yaml
rules:
- alert: InstanceDown
expr: up == 0
for: 30s
labels:
severity: page
annotations:
description: '{{$labels.app}} of job {{ $labels.job }} has been down for more
than 30 seconds.'
summary: Instance {{$labels.app}} down
- alert: InsufficientPeers
expr: count(up{job=\"etcd\"} == 0) > (count(up{job=\"etcd\"}) / 2 - 1)
for: 3m
labels:
severity: page
annotations:
description: If one more etcd peer goes down the cluster will be unavailable
summary: etcd cluster small
- alert: EtcdNoMaster
expr: sum(etcd_server_has_leader{app=\"etcd\"}) == 0
for: 1s
labels:
severity: page
annotations:
summary: No ETCD master elected.
- alert: PodRestart
expr: (max_over_time(pod_container_status_restarts_total[5m]) - min_over_time(pod_container_status_restarts_total[5m]))
> 2
for: 1m
labels:
severity: page
annotations:
description: '{{$labels.app}} {{ $labels.container }} resturted {{ $value }}
times in 5m.'
summary: Pod for {{$labels.container}} restarts too often
"""
"prometheus.yml": """
global:
scrape_interval: 15s
rule_files:
- /etc/prometheus/alert.rules
alerting:
alertmanagers:
- scheme: http
static_configs:
- targets:
- alertmanager:9093
scrape_configs:
- job_name: kubernetes-apiservers
kubernetes_sd_configs:
- role: endpoints
scheme: https
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
relabel_configs:
- source_labels:
- __meta_kubernetes_namespace
- __meta_kubernetes_service_name
- __meta_kubernetes_endpoint_port_name
action: keep
regex: default;kubernetes;https
- job_name: kubernetes-nodes
scheme: https
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
kubernetes_sd_configs:
- role: node
relabel_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
- target_label: __address__
replacement: kubernetes.default.svc:443
- source_labels:
- __meta_kubernetes_node_name
regex: (.+)
target_label: __metrics_path__
replacement: /api/v1/nodes/${1}/proxy/metrics
- job_name: kubernetes-cadvisor
scheme: https
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
kubernetes_sd_configs:
- role: node
relabel_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
- target_label: __address__
replacement: kubernetes.default.svc:443
- source_labels:
- __meta_kubernetes_node_name
regex: (.+)
target_label: __metrics_path__
replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor
- job_name: kubernetes-service-endpoints
kubernetes_sd_configs:
- role: endpoints
relabel_configs:
- source_labels:
- __meta_kubernetes_service_annotation_prometheus_io_scrape
action: keep
regex: true
- source_labels:
- __meta_kubernetes_service_annotation_prometheus_io_scheme
action: replace
target_label: __scheme__
regex: (https?)
- source_labels:
- __meta_kubernetes_service_annotation_prometheus_io_path
action: replace
target_label: __metrics_path__
regex: (.+)
- source_labels:
- __address__
- __meta_kubernetes_service_annotation_prometheus_io_port
action: replace
target_label: __address__
regex: ([^:]+)(?::\\d+)?;(\\d+)
replacement: $1:$2
- action: labelmap
regex: __meta_kubernetes_service_label_(.+)
- source_labels:
- __meta_kubernetes_namespace
action: replace
target_label: kubernetes_namespace
- source_labels:
- __meta_kubernetes_service_name
action: replace
target_label: kubernetes_name
- job_name: kubernetes-services
metrics_path: /probe
params:
module:
- http_2xx
kubernetes_sd_configs:
- role: service
relabel_configs:
- source_labels:
- __meta_kubernetes_service_annotation_prometheus_io_probe
action: keep
regex: true
- source_labels:
- __address__
target_label: __param_target
- target_label: __address__
replacement: blackbox-exporter.example.com:9115
- source_labels:
- __param_target
target_label: app
- action: labelmap
regex: __meta_kubernetes_service_label_(.+)
- source_labels:
- __meta_kubernetes_namespace
target_label: kubernetes_namespace
- source_labels:
- __meta_kubernetes_service_name
target_label: kubernetes_name
- job_name: kubernetes-ingresses
metrics_path: /probe
params:
module:
- http_2xx
kubernetes_sd_configs:
- role: ingress
relabel_configs:
- source_labels:
- __meta_kubernetes_ingress_annotation_prometheus_io_probe
action: keep
regex: true
- source_labels:
- __meta_kubernetes_ingress_scheme
- __address__
- __meta_kubernetes_ingress_path
regex: (.+);(.+);(.+)
replacement: ${1}://${2}${3}
target_label: __param_target
- target_label: __address__
replacement: blackbox-exporter.example.com:9115
- source_labels:
- __param_target
target_label: app
- action: labelmap
regex: __meta_kubernetes_ingress_label_(.+)
- source_labels:
- __meta_kubernetes_namespace
target_label: kubernetes_namespace
- source_labels:
- __meta_kubernetes_ingress_name
target_label: kubernetes_name
- job_name: kubernetes-pods
kubernetes_sd_configs:
- role: pod
relabel_configs:
- source_labels:
- __meta_kubernetes_pod_annotation_prometheus_io_scrape
action: keep
regex: true
- source_labels:
- __meta_kubernetes_pod_annotation_prometheus_io_path
action: replace
target_label: __metrics_path__
regex: (.+)
- source_labels:
- __address__
- __meta_kubernetes_pod_annotation_prometheus_io_port
action: replace
regex: ([^:]+)(?::\\d+)?;(\\d+)
replacement: $1:$2
target_label: __address__
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
- source_labels:
- __meta_kubernetes_namespace
action: replace
target_label: kubernetes_namespace
- source_labels:
- __meta_kubernetes_pod_name
action: replace
target_label: kubernetes_pod_name
"""
}
}
// Rendered Kubernetes objects for the "prometheus" monitoring service:
// a NodePort Service, a Deployment, and a ConfigMap carrying the alert
// rules and scrape configuration. This struct is evaluated output
// (cue eval dump) — field values with `*x | y` are defaults with their
// allowed type/alternatives.
kubernetes: {
services: {
prometheus: {
apiVersion: "v1"
kind: "Service"
metadata: {
annotations: {
"prometheus.io/scrape": "true"
}
name: *"prometheus" | string
labels: {
name: "prometheus"
app: *"prometheus" | string
domain: "prod"
component: "mon"
}
}
spec: {
// NodePort 30900 exposes the Prometheus UI (container port 9090)
// on every cluster node.
type: "NodePort"
selector: {
name: "prometheus"
app: *"prometheus" | string
domain: "prod"
component: "mon"
}
ports: [{
name: "main"
port: 9090
nodePort: 30900
protocol: *"TCP" | "UDP"
}]
}
}
}
deployments: {
prometheus: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: *"prometheus" | string
labels: {
component: "mon"
}
}
spec: {
template: {
metadata: {
labels: {
app: *"prometheus" | string
domain: "prod"
component: "mon"
}
annotations: {
"prometheus.io.scrape": "true"
}
}
spec: {
containers: [{
name: *"prometheus" | string
image: "prom/prometheus:v2.4.3"
args: ["--config.file=/etc/prometheus/prometheus.yml", "--web.external-url=https://prometheus.example.com"]
// NOTE(review): `v` below is an unresolved comprehension variable
// left over from evaluating the source template (the mount list is
// generated from the deployment's `volume` field). It does not
// resolve in this dump — confirm against the generating template.
volumeMounts: [{
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}]
ports: [{
name: "web"
containerPort: 9090
}]
}]
volumes: [{
name: *"config-volume" | string
}]
}
}
selector: {
matchLabels: {
app: "prometheus"
}
}
strategy: {
type: "RollingUpdate"
rollingUpdate: {
maxSurge: 0
maxUnavailable: 1
}
}
replicas: *1 | int
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {
prometheus: {
apiVersion: "v1"
kind: "ConfigMap"
metadata: {
name: "prometheus"
labels: {
component: "mon"
}
}
data: {
// Prometheus alerting rules: instance down, etcd quorum loss,
// no etcd leader, and excessive pod restarts.
// NOTE(review): the PodRestart description misspells "restarted" as
// "resturted"; fix it in the source template so all rendered copies
// stay in sync — not patched here.
"alert.rules": """
groups:
- name: rules.yaml
  rules:
  - alert: InstanceDown
    expr: up == 0
    for: 30s
    labels:
      severity: page
    annotations:
      description: '{{$labels.app}} of job {{ $labels.job }} has been down for more
        than 30 seconds.'
      summary: Instance {{$labels.app}} down
  - alert: InsufficientPeers
    expr: count(up{job=\"etcd\"} == 0) > (count(up{job=\"etcd\"}) / 2 - 1)
    for: 3m
    labels:
      severity: page
    annotations:
      description: If one more etcd peer goes down the cluster will be unavailable
      summary: etcd cluster small
  - alert: EtcdNoMaster
    expr: sum(etcd_server_has_leader{app=\"etcd\"}) == 0
    for: 1s
    labels:
      severity: page
    annotations:
      summary: No ETCD master elected.
  - alert: PodRestart
    expr: (max_over_time(pod_container_status_restarts_total[5m]) - min_over_time(pod_container_status_restarts_total[5m]))
      > 2
    for: 1m
    labels:
      severity: page
    annotations:
      description: '{{$labels.app}} {{ $labels.container }} resturted {{ $value }}
        times in 5m.'
      summary: Pod for {{$labels.container}} restarts too often
  """
// Scrape configuration: discovers apiservers, nodes, cAdvisor,
// service endpoints, probed services/ingresses (via blackbox
// exporter) and pods through kubernetes_sd_configs.
"prometheus.yml": """
global:
  scrape_interval: 15s
rule_files:
- /etc/prometheus/alert.rules
alerting:
  alertmanagers:
  - scheme: http
    static_configs:
    - targets:
      - alertmanager:9093
scrape_configs:
- job_name: kubernetes-apiservers
  kubernetes_sd_configs:
  - role: endpoints
  scheme: https
  tls_config:
    ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
  bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
  relabel_configs:
  - source_labels:
    - __meta_kubernetes_namespace
    - __meta_kubernetes_service_name
    - __meta_kubernetes_endpoint_port_name
    action: keep
    regex: default;kubernetes;https
- job_name: kubernetes-nodes
  scheme: https
  tls_config:
    ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
  bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
  kubernetes_sd_configs:
  - role: node
  relabel_configs:
  - action: labelmap
    regex: __meta_kubernetes_node_label_(.+)
  - target_label: __address__
    replacement: kubernetes.default.svc:443
  - source_labels:
    - __meta_kubernetes_node_name
    regex: (.+)
    target_label: __metrics_path__
    replacement: /api/v1/nodes/${1}/proxy/metrics
- job_name: kubernetes-cadvisor
  scheme: https
  tls_config:
    ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
  bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
  kubernetes_sd_configs:
  - role: node
  relabel_configs:
  - action: labelmap
    regex: __meta_kubernetes_node_label_(.+)
  - target_label: __address__
    replacement: kubernetes.default.svc:443
  - source_labels:
    - __meta_kubernetes_node_name
    regex: (.+)
    target_label: __metrics_path__
    replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor
- job_name: kubernetes-service-endpoints
  kubernetes_sd_configs:
  - role: endpoints
  relabel_configs:
  - source_labels:
    - __meta_kubernetes_service_annotation_prometheus_io_scrape
    action: keep
    regex: true
  - source_labels:
    - __meta_kubernetes_service_annotation_prometheus_io_scheme
    action: replace
    target_label: __scheme__
    regex: (https?)
  - source_labels:
    - __meta_kubernetes_service_annotation_prometheus_io_path
    action: replace
    target_label: __metrics_path__
    regex: (.+)
  - source_labels:
    - __address__
    - __meta_kubernetes_service_annotation_prometheus_io_port
    action: replace
    target_label: __address__
    regex: ([^:]+)(?::\\d+)?;(\\d+)
    replacement: $1:$2
  - action: labelmap
    regex: __meta_kubernetes_service_label_(.+)
  - source_labels:
    - __meta_kubernetes_namespace
    action: replace
    target_label: kubernetes_namespace
  - source_labels:
    - __meta_kubernetes_service_name
    action: replace
    target_label: kubernetes_name
- job_name: kubernetes-services
  metrics_path: /probe
  params:
    module:
    - http_2xx
  kubernetes_sd_configs:
  - role: service
  relabel_configs:
  - source_labels:
    - __meta_kubernetes_service_annotation_prometheus_io_probe
    action: keep
    regex: true
  - source_labels:
    - __address__
    target_label: __param_target
  - target_label: __address__
    replacement: blackbox-exporter.example.com:9115
  - source_labels:
    - __param_target
    target_label: app
  - action: labelmap
    regex: __meta_kubernetes_service_label_(.+)
  - source_labels:
    - __meta_kubernetes_namespace
    target_label: kubernetes_namespace
  - source_labels:
    - __meta_kubernetes_service_name
    target_label: kubernetes_name
- job_name: kubernetes-ingresses
  metrics_path: /probe
  params:
    module:
    - http_2xx
  kubernetes_sd_configs:
  - role: ingress
  relabel_configs:
  - source_labels:
    - __meta_kubernetes_ingress_annotation_prometheus_io_probe
    action: keep
    regex: true
  - source_labels:
    - __meta_kubernetes_ingress_scheme
    - __address__
    - __meta_kubernetes_ingress_path
    regex: (.+);(.+);(.+)
    replacement: ${1}://${2}${3}
    target_label: __param_target
  - target_label: __address__
    replacement: blackbox-exporter.example.com:9115
  - source_labels:
    - __param_target
    target_label: app
  - action: labelmap
    regex: __meta_kubernetes_ingress_label_(.+)
  - source_labels:
    - __meta_kubernetes_namespace
    target_label: kubernetes_namespace
  - source_labels:
    - __meta_kubernetes_ingress_name
    target_label: kubernetes_name
- job_name: kubernetes-pods
  kubernetes_sd_configs:
  - role: pod
  relabel_configs:
  - source_labels:
    - __meta_kubernetes_pod_annotation_prometheus_io_scrape
    action: keep
    regex: true
  - source_labels:
    - __meta_kubernetes_pod_annotation_prometheus_io_path
    action: replace
    target_label: __metrics_path__
    regex: (.+)
  - source_labels:
    - __address__
    - __meta_kubernetes_pod_annotation_prometheus_io_port
    action: replace
    regex: ([^:]+)(?::\\d+)?;(\\d+)
    replacement: $1:$2
    target_label: __address__
  - action: labelmap
    regex: __meta_kubernetes_pod_label_(.+)
  - source_labels:
    - __meta_kubernetes_namespace
    action: replace
    target_label: kubernetes_namespace
  - source_labels:
    - __meta_kubernetes_pod_name
    action: replace
    target_label: kubernetes_pod_name
  """
}
}
}
}
// Empty placeholder structs: this section of the dump defines no objects of
// its own; the empty structs unify with the concrete definitions declared
// elsewhere in the configuration.
deployment: {}
service: {}
configMap: {}
kubernetes: {
services: {}
deployments: {}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
// High-level deployment spec for "authproxy", an oauth2_proxy instance that
// fronts the cluster's web services. The kubernetes objects are derived from
// this struct elsewhere in the configuration.
deployment: {
authproxy: {
name: *"authproxy" | string
kind: *"deployment" | "stateful" | "daemon"
replicas: *1 | int
image: "skippy/oauth2_proxy:2.0.1"
args: ["--config=/etc/authproxy/authproxy.cfg"]
expose: {
port: {
// oauth2_proxy listens for HTTP clients on 4180.
client: 4180
}
}
port: {}
arg: {}
env: {}
volume: {
// Mounts the "authproxy" ConfigMap (the proxy's config file) at
// /etc/authproxy.
"config-volume": {
name: *"config-volume" | string
mountPath: "/etc/authproxy"
subPath: *null | string
readOnly: *false | true
spec: {
configMap: {
name: "authproxy"
}
}
kubernetes: {}
}
}
label: {
app: *"authproxy" | string
domain: "prod"
component: "proxy"
}
kubernetes: {}
envSpec: {}
}
}
// High-level service spec for "authproxy": exposes the oauth2_proxy client
// port (4180) and carries the labels used as the Service selector.
service: {
authproxy: {
name: *"authproxy" | string
port: {
client: {
name: *"client" | string
port: 4180
protocol: *"TCP" | "UDP"
}
}
label: {
app: *"authproxy" | string
domain: "prod"
component: "proxy"
}
kubernetes: {}
}
}
// Raw config-file payload for the authproxy ConfigMap: a google_auth_proxy
// configuration routing per-path upstreams to the cluster's services.
// NOTE(review): client_id/client_secret are "---" placeholders and
// cookie_secret is a dummy value — presumably injected elsewhere for real
// deployments; confirm before use.
configMap: {
authproxy: {
"authproxy.cfg": """
## Google Auth Proxy Config File
## https://github.com/bitly/google_auth_proxy
## <addr>:<port> to listen on for HTTP clients
http_address = \"0.0.0.0:4180\"
## the OAuth Redirect URL.
redirect_url = \"https://auth.example.com/oauth2/callback\"
## the http url(s) of the upstream endpoint. If multiple, routing is based on path
upstreams = [
    # frontend
    \"http://frontend-waiter:7080/dpr/\",
    \"http://frontend-maitred:7080/ui/\",
    \"http://frontend-maitred:7080/ui\",
    \"http://frontend-maitred:7080/report/\",
    \"http://frontend-maitred:7080/report\",
    \"http://frontend-maitred:7080/static/\",
    # kitchen
    \"http://kitchen-chef:8080/visit\",
    # infrastructure
    \"http://download:7080/file/\",
    \"http://download:7080/archive\",
    \"http://tasks:7080/tasks\",
    \"http://tasks:7080/tasks/\",
]
## pass HTTP Basic Auth, X-Forwarded-User and X-Forwarded-Email information to upstream
pass_basic_auth = true
request_logging = true
## Google Apps Domains to allow authentication for
google_apps_domains = [
    \"example.com\",
]
email_domains = [
    \"example.com\",
]
## The Google OAuth Client ID, Secret
client_id = \"---\"
client_secret = \"---\"
## Cookie Settings
## Secret - the seed string for secure cookies
## Domain - optional cookie domain to force cookies to (ie: .yourcompany.com)
## Expire - expire timeframe for cookie
cookie_secret = \"won't tell you\"
cookie_domain = \".example.com\"
cookie_https_only = true
"""
}
}
// Rendered Kubernetes objects for "authproxy": a ClusterIP Service on port
// 4180, a Deployment running oauth2_proxy, and the ConfigMap with its
// configuration file.
kubernetes: {
services: {
authproxy: {
apiVersion: "v1"
kind: "Service"
metadata: {
name: *"authproxy" | string
labels: {
app: *"authproxy" | string
domain: "prod"
component: "proxy"
}
}
spec: {
selector: {
app: *"authproxy" | string
domain: "prod"
component: "proxy"
}
ports: [{
name: *"client" | string
port: 4180
protocol: *"TCP" | "UDP"
}]
}
}
}
deployments: {
authproxy: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: *"authproxy" | string
labels: {
component: "proxy"
}
}
spec: {
template: {
metadata: {
labels: {
app: *"authproxy" | string
domain: "prod"
component: "proxy"
}
}
spec: {
containers: [{
name: *"authproxy" | string
image: "skippy/oauth2_proxy:2.0.1"
args: ["--config=/etc/authproxy/authproxy.cfg"]
// NOTE(review): `v` is an unresolved comprehension variable from the
// source template (mounts are derived from `deployment.authproxy.volume`);
// it does not resolve in this dump — confirm against the template.
volumeMounts: [{
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}]
ports: [{
name: "client"
containerPort: 4180
}]
}]
volumes: [{
name: *"config-volume" | string
}]
}
}
replicas: *1 | int
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {
// Same authproxy.cfg payload as the top-level configMap field, wrapped in
// a v1 ConfigMap object.
authproxy: {
apiVersion: "v1"
kind: "ConfigMap"
metadata: {
name: "authproxy"
labels: {
component: "proxy"
}
}
data: {
"authproxy.cfg": """
## Google Auth Proxy Config File
## https://github.com/bitly/google_auth_proxy
## <addr>:<port> to listen on for HTTP clients
http_address = \"0.0.0.0:4180\"
## the OAuth Redirect URL.
redirect_url = \"https://auth.example.com/oauth2/callback\"
## the http url(s) of the upstream endpoint. If multiple, routing is based on path
upstreams = [
    # frontend
    \"http://frontend-waiter:7080/dpr/\",
    \"http://frontend-maitred:7080/ui/\",
    \"http://frontend-maitred:7080/ui\",
    \"http://frontend-maitred:7080/report/\",
    \"http://frontend-maitred:7080/report\",
    \"http://frontend-maitred:7080/static/\",
    # kitchen
    \"http://kitchen-chef:8080/visit\",
    # infrastructure
    \"http://download:7080/file/\",
    \"http://download:7080/archive\",
    \"http://tasks:7080/tasks\",
    \"http://tasks:7080/tasks/\",
]
## pass HTTP Basic Auth, X-Forwarded-User and X-Forwarded-Email information to upstream
pass_basic_auth = true
request_logging = true
## Google Apps Domains to allow authentication for
google_apps_domains = [
    \"example.com\",
]
email_domains = [
    \"example.com\",
]
## The Google OAuth Client ID, Secret
client_id = \"---\"
client_secret = \"---\"
## Cookie Settings
## Secret - the seed string for secure cookies
## Domain - optional cookie domain to force cookies to (ie: .yourcompany.com)
## Expire - expire timeframe for cookie
cookie_secret = \"won't tell you\"
cookie_domain = \".example.com\"
cookie_https_only = true
"""
}
}
}
}
// High-level deployment spec for "goget", a TLS-terminating vanity-import
// server exposed on HTTPS port 7443, with its certificates mounted from a
// Secret.
deployment: {
goget: {
name: *"goget" | string
kind: *"deployment" | "stateful" | "daemon"
replicas: *1 | int
image: "gcr.io/myproj/goget:v0.5.1"
expose: {
port: {
https: 7443
}
}
port: {}
arg: {}
args: []
env: {}
volume: {
// TLS material from the "goget-secrets" Secret, mounted at /etc/ssl.
"secret-volume": {
name: *"secret-volume" | string
mountPath: "/etc/ssl"
subPath: *null | string
readOnly: *false | true
spec: {
secret: {
secretName: "goget-secrets"
}
}
kubernetes: {}
}
}
label: {
app: *"goget" | string
domain: "prod"
component: "proxy"
}
kubernetes: {}
envSpec: {}
}
}
// High-level service spec for "goget": a LoadBalancer with a fixed external
// IP, forwarding 443 (http-named port) and 7443 (https).
service: {
goget: {
name: *"goget" | string
port: {
http: {
name: *"http" | string
port: 443
protocol: *"TCP" | "UDP"
}
https: {
name: *"https" | string
port: 7443
protocol: *"TCP" | "UDP"
}
}
kubernetes: {
spec: {
type: "LoadBalancer"
loadBalancerIP: "1.3.5.7"
}
}
label: {
app: *"goget" | string
domain: "prod"
component: "proxy"
}
}
}
// goget defines no ConfigMap; empty struct unifies with other definitions.
configMap: {}
// Rendered Kubernetes objects for "goget": a LoadBalancer Service with a
// pinned external IP and a Deployment mounting its TLS secret volume.
kubernetes: {
services: {
goget: {
apiVersion: "v1"
kind: "Service"
spec: {
type: "LoadBalancer"
selector: {
app: *"goget" | string
domain: "prod"
component: "proxy"
}
ports: [{
name: *"http" | string
port: 443
protocol: *"TCP" | "UDP"
}, {
name: *"https" | string
port: 7443
protocol: *"TCP" | "UDP"
}]
loadBalancerIP: "1.3.5.7"
}
metadata: {
name: *"goget" | string
labels: {
app: *"goget" | string
domain: "prod"
component: "proxy"
}
}
}
}
deployments: {
goget: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: *"goget" | string
labels: {
component: "proxy"
}
}
spec: {
template: {
metadata: {
labels: {
app: *"goget" | string
domain: "prod"
component: "proxy"
}
}
spec: {
containers: [{
name: *"goget" | string
image: "gcr.io/myproj/goget:v0.5.1"
args: []
// NOTE(review): `v` is an unresolved comprehension variable from the
// source template (mounts derive from `deployment.goget.volume`);
// it does not resolve in this dump — confirm against the template.
volumeMounts: [{
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}]
ports: [{
name: "https"
containerPort: 7443
}]
}]
volumes: [{
name: *"secret-volume" | string
}]
}
}
replicas: *1 | int
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
// High-level deployment spec for "nginx", the front-door reverse proxy:
// serves 80/443, mounts TLS material from a Secret and its nginx.conf from
// a ConfigMap (via subPath so only the single file is overlaid).
deployment: {
nginx: {
name: *"nginx" | string
kind: *"deployment" | "stateful" | "daemon"
replicas: *1 | int
image: "nginx:1.11.10-alpine"
expose: {
port: {
http: 80
https: 443
}
}
port: {}
arg: {}
args: []
env: {}
volume: {
// TLS certs/keys from the "proxy-secrets" Secret at /etc/ssl.
"secret-volume": {
name: *"secret-volume" | string
mountPath: "/etc/ssl"
subPath: *null | string
readOnly: *false | true
spec: {
secret: {
secretName: "proxy-secrets"
}
}
kubernetes: {}
}
// nginx.conf overlaid from the "nginx" ConfigMap via subPath.
"config-volume": {
name: *"config-volume" | string
mountPath: "/etc/nginx/nginx.conf"
subPath: "nginx.conf"
readOnly: *false | true
spec: {
configMap: {
name: "nginx"
}
}
kubernetes: {}
}
}
label: {
app: *"nginx" | string
domain: "prod"
component: "proxy"
}
kubernetes: {}
envSpec: {}
}
}
// High-level service spec for "nginx": a LoadBalancer with a fixed external
// IP exposing HTTP (80) and HTTPS (443).
service: {
nginx: {
name: *"nginx" | string
port: {
http: {
name: *"http" | string
port: 80
protocol: *"TCP" | "UDP"
}
https: {
name: *"https" | string
port: 443
protocol: *"TCP" | "UDP"
}
}
kubernetes: {
spec: {
type: "LoadBalancer"
loadBalancerIP: "1.3.4.5"
}
}
label: {
app: *"nginx" | string
domain: "prod"
component: "proxy"
}
}
}
// Raw nginx.conf payload for the nginx ConfigMap: TLS-terminating virtual
// hosts for notifications, go-get vanity imports, an appspot redirector,
// the kubeproxy URI space, and a catch-all that forwards to authproxy.
// NOTE(review): typos inside the embedded config ("Enable HTST." should be
// "HSTS", "Seems saver" should be "safer") — fix in the source template so
// both rendered copies stay in sync; not patched here.
configMap: {
nginx: {
"nginx.conf": """
events {
    worker_connections 768;
}
http {
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    # needs to be high for some download jobs.
    keepalive_timeout 400;
    # proxy_connect_timeout  300;
    proxy_send_timeout 300;
    proxy_read_timeout 300;
    send_timeout 300;
    types_hash_max_size 2048;
    include /etc/nginx/mime.types;
    default_type application/octet-stream;
    access_log /dev/stdout;
    error_log /dev/stdout;
    # Disable POST body size constraints. We often deal with large
    # files. Especially docker containers may be large.
    client_max_body_size 0;
    upstream goget {
        server localhost:7070;
    }
    # Redirect incoming Google Cloud Storage notifications:
    server {
        listen 443 ssl;
        server_name notify.example.com notify2.example.com;
        ssl_certificate /etc/ssl/server.crt;
        ssl_certificate_key /etc/ssl/server.key;
        # Security enhancements to deal with poodles and the like.
        # See https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html
        # ssl_ciphers 'AES256+EECDH:AES256+EDH';
        ssl_ciphers \"ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA:ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES256-GCM-SHA384:AES128-GCM-SHA256:AES256-SHA256:AES128-SHA256:AES256-SHA:AES128-SHA:DES-CBC3-SHA:HIGH:!aNULL:!eNULL:!EXPORT:!DES:!MD5:!PSK:!RC4\";
        # We don't like poodles.
        ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
        ssl_session_cache shared:SSL:10m;
        # Enable Forward secrecy.
        ssl_dhparam /etc/ssl/dhparam.pem;
        ssl_prefer_server_ciphers on;
        # Enable HTST.
        add_header Strict-Transport-Security max-age=1209600;
        # required to avoid HTTP 411: see Issue #1486 (https://github.com/dotcloud/docker/issues/1486)
        chunked_transfer_encoding on;
        location / {
            proxy_pass http://tasks:7080;
            proxy_connect_timeout 1;
        }
    }
    server {
        listen 80;
        listen 443 ssl;
        server_name x.example.com example.io;
        location ~ \"(/[^/]+)(/.*)?\" {
            set $myhost $host;
            if ($arg_go-get = \"1\") {
                set $myhost \"goget\";
            }
            proxy_pass http://$myhost$1;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Scheme $scheme;
            proxy_connect_timeout 1;
        }
        location / {
            set $myhost $host;
            if ($arg_go-get = \"1\") {
                set $myhost \"goget\";
            }
            proxy_pass http://$myhost;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Scheme $scheme;
            proxy_connect_timeout 1;
        }
    }
    server {
        listen 80;
        server_name www.example.com w.example.com;
        resolver 8.8.8.8;
        location / {
            proxy_set_header X-Forwarded-Host $host;
            proxy_set_header X-Forwarded-Server $host;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_pass http://$host.default.example.appspot.com/$request_uri;
            proxy_redirect http://$host.default.example.appspot.com/ /;
        }
    }
    # Kubernetes URI space. Maps URIs paths to specific servers using the
    # proxy.
    server {
        listen 80;
        listen 443 ssl;
        server_name proxy.example.com;
        ssl_certificate /etc/ssl/server.crt;
        ssl_certificate_key /etc/ssl/server.key;
        # Security enhancements to deal with poodles and the like.
        # See https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html
        # ssl_ciphers 'AES256+EECDH:AES256+EDH';
        ssl_ciphers \"ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA:ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES256-GCM-SHA384:AES128-GCM-SHA256:AES256-SHA256:AES128-SHA256:AES256-SHA:AES128-SHA:DES-CBC3-SHA:HIGH:!aNULL:!eNULL:!EXPORT:!DES:!MD5:!PSK:!RC4\";
        # We don't like poodles.
        ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
        ssl_session_cache shared:SSL:10m;
        # Enable Forward secrecy.
        ssl_dhparam /etc/ssl/dhparam.pem;
        ssl_prefer_server_ciphers on;
        # Enable HTST.
        add_header Strict-Transport-Security max-age=1209600;
        if ($ssl_protocol = \"\") {
            rewrite ^ https://$host$request_uri? permanent;
        }
        # required to avoid HTTP 411: see Issue #1486 (https://github.com/dotcloud/docker/issues/1486)
        chunked_transfer_encoding on;
        location / {
            proxy_pass http://kubeproxy:4180;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Scheme $scheme;
            proxy_connect_timeout 1;
        }
    }
    server {
        # We could add the following line and the connection would still be SSL,
        # but it doesn't appear to be necessary. Seems saver this way.
        listen 80;
        listen 443 default ssl;
        server_name ~^(?<sub>.*)\\.example\\.com$;
        ssl_certificate /etc/ssl/server.crt;
        ssl_certificate_key /etc/ssl/server.key;
        # Security enhancements to deal with poodles and the like.
        # See https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html
        # ssl_ciphers 'AES256+EECDH:AES256+EDH';
        ssl_ciphers \"ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA:ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES256-GCM-SHA384:AES128-GCM-SHA256:AES256-SHA256:AES128-SHA256:AES256-SHA:AES128-SHA:DES-CBC3-SHA:HIGH:!aNULL:!eNULL:!EXPORT:!DES:!MD5:!PSK:!RC4\";
        # We don't like poodles.
        ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
        ssl_session_cache shared:SSL:10m;
        # Enable Forward secrecy.
        ssl_dhparam /etc/ssl/dhparam.pem;
        ssl_prefer_server_ciphers on;
        # Enable HTST.
        add_header Strict-Transport-Security max-age=1209600;
        if ($ssl_protocol = \"\") {
            rewrite ^ https://$host$request_uri? permanent;
        }
        # required to avoid HTTP 411: see Issue #1486 (https://github.com/dotcloud/docker/issues/1486)
        chunked_transfer_encoding on;
        location / {
            proxy_pass http://authproxy:4180;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Scheme $scheme;
            proxy_connect_timeout 1;
        }
    }
}
"""
}
}
// Rendered Kubernetes objects for "nginx": a LoadBalancer Service with a
// pinned external IP, a Deployment mounting the TLS secret volume and the
// nginx.conf ConfigMap (via subPath), and the ConfigMap itself.
kubernetes: {
services: {
nginx: {
apiVersion: "v1"
kind: "Service"
spec: {
type: "LoadBalancer"
selector: {
app: *"nginx" | string
domain: "prod"
component: "proxy"
}
ports: [{
name: *"http" | string
port: 80
protocol: *"TCP" | "UDP"
}, {
name: *"https" | string
port: 443
protocol: *"TCP" | "UDP"
}]
loadBalancerIP: "1.3.4.5"
}
metadata: {
name: *"nginx" | string
labels: {
app: *"nginx" | string
domain: "prod"
component: "proxy"
}
}
}
}
deployments: {
nginx: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: *"nginx" | string
labels: {
component: "proxy"
}
}
spec: {
template: {
metadata: {
labels: {
app: *"nginx" | string
domain: "prod"
component: "proxy"
}
}
spec: {
containers: [{
name: *"nginx" | string
image: "nginx:1.11.10-alpine"
args: []
// NOTE(review): the first mount entry uses `v`, an unresolved
// comprehension variable from the source template; the second
// (config-volume) entry rendered concretely — confirm against the
// generating template.
volumeMounts: [{
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}, {
name: *"config-volume" | string
subPath: "nginx.conf"
mountPath: "/etc/nginx/nginx.conf"
}]
ports: [{
name: "http"
containerPort: 80
}, {
name: "https"
containerPort: 443
}]
}]
volumes: [{
name: *"secret-volume" | string
}, {
name: *"config-volume" | string
}]
}
}
replicas: *1 | int
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {
nginx: {
apiVersion: "v1"
kind: "ConfigMap"
metadata: {
name: "nginx"
labels: {
component: "proxy"
}
}
data: {
// Same nginx.conf payload as the top-level configMap field.
// NOTE(review): "Enable HTST." (should be HSTS) and "Seems saver"
// (should be safer) typos inside the config — fix in the source
// template so both rendered copies stay in sync; not patched here.
"nginx.conf": """
events {
    worker_connections 768;
}
http {
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    # needs to be high for some download jobs.
    keepalive_timeout 400;
    # proxy_connect_timeout  300;
    proxy_send_timeout 300;
    proxy_read_timeout 300;
    send_timeout 300;
    types_hash_max_size 2048;
    include /etc/nginx/mime.types;
    default_type application/octet-stream;
    access_log /dev/stdout;
    error_log /dev/stdout;
    # Disable POST body size constraints. We often deal with large
    # files. Especially docker containers may be large.
    client_max_body_size 0;
    upstream goget {
        server localhost:7070;
    }
    # Redirect incoming Google Cloud Storage notifications:
    server {
        listen 443 ssl;
        server_name notify.example.com notify2.example.com;
        ssl_certificate /etc/ssl/server.crt;
        ssl_certificate_key /etc/ssl/server.key;
        # Security enhancements to deal with poodles and the like.
        # See https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html
        # ssl_ciphers 'AES256+EECDH:AES256+EDH';
        ssl_ciphers \"ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA:ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES256-GCM-SHA384:AES128-GCM-SHA256:AES256-SHA256:AES128-SHA256:AES256-SHA:AES128-SHA:DES-CBC3-SHA:HIGH:!aNULL:!eNULL:!EXPORT:!DES:!MD5:!PSK:!RC4\";
        # We don't like poodles.
        ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
        ssl_session_cache shared:SSL:10m;
        # Enable Forward secrecy.
        ssl_dhparam /etc/ssl/dhparam.pem;
        ssl_prefer_server_ciphers on;
        # Enable HTST.
        add_header Strict-Transport-Security max-age=1209600;
        # required to avoid HTTP 411: see Issue #1486 (https://github.com/dotcloud/docker/issues/1486)
        chunked_transfer_encoding on;
        location / {
            proxy_pass http://tasks:7080;
            proxy_connect_timeout 1;
        }
    }
    server {
        listen 80;
        listen 443 ssl;
        server_name x.example.com example.io;
        location ~ \"(/[^/]+)(/.*)?\" {
            set $myhost $host;
            if ($arg_go-get = \"1\") {
                set $myhost \"goget\";
            }
            proxy_pass http://$myhost$1;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Scheme $scheme;
            proxy_connect_timeout 1;
        }
        location / {
            set $myhost $host;
            if ($arg_go-get = \"1\") {
                set $myhost \"goget\";
            }
            proxy_pass http://$myhost;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Scheme $scheme;
            proxy_connect_timeout 1;
        }
    }
    server {
        listen 80;
        server_name www.example.com w.example.com;
        resolver 8.8.8.8;
        location / {
            proxy_set_header X-Forwarded-Host $host;
            proxy_set_header X-Forwarded-Server $host;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_pass http://$host.default.example.appspot.com/$request_uri;
            proxy_redirect http://$host.default.example.appspot.com/ /;
        }
    }
    # Kubernetes URI space. Maps URIs paths to specific servers using the
    # proxy.
    server {
        listen 80;
        listen 443 ssl;
        server_name proxy.example.com;
        ssl_certificate /etc/ssl/server.crt;
        ssl_certificate_key /etc/ssl/server.key;
        # Security enhancements to deal with poodles and the like.
        # See https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html
        # ssl_ciphers 'AES256+EECDH:AES256+EDH';
        ssl_ciphers \"ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA:ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES256-GCM-SHA384:AES128-GCM-SHA256:AES256-SHA256:AES128-SHA256:AES256-SHA:AES128-SHA:DES-CBC3-SHA:HIGH:!aNULL:!eNULL:!EXPORT:!DES:!MD5:!PSK:!RC4\";
        # We don't like poodles.
        ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
        ssl_session_cache shared:SSL:10m;
        # Enable Forward secrecy.
        ssl_dhparam /etc/ssl/dhparam.pem;
        ssl_prefer_server_ciphers on;
        # Enable HTST.
        add_header Strict-Transport-Security max-age=1209600;
        if ($ssl_protocol = \"\") {
            rewrite ^ https://$host$request_uri? permanent;
        }
        # required to avoid HTTP 411: see Issue #1486 (https://github.com/dotcloud/docker/issues/1486)
        chunked_transfer_encoding on;
        location / {
            proxy_pass http://kubeproxy:4180;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Scheme $scheme;
            proxy_connect_timeout 1;
        }
    }
    server {
        # We could add the following line and the connection would still be SSL,
        # but it doesn't appear to be necessary. Seems saver this way.
        listen 80;
        listen 443 default ssl;
        server_name ~^(?<sub>.*)\\.example\\.com$;
        ssl_certificate /etc/ssl/server.crt;
        ssl_certificate_key /etc/ssl/server.key;
        # Security enhancements to deal with poodles and the like.
        # See https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html
        # ssl_ciphers 'AES256+EECDH:AES256+EDH';
        ssl_ciphers \"ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA:ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES256-GCM-SHA384:AES128-GCM-SHA256:AES256-SHA256:AES128-SHA256:AES256-SHA:AES128-SHA:DES-CBC3-SHA:HIGH:!aNULL:!eNULL:!EXPORT:!DES:!MD5:!PSK:!RC4\";
        # We don't like poodles.
        ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
        ssl_session_cache shared:SSL:10m;
        # Enable Forward secrecy.
        ssl_dhparam /etc/ssl/dhparam.pem;
        ssl_prefer_server_ciphers on;
        # Enable HTST.
        add_header Strict-Transport-Security max-age=1209600;
        if ($ssl_protocol = \"\") {
            rewrite ^ https://$host$request_uri? permanent;
        }
        # required to avoid HTTP 411: see Issue #1486 (https://github.com/dotcloud/docker/issues/1486)
        chunked_transfer_encoding on;
        location / {
            proxy_pass http://authproxy:4180;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Scheme $scheme;
            proxy_connect_timeout 1;
        }
    }
}
"""
}
}
}
}