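// Evaluated configuration dump (appears to be `cue eval` output): each repeated
// deployment/service/configMap/kubernetes group covers one service, pairing its
// high-level definition with the Kubernetes objects derived from it.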
deployment: {}
service: {}
configMap: {}
kubernetes: {
services: {}
deployments: {}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {}
service: {}
configMap: {}
kubernetes: {
services: {}
deployments: {}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
bartender: {
name: "bartender"
kind: "deployment"
replicas: 1
image: "gcr.io/myproj/bartender:v0.1.34"
expose: {
port: {
http: 7080
}
}
port: {}
arg: {}
args: []
env: {}
kubernetes: {
spec: {
template: {
metadata: {
annotations: {
"prometheus.io.scrape": "true"
"prometheus.io.port": "7080"
}
}
}
}
}
label: {
app: "bartender"
domain: "prod"
component: "frontend"
}
envSpec: {}
volume: {}
}
}
service: {
bartender: {
name: "bartender"
port: {
http: {
name: "http"
port: 7080
protocol: "TCP"
}
}
label: {
app: "bartender"
domain: "prod"
component: "frontend"
}
kubernetes: {}
}
}
configMap: {}
kubernetes: {
services: {
bartender: {
apiVersion: "v1"
kind: "Service"
metadata: {
name: "bartender"
labels: {
app: "bartender"
domain: "prod"
component: "frontend"
}
}
spec: {
selector: {
app: "bartender"
domain: "prod"
component: "frontend"
}
ports: [{
name: "http"
port: 7080
protocol: "TCP"
}]
}
}
}
deployments: {
bartender: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: "bartender"
labels: {
component: "frontend"
}
}
spec: {
template: {
metadata: {
labels: {
app: "bartender"
domain: "prod"
component: "frontend"
}
annotations: {
"prometheus.io.scrape": "true"
"prometheus.io.port": "7080"
}
}
spec: {
containers: [{
name: "bartender"
image: "gcr.io/myproj/bartender:v0.1.34"
args: []
ports: [{
name: "http"
containerPort: 7080
}]
}]
}
}
replicas: 1
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
breaddispatcher: {
name: "breaddispatcher"
kind: "deployment"
replicas: 1
image: "gcr.io/myproj/breaddispatcher:v0.3.24"
expose: {
port: {
http: 7080
}
}
port: {}
arg: {
etcd: "etcd:2379"
"event-server": "events:7788"
}
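// The args value below is an unresolved disjunction: the flag list presumably
// rendered from the arg map above, or the bare [] alternative; cue eval prints
// both branches when neither is selected.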
args: ["-etcd=etcd:2379", "-event-server=events:7788"] | []
env: {}
kubernetes: {
spec: {
template: {
metadata: {
annotations: {
"prometheus.io.scrape": "true"
"prometheus.io.port": "7080"
}
}
}
}
}
label: {
app: "breaddispatcher"
domain: "prod"
component: "frontend"
}
envSpec: {}
volume: {}
}
}
service: {
breaddispatcher: {
name: "breaddispatcher"
port: {
http: {
name: "http"
port: 7080
protocol: "TCP"
}
}
label: {
app: "breaddispatcher"
domain: "prod"
component: "frontend"
}
kubernetes: {}
}
}
configMap: {}
kubernetes: {
services: {
breaddispatcher: {
apiVersion: "v1"
kind: "Service"
metadata: {
name: "breaddispatcher"
labels: {
app: "breaddispatcher"
domain: "prod"
component: "frontend"
}
}
spec: {
selector: {
app: "breaddispatcher"
domain: "prod"
component: "frontend"
}
ports: [{
name: "http"
port: 7080
protocol: "TCP"
}]
}
}
}
deployments: {
breaddispatcher: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: "breaddispatcher"
labels: {
component: "frontend"
}
}
spec: {
template: {
metadata: {
labels: {
app: "breaddispatcher"
domain: "prod"
component: "frontend"
}
annotations: {
"prometheus.io.scrape": "true"
"prometheus.io.port": "7080"
}
}
spec: {
containers: [{
name: "breaddispatcher"
image: "gcr.io/myproj/breaddispatcher:v0.3.24"
args: ["-etcd=etcd:2379", "-event-server=events:7788"] | []
ports: [{
name: "http"
containerPort: 7080
}]
}]
}
}
replicas: 1
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
host: {
name: "host"
kind: "deployment"
replicas: 2
image: "gcr.io/myproj/host:v0.1.10"
expose: {
port: {
http: 7080
}
}
port: {}
arg: {}
args: []
env: {}
kubernetes: {
spec: {
template: {
metadata: {
annotations: {
"prometheus.io.scrape": "true"
"prometheus.io.port": "7080"
}
}
}
}
}
label: {
app: "host"
domain: "prod"
component: "frontend"
}
envSpec: {}
volume: {}
}
}
service: {
host: {
name: "host"
port: {
http: {
name: "http"
port: 7080
protocol: "TCP"
}
}
label: {
app: "host"
domain: "prod"
component: "frontend"
}
kubernetes: {}
}
}
configMap: {}
kubernetes: {
services: {
host: {
apiVersion: "v1"
kind: "Service"
metadata: {
name: "host"
labels: {
app: "host"
domain: "prod"
component: "frontend"
}
}
spec: {
selector: {
app: "host"
domain: "prod"
component: "frontend"
}
ports: [{
name: "http"
port: 7080
protocol: "TCP"
}]
}
}
}
deployments: {
host: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: "host"
labels: {
component: "frontend"
}
}
spec: {
template: {
metadata: {
labels: {
app: "host"
domain: "prod"
component: "frontend"
}
annotations: {
"prometheus.io.scrape": "true"
"prometheus.io.port": "7080"
}
}
spec: {
containers: [{
name: "host"
image: "gcr.io/myproj/host:v0.1.10"
args: []
ports: [{
name: "http"
containerPort: 7080
}]
}]
}
}
replicas: 2
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
maitred: {
name: "maitred"
kind: "deployment"
replicas: 1
image: "gcr.io/myproj/maitred:v0.0.4"
expose: {
port: {
http: 7080
}
}
port: {}
arg: {}
args: []
env: {}
kubernetes: {
spec: {
template: {
metadata: {
annotations: {
"prometheus.io.scrape": "true"
"prometheus.io.port": "7080"
}
}
}
}
}
label: {
app: "maitred"
domain: "prod"
component: "frontend"
}
envSpec: {}
volume: {}
}
}
service: {
maitred: {
name: "maitred"
port: {
http: {
name: "http"
port: 7080
protocol: "TCP"
}
}
label: {
app: "maitred"
domain: "prod"
component: "frontend"
}
kubernetes: {}
}
}
configMap: {}
kubernetes: {
services: {
maitred: {
apiVersion: "v1"
kind: "Service"
metadata: {
name: "maitred"
labels: {
app: "maitred"
domain: "prod"
component: "frontend"
}
}
spec: {
selector: {
app: "maitred"
domain: "prod"
component: "frontend"
}
ports: [{
name: "http"
port: 7080
protocol: "TCP"
}]
}
}
}
deployments: {
maitred: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: "maitred"
labels: {
component: "frontend"
}
}
spec: {
template: {
metadata: {
labels: {
app: "maitred"
domain: "prod"
component: "frontend"
}
annotations: {
"prometheus.io.scrape": "true"
"prometheus.io.port": "7080"
}
}
spec: {
containers: [{
name: "maitred"
image: "gcr.io/myproj/maitred:v0.0.4"
args: []
ports: [{
name: "http"
containerPort: 7080
}]
}]
}
}
replicas: 1
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
valeter: {
name: "valeter"
kind: "deployment"
replicas: 1
image: "gcr.io/myproj/valeter:v0.0.4"
arg: {
http: ":8080"
etcd: "etcd:2379"
}
expose: {
port: {
http: 8080
}
}
port: {}
args: ["-http=:8080", "-etcd=etcd:2379"] | []
env: {}
kubernetes: {
spec: {
template: {
metadata: {
annotations: {
"prometheus.io.scrape": "true"
"prometheus.io.port": "8080"
}
}
}
}
}
label: {
app: "valeter"
domain: "prod"
component: "frontend"
}
envSpec: {}
volume: {}
}
}
service: {
valeter: {
name: "valeter"
port: {
http: {
name: "http"
port: 8080
protocol: "TCP"
}
}
label: {
app: "valeter"
domain: "prod"
component: "frontend"
}
kubernetes: {}
}
}
configMap: {}
kubernetes: {
services: {
valeter: {
apiVersion: "v1"
kind: "Service"
metadata: {
name: "valeter"
labels: {
app: "valeter"
domain: "prod"
component: "frontend"
}
}
spec: {
selector: {
app: "valeter"
domain: "prod"
component: "frontend"
}
ports: [{
name: "http"
port: 8080
protocol: "TCP"
}]
}
}
}
deployments: {
valeter: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: "valeter"
labels: {
component: "frontend"
}
}
spec: {
template: {
metadata: {
labels: {
app: "valeter"
domain: "prod"
component: "frontend"
}
annotations: {
"prometheus.io.scrape": "true"
"prometheus.io.port": "8080"
}
}
spec: {
containers: [{
name: "valeter"
image: "gcr.io/myproj/valeter:v0.0.4"
args: ["-http=:8080", "-etcd=etcd:2379"] | []
ports: [{
name: "http"
containerPort: 8080
}]
}]
}
}
replicas: 1
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
waiter: {
name: "waiter"
kind: "deployment"
image: "gcr.io/myproj/waiter:v0.3.0"
replicas: 5
expose: {
port: {
http: 7080
}
}
port: {}
arg: {}
args: []
env: {}
kubernetes: {
spec: {
template: {
metadata: {
annotations: {
"prometheus.io.scrape": "true"
"prometheus.io.port": "7080"
}
}
}
}
}
label: {
app: "waiter"
domain: "prod"
component: "frontend"
}
envSpec: {}
volume: {}
}
}
service: {
waiter: {
name: "waiter"
port: {
http: {
name: "http"
port: 7080
protocol: "TCP"
}
}
label: {
app: "waiter"
domain: "prod"
component: "frontend"
}
kubernetes: {}
}
}
configMap: {}
kubernetes: {
services: {
waiter: {
apiVersion: "v1"
kind: "Service"
metadata: {
name: "waiter"
labels: {
app: "waiter"
domain: "prod"
component: "frontend"
}
}
spec: {
selector: {
app: "waiter"
domain: "prod"
component: "frontend"
}
ports: [{
name: "http"
port: 7080
protocol: "TCP"
}]
}
}
}
deployments: {
waiter: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: "waiter"
labels: {
component: "frontend"
}
}
spec: {
template: {
metadata: {
labels: {
app: "waiter"
domain: "prod"
component: "frontend"
}
annotations: {
"prometheus.io.scrape": "true"
"prometheus.io.port": "7080"
}
}
spec: {
containers: [{
name: "waiter"
image: "gcr.io/myproj/waiter:v0.3.0"
args: []
ports: [{
name: "http"
containerPort: 7080
}]
}]
}
}
replicas: 5
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
waterdispatcher: {
name: "waterdispatcher"
kind: "deployment"
replicas: 1
image: "gcr.io/myproj/waterdispatcher:v0.0.48"
expose: {
port: {
http: 7080
}
}
port: {}
arg: {
http: ":8080"
etcd: "etcd:2379"
}
args: ["-http=:8080", "-etcd=etcd:2379"] | []
env: {}
kubernetes: {
spec: {
template: {
metadata: {
annotations: {
"prometheus.io.scrape": "true"
"prometheus.io.port": "7080"
}
}
}
}
}
label: {
app: "waterdispatcher"
domain: "prod"
component: "frontend"
}
envSpec: {}
volume: {}
}
}
service: {
waterdispatcher: {
name: "waterdispatcher"
port: {
http: {
name: "http"
port: 7080
protocol: "TCP"
}
}
label: {
app: "waterdispatcher"
domain: "prod"
component: "frontend"
}
kubernetes: {}
}
}
configMap: {}
kubernetes: {
services: {
waterdispatcher: {
apiVersion: "v1"
kind: "Service"
metadata: {
name: "waterdispatcher"
labels: {
app: "waterdispatcher"
domain: "prod"
component: "frontend"
}
}
spec: {
selector: {
app: "waterdispatcher"
domain: "prod"
component: "frontend"
}
ports: [{
name: "http"
port: 7080
protocol: "TCP"
}]
}
}
}
deployments: {
waterdispatcher: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: "waterdispatcher"
labels: {
component: "frontend"
}
}
spec: {
template: {
metadata: {
labels: {
app: "waterdispatcher"
domain: "prod"
component: "frontend"
}
annotations: {
"prometheus.io.scrape": "true"
"prometheus.io.port": "7080"
}
}
spec: {
containers: [{
name: "waterdispatcher"
image: "gcr.io/myproj/waterdispatcher:v0.0.48"
args: ["-http=:8080", "-etcd=etcd:2379"] | []
ports: [{
name: "http"
containerPort: 7080
}]
}]
}
}
replicas: 1
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {}
service: {}
configMap: {}
kubernetes: {
services: {}
deployments: {}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
download: {
name: "download"
kind: "deployment"
replicas: 1
image: "gcr.io/myproj/download:v0.0.2"
expose: {
port: {
client: 7080
}
}
port: {}
arg: {}
args: []
env: {}
label: {
app: "download"
domain: "prod"
component: "infra"
}
kubernetes: {}
envSpec: {}
volume: {}
}
}
service: {
download: {
name: "download"
port: {
client: {
name: "client"
port: 7080
protocol: "TCP"
}
}
label: {
app: "download"
domain: "prod"
component: "infra"
}
kubernetes: {}
}
}
configMap: {}
kubernetes: {
services: {
download: {
apiVersion: "v1"
kind: "Service"
metadata: {
name: "download"
labels: {
app: "download"
domain: "prod"
component: "infra"
}
}
spec: {
selector: {
app: "download"
domain: "prod"
component: "infra"
}
ports: [{
name: "client"
port: 7080
protocol: "TCP"
}]
}
}
}
deployments: {
download: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: "download"
labels: {
component: "infra"
}
}
spec: {
template: {
metadata: {
labels: {
app: "download"
domain: "prod"
component: "infra"
}
}
spec: {
containers: [{
name: "download"
image: "gcr.io/myproj/download:v0.0.2"
args: []
ports: [{
name: "client"
containerPort: 7080
}]
}]
}
}
replicas: 1
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
etcd: {
name: "etcd"
kind: "stateful"
replicas: 3
image: "quay.io/coreos/etcd:v3.3.10"
kubernetes: {
spec: {
volumeClaimTemplates: [{
metadata: {
name: "etcd3"
annotations: {
"volume.alpha.kubernetes.io/storage-class": "default"
}
}
spec: {
accessModes: ["ReadWriteOnce"]
resources: {
requests: {
storage: "10Gi"
}
}
}
}]
serviceName: "etcd"
template: {
spec: {
containers: [{
command: ["/usr/local/bin/etcd"]
volumeMounts: [{
name: "etcd3"
mountPath: "/data"
}]
livenessProbe: {
httpGet: {
path: "/health"
port: "client"
}
initialDelaySeconds: 30
}
}]
affinity: {
podAntiAffinity: {
requiredDuringSchedulingIgnoredDuringExecution: [{
labelSelector: {
matchExpressions: [{
key: "app"
operator: "In"
values: ["etcd"]
}]
}
topologyKey: "kubernetes.io/hostname"
}]
}
}
terminationGracePeriodSeconds: 10
}
metadata: {
annotations: {
"prometheus.io.port": "2379"
"prometheus.io.scrape": "true"
}
}
}
}
}
arg: {
name: "$(NAME)"
"data-dir": "/data/etcd3"
"initial-advertise-peer-urls": "http://$(IP):2380"
"listen-peer-urls": "http://$(IP):2380"
"listen-client-urls": "http://$(IP):2379,http://127.0.0.1:2379"
"advertise-client-urls": "http://$(IP):2379"
discovery: "https://discovery.etcd.io/xxxxxx"
}
env: {
ETCDCTL_API: "3"
ETCD_AUTO_COMPACTION_RETENTION: "4"
}
envSpec: {
NAME: {
valueFrom: {
fieldRef: {
fieldPath: "metadata.name"
}
}
}
IP: {
valueFrom: {
fieldRef: {
fieldPath: "status.podIP"
}
}
}
ETCDCTL_API: {
value: "3"
}
ETCD_AUTO_COMPACTION_RETENTION: {
value: "4"
}
}
expose: {
port: {
client: 2379
peer: 2380
}
}
port: {}
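// $(NAME) and $(IP) in the flags below are Kubernetes env-var substitutions;
// at runtime they resolve from the NAME and IP entries defined in envSpec
// (metadata.name and status.podIP).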
args: ["-name=$(NAME)", "-data-dir=/data/etcd3", "-initial-advertise-peer-urls=http://$(IP):2380", "-listen-peer-urls=http://$(IP):2380", "-listen-client-urls=http://$(IP):2379,http://127.0.0.1:2379", "-advertise-client-urls=http://$(IP):2379", "-discovery=https://discovery.etcd.io/xxxxxx"] | []
label: {
app: "etcd"
domain: "prod"
component: "infra"
}
volume: {}
}
}
service: {
etcd: {
name: "etcd"
port: {
client: {
name: "client"
port: 2379
protocol: "TCP"
}
peer: {
name: "peer"
port: 2380
protocol: "TCP"
}
}
kubernetes: {
spec: {
clusterIP: "None"
}
}
label: {
app: "etcd"
domain: "prod"
component: "infra"
}
}
}
configMap: {}
kubernetes: {
services: {
etcd: {
apiVersion: "v1"
kind: "Service"
spec: {
clusterIP: "None"
selector: {
app: "etcd"
domain: "prod"
component: "infra"
}
ports: [{
name: "client"
port: 2379
protocol: "TCP"
}, {
name: "peer"
port: 2380
protocol: "TCP"
}]
}
metadata: {
name: "etcd"
labels: {
app: "etcd"
domain: "prod"
component: "infra"
}
}
}
}
deployments: {}
statefulSets: {
etcd: {
apiVersion: "apps/v1beta1"
kind: "StatefulSet"
metadata: {
name: "etcd"
labels: {
component: "infra"
}
}
spec: {
volumeClaimTemplates: [{
metadata: {
name: "etcd3"
annotations: {
"volume.alpha.kubernetes.io/storage-class": "default"
}
}
spec: {
accessModes: ["ReadWriteOnce"]
resources: {
requests: {
storage: "10Gi"
}
}
}
}]
serviceName: "etcd"
replicas: 3
template: {
metadata: {
labels: {
app: "etcd"
domain: "prod"
component: "infra"
}
annotations: {
"prometheus.io.port": "2379"
"prometheus.io.scrape": "true"
}
}
spec: {
containers: [{
name: "etcd"
image: "quay.io/coreos/etcd:v3.3.10"
args: ["-name=$(NAME)", "-data-dir=/data/etcd3", "-initial-advertise-peer-urls=http://$(IP):2380", "-listen-peer-urls=http://$(IP):2380", "-listen-client-urls=http://$(IP):2379,http://127.0.0.1:2379", "-advertise-client-urls=http://$(IP):2379", "-discovery=https://discovery.etcd.io/xxxxxx"] | []
command: ["/usr/local/bin/etcd"]
volumeMounts: [{
name: "etcd3"
mountPath: "/data"
}]
env: [{
name: "NAME"
valueFrom: {
fieldRef: {
fieldPath: "metadata.name"
}
}
}, {
name: "IP"
valueFrom: {
fieldRef: {
fieldPath: "status.podIP"
}
}
}, {
name: "ETCDCTL_API"
value: "3"
}, {
name: "ETCD_AUTO_COMPACTION_RETENTION"
value: "4"
}]
ports: [{
name: "client"
containerPort: 2379
}, {
name: "peer"
containerPort: 2380
}]
livenessProbe: {
httpGet: {
path: "/health"
port: "client"
}
initialDelaySeconds: 30
}
}]
affinity: {
podAntiAffinity: {
requiredDuringSchedulingIgnoredDuringExecution: [{
labelSelector: {
matchExpressions: [{
key: "app"
operator: "In"
values: ["etcd"]
}]
}
topologyKey: "kubernetes.io/hostname"
}]
}
}
terminationGracePeriodSeconds: 10
}
}
}
}
}
daemonSets: {}
configMaps: {}
}
deployment: {
events: {
name: "events"
kind: "deployment"
replicas: 2
image: "gcr.io/myproj/events:v0.1.31"
arg: {
cert: "/etc/ssl/server.pem"
key: "/etc/ssl/server.key"
grpc: ":7788"
}
port: {
http: 7080
}
expose: {
port: {
grpc: 7788
}
}
args: ["-cert=/etc/ssl/server.pem", "-key=/etc/ssl/server.key", "-grpc=:7788"] | []
env: {}
volume: {
"secret-volume": {
name: "secret-volume"
mountPath: "/etc/ssl"
subPath: null
readOnly: false
spec: {
secret: {
secretName: "biz-secrets"
}
}
kubernetes: {}
}
}
kubernetes: {
spec: {
template: {
metadata: {
annotations: {
"prometheus.io.port": "7080"
"prometheus.io.scrape": "true"
}
}
spec: {
affinity: {
podAntiAffinity: {
requiredDuringSchedulingIgnoredDuringExecution: [{
labelSelector: {
matchExpressions: [{
key: "app"
operator: "In"
values: ["events"]
}]
}
topologyKey: "kubernetes.io/hostname"
}]
}
}
}
}
}
}
label: {
app: "events"
domain: "prod"
component: "infra"
}
envSpec: {}
}
}
service: {
events: {
name: "events"
port: {
grpc: {
name: "grpc"
port: 7788
protocol: "TCP"
}
}
label: {
app: "events"
domain: "prod"
component: "infra"
}
kubernetes: {}
}
}
configMap: {}
kubernetes: {
services: {
events: {
apiVersion: "v1"
kind: "Service"
metadata: {
name: "events"
labels: {
app: "events"
domain: "prod"
component: "infra"
}
}
spec: {
selector: {
app: "events"
domain: "prod"
component: "infra"
}
ports: [{
name: "grpc"
port: 7788
protocol: "TCP"
}]
}
}
}
deployments: {
events: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: "events"
labels: {
component: "infra"
}
}
spec: {
template: {
metadata: {
labels: {
app: "events"
domain: "prod"
component: "infra"
}
annotations: {
"prometheus.io.port": "7080"
"prometheus.io.scrape": "true"
}
}
spec: {
containers: [{
name: "events"
image: "gcr.io/myproj/events:v0.1.31"
args: ["-cert=/etc/ssl/server.pem", "-key=/etc/ssl/server.key", "-grpc=:7788"] | []
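// `v` below is a free reference left over from the source template (most likely
// a `for v in <deployment>.volume` comprehension); the if clauses add
// subPath/readOnly only when those fields are set. This is an assumption about
// the template, not resolved output.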
volumeMounts: [{
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}]
ports: [{
name: "grpc"
containerPort: 7788
}, {
name: "http"
containerPort: 7080
}]
}]
affinity: {
podAntiAffinity: {
requiredDuringSchedulingIgnoredDuringExecution: [{
labelSelector: {
matchExpressions: [{
key: "app"
operator: "In"
values: ["events"]
}]
}
topologyKey: "kubernetes.io/hostname"
}]
}
}
volumes: [{
name: "secret-volume"
}]
}
}
replicas: 2
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
tasks: {
name: "tasks"
kind: "deployment"
replicas: 1
image: "gcr.io/myproj/tasks:v0.2.6"
port: {
http: 7080
}
expose: {
port: {
https: 7443
}
}
arg: {}
args: []
env: {}
volume: {
"secret-volume": {
name: "secret-volume"
mountPath: "/etc/ssl"
subPath: null
readOnly: false
spec: {
secret: {
secretName: "star-example-com-secrets"
}
}
kubernetes: {}
}
}
kubernetes: {
spec: {
template: {
metadata: {
annotations: {
"prometheus.io.port": "7080"
"prometheus.io.scrape": "true"
}
}
}
}
}
label: {
app: "tasks"
domain: "prod"
component: "infra"
}
envSpec: {}
}
}
service: {
tasks: {
name: "tasks"
port: {
https: {
name: "https"
port: 443
targetPort: 7443
protocol: "TCP"
}
}
kubernetes: {
spec: {
type: "LoadBalancer"
loadBalancerIP: "1.2.3.4"
}
}
label: {
app: "tasks"
domain: "prod"
component: "infra"
}
}
}
configMap: {}
kubernetes: {
services: {
tasks: {
apiVersion: "v1"
kind: "Service"
spec: {
type: "LoadBalancer"
selector: {
app: "tasks"
domain: "prod"
component: "infra"
}
ports: [{
name: "https"
port: 443
targetPort: 7443
protocol: "TCP"
}]
loadBalancerIP: "1.2.3.4"
}
metadata: {
name: "tasks"
labels: {
app: "tasks"
domain: "prod"
component: "infra"
}
}
}
}
deployments: {
tasks: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: "tasks"
labels: {
component: "infra"
}
}
spec: {
template: {
metadata: {
labels: {
app: "tasks"
domain: "prod"
component: "infra"
}
annotations: {
"prometheus.io.port": "7080"
"prometheus.io.scrape": "true"
}
}
spec: {
containers: [{
name: "tasks"
image: "gcr.io/myproj/tasks:v0.2.6"
args: []
volumeMounts: [{
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}]
ports: [{
name: "https"
containerPort: 7443
}, {
name: "http"
containerPort: 7080
}]
}]
volumes: [{
name: "secret-volume"
}]
}
}
replicas: 1
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
updater: {
name: "updater"
kind: "deployment"
replicas: 1
image: "gcr.io/myproj/updater:v0.1.0"
args: ["-key=/etc/certs/updater.pem"]
expose: {
port: {
http: 8080
}
}
port: {}
arg: {}
env: {}
volume: {
"secret-updater": {
name: "secret-updater"
mountPath: "/etc/certs"
subPath: null
readOnly: false
spec: {
secret: {
secretName: "updater-secrets"
}
}
kubernetes: {}
}
}
label: {
app: "updater"
domain: "prod"
component: "infra"
}
kubernetes: {}
envSpec: {}
}
}
service: {
updater: {
name: "updater"
port: {
http: {
name: "http"
port: 8080
protocol: "TCP"
}
}
label: {
app: "updater"
domain: "prod"
component: "infra"
}
kubernetes: {}
}
}
configMap: {}
kubernetes: {
services: {
updater: {
apiVersion: "v1"
kind: "Service"
metadata: {
name: "updater"
labels: {
app: "updater"
domain: "prod"
component: "infra"
}
}
spec: {
selector: {
app: "updater"
domain: "prod"
component: "infra"
}
ports: [{
name: "http"
port: 8080
protocol: "TCP"
}]
}
}
}
deployments: {
updater: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: "updater"
labels: {
component: "infra"
}
}
spec: {
template: {
metadata: {
labels: {
app: "updater"
domain: "prod"
component: "infra"
}
}
spec: {
containers: [{
name: "updater"
image: "gcr.io/myproj/updater:v0.1.0"
args: ["-key=/etc/certs/updater.pem"]
volumeMounts: [{
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}]
ports: [{
name: "http"
containerPort: 8080
}]
}]
volumes: [{
name: "secret-updater"
}]
}
}
replicas: 1
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
watcher: {
name: "watcher"
kind: "deployment"
replicas: 1
image: "gcr.io/myproj/watcher:v0.1.0"
volume: {
"secret-volume": {
name: "secret-volume"
mountPath: "/etc/ssl"
subPath: null
readOnly: false
spec: {
secret: {
secretName: "star-example-com-secrets"
}
}
kubernetes: {}
}
}
port: {
http: 7080
}
expose: {
port: {
https: 7788
}
}
arg: {}
args: []
env: {}
label: {
app: "watcher"
domain: "prod"
component: "infra"
}
kubernetes: {}
envSpec: {}
}
}
service: {
watcher: {
name: "watcher"
port: {
https: {
name: "https"
port: 7788
protocol: "TCP"
}
}
kubernetes: {
spec: {
type: "LoadBalancer"
loadBalancerIP: "1.2.3.4"
}
}
ports: {
https: {
port: 7788
targetPort: 7788
}
}
label: {
app: "watcher"
domain: "prod"
component: "infra"
}
}
}
configMap: {}
kubernetes: {
services: {
watcher: {
apiVersion: "v1"
kind: "Service"
spec: {
type: "LoadBalancer"
selector: {
app: "watcher"
domain: "prod"
component: "infra"
}
ports: [{
name: "https"
port: 7788
protocol: "TCP"
}]
loadBalancerIP: "1.2.3.4"
}
metadata: {
name: "watcher"
labels: {
app: "watcher"
domain: "prod"
component: "infra"
}
}
}
}
deployments: {
watcher: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: "watcher"
labels: {
component: "infra"
}
}
spec: {
template: {
metadata: {
labels: {
app: "watcher"
domain: "prod"
component: "infra"
}
}
spec: {
containers: [{
name: "watcher"
image: "gcr.io/myproj/watcher:v0.1.0"
args: []
volumeMounts: [{
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}]
ports: [{
name: "https"
containerPort: 7788
}, {
name: "http"
containerPort: 7080
}]
}]
volumes: [{
name: "secret-volume"
}]
}
}
replicas: 1
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {}
service: {}
configMap: {}
kubernetes: {
services: {}
deployments: {}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
caller: {
name: "caller"
kind: "deployment"
replicas: 3
image: "gcr.io/myproj/caller:v0.20.14"
expose: {
port: {
client: 8080
}
}
port: {}
arg: {
env: "prod"
logdir: "/logs"
"event-server": "events:7788"
key: "/etc/certs/client.key"
cert: "/etc/certs/client.pem"
ca: "/etc/certs/servfx.ca"
"ssh-tunnel-key": "/sslcerts/tunnel-private.pem"
}
args: ["-env=prod", "-logdir=/logs", "-event-server=events:7788", "-key=/etc/certs/client.key", "-cert=/etc/certs/client.pem", "-ca=/etc/certs/servfx.ca", "-ssh-tunnel-key=/sslcerts/tunnel-private.pem"] | []
env: {}
volume: {
"caller-disk": {
name: "ssd-caller"
mountPath: "/logs"
subPath: null
readOnly: false
kubernetes: {}
spec: {
gcePersistentDisk: {
pdName: "ssd-caller"
fsType: "ext4"
}
}
}
"secret-ssh-key": {
name: "secret-ssh-key"
mountPath: "/sslcerts"
subPath: null
readOnly: true
spec: {
secret: {
secretName: "secrets"
}
}
kubernetes: {}
}
"secret-caller": {
name: "secret-caller"
mountPath: "/etc/certs"
subPath: null
readOnly: true
spec: {
secret: {
secretName: "caller-secrets"
}
}
kubernetes: {}
}
}
kubernetes: {
spec: {
template: {
metadata: {
annotations: {
"prometheus.io.scrape": "true"
}
}
spec: {
containers: [{
livenessProbe: {
httpGet: {
path: "/debug/health"
port: 8080
}
initialDelaySeconds: 40
periodSeconds: 3
}
}]
}
}
}
}
label: {
app: "caller"
domain: "prod"
component: "kitchen"
}
envSpec: {}
}
}
service: {
caller: {
name: "caller"
port: {
client: {
name: "client"
port: 8080
protocol: "TCP"
}
}
label: {
app: "caller"
domain: "prod"
component: "kitchen"
}
kubernetes: {}
}
}
configMap: {}
kubernetes: {
services: {
caller: {
apiVersion: "v1"
kind: "Service"
metadata: {
name: "caller"
labels: {
app: "caller"
domain: "prod"
component: "kitchen"
}
}
spec: {
selector: {
app: "caller"
domain: "prod"
component: "kitchen"
}
ports: [{
name: "client"
port: 8080
protocol: "TCP"
}]
}
}
}
deployments: {
caller: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: "caller"
labels: {
component: "kitchen"
}
}
spec: {
template: {
metadata: {
labels: {
app: "caller"
domain: "prod"
component: "kitchen"
}
annotations: {
"prometheus.io.scrape": "true"
}
}
spec: {
containers: [{
name: "caller"
image: "gcr.io/myproj/caller:v0.20.14"
args: ["-env=prod", "-logdir=/logs", "-event-server=events:7788", "-key=/etc/certs/client.key", "-cert=/etc/certs/client.pem", "-ca=/etc/certs/servfx.ca", "-ssh-tunnel-key=/sslcerts/tunnel-private.pem"] | []
livenessProbe: {
httpGet: {
path: "/debug/health"
port: 8080
}
initialDelaySeconds: 40
periodSeconds: 3
}
volumeMounts: [{
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}, {
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}, {
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}]
ports: [{
name: "client"
containerPort: 8080
}]
}]
volumes: [{
name: "ssd-caller"
}, {
name: "secret-ssh-key"
}, {
name: "secret-caller"
}]
}
}
replicas: 3
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
dishwasher: {
name: "dishwasher"
kind: "deployment"
replicas: 5
image: "gcr.io/myproj/dishwasher:v0.2.13"
expose: {
port: {
client: 8080
}
}
port: {}
arg: {
env: "prod"
logdir: "/logs"
"event-server": "events:7788"
"ssh-tunnel-key": "/etc/certs/tunnel-private.pem"
}
args: ["-env=prod", "-logdir=/logs", "-event-server=events:7788", "-ssh-tunnel-key=/etc/certs/tunnel-private.pem"] | []
env: {}
volume: {
"secret-ssh-key": {
name: "secret-ssh-key"
mountPath: "/sslcerts"
subPath: null
readOnly: true
spec: {
secret: {
secretName: "secrets"
}
}
kubernetes: {}
}
"dishwasher-disk": {
name: "dishwasher-disk"
mountPath: "/logs"
subPath: null
readOnly: false
spec: {
gcePersistentDisk: {
pdName: "dishwasher-disk"
fsType: "ext4"
}
}
kubernetes: {}
}
"secret-dishwasher": {
name: "secret-dishwasher"
mountPath: "/etc/certs"
subPath: null
readOnly: true
spec: {
secret: {
secretName: "dishwasher-secrets"
}
}
kubernetes: {}
}
}
kubernetes: {
spec: {
template: {
metadata: {
annotations: {
"prometheus.io.scrape": "true"
}
}
spec: {
containers: [{
livenessProbe: {
httpGet: {
path: "/debug/health"
port: 8080
}
initialDelaySeconds: 40
periodSeconds: 3
}
}]
}
}
}
}
label: {
app: "dishwasher"
domain: "prod"
component: "kitchen"
}
envSpec: {}
}
}
service: {
dishwasher: {
name: "dishwasher"
port: {
client: {
name: "client"
port: 8080
protocol: "TCP"
}
}
label: {
app: "dishwasher"
domain: "prod"
component: "kitchen"
}
kubernetes: {}
}
}
configMap: {}
kubernetes: {
services: {
dishwasher: {
apiVersion: "v1"
kind: "Service"
metadata: {
name: "dishwasher"
labels: {
app: "dishwasher"
domain: "prod"
component: "kitchen"
}
}
spec: {
selector: {
app: "dishwasher"
domain: "prod"
component: "kitchen"
}
ports: [{
name: "client"
port: 8080
protocol: "TCP"
}]
}
}
}
deployments: {
dishwasher: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: "dishwasher"
labels: {
component: "kitchen"
}
}
spec: {
template: {
metadata: {
labels: {
app: "dishwasher"
domain: "prod"
component: "kitchen"
}
annotations: {
"prometheus.io.scrape": "true"
}
}
spec: {
containers: [{
name: "dishwasher"
image: "gcr.io/myproj/dishwasher:v0.2.13"
args: ["-env=prod", "-logdir=/logs", "-event-server=events:7788", "-ssh-tunnel-key=/etc/certs/tunnel-private.pem"] | []
livenessProbe: {
httpGet: {
path: "/debug/health"
port: 8080
}
initialDelaySeconds: 40
periodSeconds: 3
}
volumeMounts: [{
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}, {
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}, {
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}]
ports: [{
name: "client"
containerPort: 8080
}]
}]
volumes: [{
name: "secret-ssh-key"
}, {
name: "dishwasher-disk"
}, {
name: "secret-dishwasher"
}]
}
}
replicas: 5
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
expiditer: {
name: "expiditer"
kind: "deployment"
replicas: 1
image: "gcr.io/myproj/expiditer:v0.5.34"
expose: {
port: {
client: 8080
}
}
port: {}
arg: {
env: "prod"
logdir: "/logs"
"event-server": "events:7788"
"ssh-tunnel-key": "/etc/certs/tunnel-private.pem"
}
args: ["-env=prod", "-logdir=/logs", "-event-server=events:7788", "-ssh-tunnel-key=/etc/certs/tunnel-private.pem"] | []
env: {}
volume: {
"expiditer-disk": {
name: "expiditer-disk"
mountPath: "/logs"
subPath: null
readOnly: false
spec: {
gcePersistentDisk: {
pdName: "expiditer-disk"
fsType: "ext4"
}
}
kubernetes: {}
}
"secret-expiditer": {
name: "secret-expiditer"
mountPath: "/etc/certs"
subPath: null
readOnly: true
spec: {
secret: {
secretName: "expiditer-secrets"
}
}
kubernetes: {}
}
}
kubernetes: {
spec: {
template: {
metadata: {
annotations: {
"prometheus.io.scrape": "true"
}
}
spec: {
containers: [{
livenessProbe: {
httpGet: {
path: "/debug/health"
port: 8080
}
initialDelaySeconds: 40
periodSeconds: 3
}
}]
}
}
}
}
label: {
app: "expiditer"
domain: "prod"
component: "kitchen"
}
envSpec: {}
}
}
service: {
expiditer: {
name: "expiditer"
port: {
client: {
name: "client"
port: 8080
protocol: "TCP"
}
}
label: {
app: "expiditer"
domain: "prod"
component: "kitchen"
}
kubernetes: {}
}
}
configMap: {}
kubernetes: {
services: {
expiditer: {
apiVersion: "v1"
kind: "Service"
metadata: {
name: "expiditer"
labels: {
app: "expiditer"
domain: "prod"
component: "kitchen"
}
}
spec: {
selector: {
app: "expiditer"
domain: "prod"
component: "kitchen"
}
ports: [{
name: "client"
port: 8080
protocol: "TCP"
}]
}
}
}
deployments: {
expiditer: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: "expiditer"
labels: {
component: "kitchen"
}
}
spec: {
template: {
metadata: {
labels: {
app: "expiditer"
domain: "prod"
component: "kitchen"
}
annotations: {
"prometheus.io.scrape": "true"
}
}
spec: {
containers: [{
name: "expiditer"
image: "gcr.io/myproj/expiditer:v0.5.34"
args: ["-env=prod", "-logdir=/logs", "-event-server=events:7788", "-ssh-tunnel-key=/etc/certs/tunnel-private.pem"] | []
livenessProbe: {
httpGet: {
path: "/debug/health"
port: 8080
}
initialDelaySeconds: 40
periodSeconds: 3
}
volumeMounts: [{
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}, {
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}]
ports: [{
name: "client"
containerPort: 8080
}]
}]
volumes: [{
name: "expiditer-disk"
}, {
name: "secret-expiditer"
}]
}
}
replicas: 1
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
headchef: {
name: "headchef"
kind: "deployment"
replicas: 1
image: "gcr.io/myproj/headchef:v0.2.16"
expose: {
port: {
client: 8080
}
}
port: {}
arg: {
env: "prod"
logdir: "/logs"
"event-server": "events:7788"
}
args: ["-env=prod", "-logdir=/logs", "-event-server=events:7788"] | []
env: {}
volume: {
"secret-headchef": {
name: "secret-headchef"
mountPath: "/sslcerts"
subPath: null
readOnly: true
kubernetes: {}
spec: {
secret: {
secretName: "headchef-secrets"
}
}
}
"headchef-disk": {
name: "headchef-disk"
mountPath: "/logs"
subPath: null
readOnly: false
spec: {
gcePersistentDisk: {
pdName: "headchef-disk"
fsType: "ext4"
}
}
kubernetes: {}
}
}
kubernetes: {
spec: {
template: {
metadata: {
annotations: {
"prometheus.io.scrape": "true"
}
}
spec: {
containers: [{
livenessProbe: {
httpGet: {
path: "/debug/health"
port: 8080
}
initialDelaySeconds: 40
periodSeconds: 3
}
}]
}
}
}
}
label: {
app: "headchef"
domain: "prod"
component: "kitchen"
}
envSpec: {}
}
}
service: {
headchef: {
name: "headchef"
port: {
client: {
name: "client"
port: 8080
protocol: "TCP"
}
}
label: {
app: "headchef"
domain: "prod"
component: "kitchen"
}
kubernetes: {}
}
}
configMap: {}
kubernetes: {
services: {
headchef: {
apiVersion: "v1"
kind: "Service"
metadata: {
name: "headchef"
labels: {
app: "headchef"
domain: "prod"
component: "kitchen"
}
}
spec: {
selector: {
app: "headchef"
domain: "prod"
component: "kitchen"
}
ports: [{
name: "client"
port: 8080
protocol: "TCP"
}]
}
}
}
deployments: {
headchef: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: "headchef"
labels: {
component: "kitchen"
}
}
spec: {
template: {
metadata: {
labels: {
app: "headchef"
domain: "prod"
component: "kitchen"
}
annotations: {
"prometheus.io.scrape": "true"
}
}
spec: {
containers: [{
name: "headchef"
image: "gcr.io/myproj/headchef:v0.2.16"
args: ["-env=prod", "-logdir=/logs", "-event-server=events:7788"] | []
livenessProbe: {
httpGet: {
path: "/debug/health"
port: 8080
}
initialDelaySeconds: 40
periodSeconds: 3
}
volumeMounts: [{
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}, {
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}]
ports: [{
name: "client"
containerPort: 8080
}]
}]
volumes: [{
name: "secret-headchef"
}, {
name: "headchef-disk"
}]
}
}
replicas: 1
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
linecook: {
name: "linecook"
kind: "deployment"
replicas: 1
image: "gcr.io/myproj/linecook:v0.1.42"
expose: {
port: {
client: 8080
}
}
port: {}
arg: {
env: "prod"
logdir: "/logs"
"event-server": "events:7788"
name: "linecook"
etcd: "etcd:2379"
"reconnect-delay": "1h"
"-recovery-overlap": "100000"
}
args: ["-env=prod", "-logdir=/logs", "-event-server=events:7788", "-name=linecook", "-etcd=etcd:2379", "-reconnect-delay=1h", "--recovery-overlap=100000"] | []
env: {}
volume: {
"secret-linecook": {
name: "secret-kitchen"
mountPath: "/etc/certs"
subPath: null
readOnly: true
kubernetes: {}
spec: {
secret: {
secretName: "linecook-secrets"
}
}
}
"linecook-disk": {
name: "linecook-disk"
mountPath: "/logs"
subPath: null
readOnly: false
spec: {
gcePersistentDisk: {
pdName: "linecook-disk"
fsType: "ext4"
}
}
kubernetes: {}
}
}
kubernetes: {
spec: {
template: {
metadata: {
annotations: {
"prometheus.io.scrape": "true"
}
}
spec: {
containers: [{
livenessProbe: {
httpGet: {
path: "/debug/health"
port: 8080
}
initialDelaySeconds: 40
periodSeconds: 3
}
}]
}
}
}
}
label: {
app: "linecook"
domain: "prod"
component: "kitchen"
}
envSpec: {}
}
}
service: {
linecook: {
name: "linecook"
port: {
client: {
name: "client"
port: 8080
protocol: "TCP"
}
}
label: {
app: "linecook"
domain: "prod"
component: "kitchen"
}
kubernetes: {}
}
}
configMap: {}
kubernetes: {
services: {
linecook: {
apiVersion: "v1"
kind: "Service"
metadata: {
name: "linecook"
labels: {
app: "linecook"
domain: "prod"
component: "kitchen"
}
}
spec: {
selector: {
app: "linecook"
domain: "prod"
component: "kitchen"
}
ports: [{
name: "client"
port: 8080
protocol: "TCP"
}]
}
}
}
deployments: {
linecook: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: "linecook"
labels: {
component: "kitchen"
}
}
spec: {
template: {
metadata: {
labels: {
app: "linecook"
domain: "prod"
component: "kitchen"
}
annotations: {
"prometheus.io.scrape": "true"
}
}
spec: {
containers: [{
name: "linecook"
image: "gcr.io/myproj/linecook:v0.1.42"
args: ["-env=prod", "-logdir=/logs", "-event-server=events:7788", "-name=linecook", "-etcd=etcd:2379", "-reconnect-delay=1h", "--recovery-overlap=100000"] | []
livenessProbe: {
httpGet: {
path: "/debug/health"
port: 8080
}
initialDelaySeconds: 40
periodSeconds: 3
}
volumeMounts: [{
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}, {
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}]
ports: [{
name: "client"
containerPort: 8080
}]
}]
volumes: [{
name: "secret-kitchen"
}, {
name: "linecook-disk"
}]
}
}
replicas: 1
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
pastrychef: {
name: "pastrychef"
kind: "deployment"
replicas: 1
image: "gcr.io/myproj/pastrychef:v0.1.15"
expose: {
port: {
client: 8080
}
}
port: {}
arg: {
env: "prod"
logdir: "/logs"
"event-server": "events:7788"
"ssh-tunnel-key": "/etc/certs/tunnel-private.pem"
"reconnect-delay": "1m"
etcd: "etcd:2379"
"recovery-overlap": "10000"
}
args: ["-env=prod", "-logdir=/logs", "-event-server=events:7788", "-ssh-tunnel-key=/etc/certs/tunnel-private.pem", "-reconnect-delay=1m", "-etcd=etcd:2379", "-recovery-overlap=10000"] | []
env: {}
volume: {
"secret-pastrychef": {
name: "secret-ssh-key"
mountPath: "/etc/certs"
subPath: null
readOnly: true
spec: {
secret: {
secretName: "secrets"
}
}
kubernetes: {}
}
"pastrychef-disk": {
name: "pastrychef-disk"
mountPath: "/logs"
subPath: null
readOnly: false
spec: {
gcePersistentDisk: {
pdName: "pastrychef-disk"
fsType: "ext4"
}
}
kubernetes: {}
}
}
kubernetes: {
spec: {
template: {
metadata: {
annotations: {
"prometheus.io.scrape": "true"
}
}
spec: {
containers: [{
livenessProbe: {
httpGet: {
path: "/debug/health"
port: 8080
}
initialDelaySeconds: 40
periodSeconds: 3
}
}]
}
}
}
}
label: {
app: "pastrychef"
domain: "prod"
component: "kitchen"
}
envSpec: {}
}
}
service: {
pastrychef: {
name: "pastrychef"
port: {
client: {
name: "client"
port: 8080
protocol: "TCP"
}
}
label: {
app: "pastrychef"
domain: "prod"
component: "kitchen"
}
kubernetes: {}
}
}
configMap: {}
kubernetes: {
services: {
pastrychef: {
apiVersion: "v1"
kind: "Service"
metadata: {
name: "pastrychef"
labels: {
app: "pastrychef"
domain: "prod"
component: "kitchen"
}
}
spec: {
selector: {
app: "pastrychef"
domain: "prod"
component: "kitchen"
}
ports: [{
name: "client"
port: 8080
protocol: "TCP"
}]
}
}
}
deployments: {
pastrychef: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: "pastrychef"
labels: {
component: "kitchen"
}
}
spec: {
template: {
metadata: {
labels: {
app: "pastrychef"
domain: "prod"
component: "kitchen"
}
annotations: {
"prometheus.io.scrape": "true"
}
}
spec: {
containers: [{
name: "pastrychef"
image: "gcr.io/myproj/pastrychef:v0.1.15"
args: ["-env=prod", "-logdir=/logs", "-event-server=events:7788", "-ssh-tunnel-key=/etc/certs/tunnel-private.pem", "-reconnect-delay=1m", "-etcd=etcd:2379", "-recovery-overlap=10000"] | []
livenessProbe: {
httpGet: {
path: "/debug/health"
port: 8080
}
initialDelaySeconds: 40
periodSeconds: 3
}
volumeMounts: [{
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}, {
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}]
ports: [{
name: "client"
containerPort: 8080
}]
}]
volumes: [{
name: "secret-ssh-key"
}, {
name: "pastrychef-disk"
}]
}
}
replicas: 1
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
souschef: {
name: "souschef"
kind: "deployment"
replicas: 1
image: "gcr.io/myproj/souschef:v0.5.3"
expose: {
port: {
client: 8080
}
}
port: {}
arg: {}
args: []
env: {}
kubernetes: {
spec: {
template: {
metadata: {
annotations: {
"prometheus.io.scrape": "true"
}
}
spec: {
containers: [{
livenessProbe: {
httpGet: {
path: "/debug/health"
port: 8080
}
initialDelaySeconds: 40
periodSeconds: 3
}
}]
}
}
}
}
label: {
app: "souschef"
domain: "prod"
component: "kitchen"
}
envSpec: {}
volume: {}
}
}
service: {
souschef: {
name: "souschef"
port: {
client: {
name: "client"
port: 8080
protocol: "TCP"
}
}
label: {
app: "souschef"
domain: "prod"
component: "kitchen"
}
kubernetes: {}
}
}
configMap: {}
kubernetes: {
services: {
souschef: {
apiVersion: "v1"
kind: "Service"
metadata: {
name: "souschef"
labels: {
app: "souschef"
domain: "prod"
component: "kitchen"
}
}
spec: {
selector: {
app: "souschef"
domain: "prod"
component: "kitchen"
}
ports: [{
name: "client"
port: 8080
protocol: "TCP"
}]
}
}
}
deployments: {
souschef: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: "souschef"
labels: {
component: "kitchen"
}
}
spec: {
template: {
metadata: {
labels: {
app: "souschef"
domain: "prod"
component: "kitchen"
}
annotations: {
"prometheus.io.scrape": "true"
}
}
spec: {
containers: [{
name: "souschef"
image: "gcr.io/myproj/souschef:v0.5.3"
args: []
livenessProbe: {
httpGet: {
path: "/debug/health"
port: 8080
}
initialDelaySeconds: 40
periodSeconds: 3
}
ports: [{
name: "client"
containerPort: 8080
}]
}]
}
}
replicas: 1
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {}
service: {}
configMap: {}
kubernetes: {
services: {}
deployments: {}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
alertmanager: {
name: "alertmanager"
kind: "deployment"
replicas: 1
kubernetes: {
spec: {
selector: {
matchLabels: {
app: "alertmanager"
}
}
}
}
image: "prom/alertmanager:v0.15.2"
args: ["--config.file=/etc/alertmanager/alerts.yaml", "--storage.path=/alertmanager", "--web.external-url=https://alertmanager.example.com"]
expose: {
port: {
alertmanager: 9093
}
}
port: {}
arg: {}
env: {}
volume: {
"config-volume": {
name: "config-volume"
mountPath: "/etc/alertmanager"
subPath: null
readOnly: false
spec: {
configMap: {
name: "alertmanager"
}
}
kubernetes: {}
}
alertmanager: {
name: "alertmanager"
mountPath: "/alertmanager"
subPath: null
readOnly: false
spec: {
emptyDir: {}
}
kubernetes: {}
}
}
label: {
app: "alertmanager"
domain: "prod"
component: "mon"
}
envSpec: {}
}
}
service: {
alertmanager: {
name: "alertmanager"
label: {
name: "alertmanager"
app: "alertmanager"
domain: "prod"
component: "mon"
}
port: {
alertmanager: {
name: "main"
port: 9093
protocol: "TCP"
}
}
kubernetes: {
metadata: {
annotations: {
"prometheus.io/scrape": "true"
"prometheus.io/path": "/metrics"
}
}
}
}
}
configMap: {
alertmanager: {
"alerts.yaml": """
receivers:
- name: pager
slack_configs:
- channel: '#cloudmon'
text: |-
{{ range .Alerts }}{{ .Annotations.description }}
{{ end }}
send_resolved: true
route:
receiver: pager
group_by:
- alertname
- cluster
"""
}
}
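// The alerts.yaml text above reappears verbatim under
// kubernetes.configMaps.alertmanager.data below; the ConfigMap object is
// evidently derived from this configMap field.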
kubernetes: {
services: {
alertmanager: {
apiVersion: "v1"
kind: "Service"
metadata: {
name: "alertmanager"
labels: {
name: "alertmanager"
app: "alertmanager"
domain: "prod"
component: "mon"
}
annotations: {
"prometheus.io/scrape": "true"
"prometheus.io/path": "/metrics"
}
}
spec: {
selector: {
name: "alertmanager"
app: "alertmanager"
domain: "prod"
component: "mon"
}
ports: [{
name: "main"
port: 9093
protocol: "TCP"
}]
}
}
}
deployments: {
alertmanager: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: "alertmanager"
labels: {
component: "mon"
}
}
spec: {
template: {
metadata: {
labels: {
app: "alertmanager"
domain: "prod"
component: "mon"
}
}
spec: {
containers: [{
name: "alertmanager"
image: "prom/alertmanager:v0.15.2"
args: ["--config.file=/etc/alertmanager/alerts.yaml", "--storage.path=/alertmanager", "--web.external-url=https://alertmanager.example.com"]
volumeMounts: [{
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}, {
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}]
ports: [{
name: "alertmanager"
containerPort: 9093
}]
}]
volumes: [{
name: "config-volume"
}, {
name: "alertmanager"
}]
}
}
selector: {
matchLabels: {
app: "alertmanager"
}
}
replicas: 1
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {
alertmanager: {
apiVersion: "v1"
kind: "ConfigMap"
metadata: {
name: "alertmanager"
labels: {
component: "mon"
}
}
data: {
"alerts.yaml": """
receivers:
- name: pager
slack_configs:
- channel: '#cloudmon'
text: |-
{{ range .Alerts }}{{ .Annotations.description }}
{{ end }}
send_resolved: true
route:
receiver: pager
group_by:
- alertname
- cluster
"""
}
}
}
}
deployment: {
grafana: {
name: "grafana"
kind: "deployment"
replicas: 1
image: "grafana/grafana:4.5.2"
expose: {
port: {
grafana: 3000
}
}
port: {
web: 8080
}
arg: {}
args: []
volume: {
"grafana-volume": {
name: "grafana-volume"
mountPath: "/var/lib/grafana"
subPath: null
readOnly: false
spec: {
gcePersistentDisk: {
pdName: "grafana-volume"
fsType: "ext4"
}
}
kubernetes: {}
}
}
env: {
GF_AUTH_BASIC_ENABLED: "false"
GF_AUTH_ANONYMOUS_ENABLED: "true"
GF_AUTH_ANONYMOUS_ORG_ROLE: "admin"
}
kubernetes: {
spec: {
template: {
spec: {
containers: [{
resources: {
limits: {
cpu: "100m"
memory: "100Mi"
}
requests: {
cpu: "100m"
memory: "100Mi"
}
}
}]
}
}
}
}
label: {
app: "grafana"
domain: "prod"
component: "mon"
}
envSpec: {
GF_AUTH_BASIC_ENABLED: {
value: "false"
}
GF_AUTH_ANONYMOUS_ENABLED: {
value: "true"
}
GF_AUTH_ANONYMOUS_ORG_ROLE: {
value: "admin"
}
}
}
}
service: {
grafana: {
name: "grafana"
port: {
grafana: {
name: "grafana"
port: 3000
protocol: "TCP"
}
}
label: {
app: "grafana"
domain: "prod"
component: "mon"
}
kubernetes: {}
}
}
configMap: {}
kubernetes: {
services: {
grafana: {
apiVersion: "v1"
kind: "Service"
metadata: {
name: "grafana"
labels: {
app: "grafana"
domain: "prod"
component: "mon"
}
}
spec: {
selector: {
app: "grafana"
domain: "prod"
component: "mon"
}
ports: [{
name: "grafana"
port: 3000
protocol: "TCP"
}]
}
}
}
deployments: {
grafana: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: "grafana"
labels: {
component: "mon"
}
}
spec: {
template: {
metadata: {
labels: {
app: "grafana"
domain: "prod"
component: "mon"
}
}
spec: {
containers: [{
name: "grafana"
image: "grafana/grafana:4.5.2"
args: []
env: [{
name: "GF_AUTH_BASIC_ENABLED"
value: "false"
}, {
name: "GF_AUTH_ANONYMOUS_ENABLED"
value: "true"
}, {
name: "GF_AUTH_ANONYMOUS_ORG_ROLE"
value: "admin"
}]
volumeMounts: [{
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}]
ports: [{
name: "grafana"
containerPort: 3000
}, {
name: "web"
containerPort: 8080
}]
resources: {
limits: {
cpu: "100m"
memory: "100Mi"
}
requests: {
cpu: "100m"
memory: "100Mi"
}
}
}]
volumes: [{
name: "grafana-volume"
}]
}
}
replicas: 1
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
"node-exporter": {
name: "node-exporter"
kind: "daemon"
replicas: 1
image: "quay.io/prometheus/node-exporter:v0.16.0"
expose: {
port: {
scrape: 9100
}
}
port: {}
arg: {}
args: ["--path.procfs=/host/proc", "--path.sysfs=/host/sys"]
env: {}
volume: {
proc: {
name: "proc"
mountPath: "/host/proc"
subPath: null
readOnly: true
spec: {
hostPath: {
path: "/proc"
}
}
kubernetes: {}
}
sys: {
name: "sys"
mountPath: "/host/sys"
subPath: null
readOnly: true
spec: {
hostPath: {
path: "/sys"
}
}
kubernetes: {}
}
}
kubernetes: {
spec: {
template: {
spec: {
hostNetwork: true
hostPID: true
containers: [{
ports: [{
hostPort: 9100
}]
resources: {
requests: {
memory: "30Mi"
cpu: "100m"
}
limits: {
memory: "50Mi"
cpu: "200m"
}
}
}]
}
}
}
}
label: {
app: "node-exporter"
domain: "prod"
component: "mon"
}
envSpec: {}
}
}
service: {
"node-exporter": {
name: "node-exporter"
port: {
scrape: {
name: "metrics"
port: 9100
protocol: "TCP"
}
}
kubernetes: {
metadata: {
annotations: {
"prometheus.io/scrape": "true"
}
}
spec: {
type: "ClusterIP"
clusterIP: "None"
}
}
label: {
app: "node-exporter"
domain: "prod"
component: "mon"
}
}
}
configMap: {}
kubernetes: {
services: {
"node-exporter": {
apiVersion: "v1"
kind: "Service"
metadata: {
annotations: {
"prometheus.io/scrape": "true"
}
name: "node-exporter"
labels: {
app: "node-exporter"
domain: "prod"
component: "mon"
}
}
spec: {
type: "ClusterIP"
clusterIP: "None"
selector: {
app: "node-exporter"
domain: "prod"
component: "mon"
}
ports: [{
name: "metrics"
port: 9100
protocol: "TCP"
}]
}
}
}
deployments: {}
statefulSets: {}
daemonSets: {
"node-exporter": {
apiVersion: "extensions/v1beta1"
metadata: {
name: "node-exporter"
labels: {
component: "mon"
}
}
spec: {
template: {
metadata: {
labels: {
app: "node-exporter"
domain: "prod"
component: "mon"
}
}
spec: {
hostNetwork: true
hostPID: true
volumes: [{
name: "proc"
}, {
name: "sys"
}]
containers: [{
name: "node-exporter"
image: "quay.io/prometheus/node-exporter:v0.16.0"
args: ["--path.procfs=/host/proc", "--path.sysfs=/host/sys"]
volumeMounts: [{
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}, {
name: v.name
mountPath: v.mountPath
if v.subPath != null | true {
subPath: v.subPath
}
if v.readOnly {
readOnly: v.readOnly
}
}]
ports: [{
name: "scrape"
hostPort: 9100
containerPort: 9100
}]
resources: {
requests: {
memory: "30Mi"
cpu: "100m"
}
limits: {
memory: "50Mi"
cpu: "200m"
}
}
}]
}
}
}
kind: "DaemonSet"
}
}
configMaps: {}
}
deployment: {
prometheus: {
name: "prometheus"
kind: "deployment"
replicas: 1
image: "prom/prometheus:v2.4.3"
args: ["--config.file=/etc/prometheus/prometheus.yml", "--web.external-url=https://prometheus.example.com"]
expose: {
port: {
web: 9090
}
}
port: {}
arg: {}
env: {}
volume: {
"config-volume": {
name: "config-volume"
mountPath: "/etc/prometheus"
subPath: null
readOnly: false
spec: {
configMap: {
name: "prometheus"
}
}
kubernetes: {}
}
}
kubernetes: {
spec: {
selector: {
matchLabels: {
app: "prometheus"
}
}
strategy: {
type: "RollingUpdate"
rollingUpdate: {
maxSurge: 0
maxUnavailable: 1
}
}
template: {
metadata: {
annotations: {
"prometheus.io.scrape": "true"
}
}
}
}
}
label: {
app: "prometheus"
domain: "prod"
component: "mon"
}
envSpec: {}
}
}
service: {
prometheus: {
name: "prometheus"
label: {
name: "prometheus"
app: "prometheus"
domain: "prod"
component: "mon"
}
port: {
web: {
name: "main"
port: 9090
nodePort: 30900
protocol: "TCP"
}
}
kubernetes: {
metadata: {
annotations: {
"prometheus.io/scrape": "true"
}
}
spec: {
type: "NodePort"
}
}
}
}
configMap: {
prometheus: {
"alert.rules": """
groups:
- name: rules.yaml
rules:
- alert: InstanceDown
expr: up == 0
for: 30s
labels:
severity: page
annotations:
description: '{{$labels.app}} of job {{ $labels.job }} has been down for more
than 30 seconds.'
summary: Instance {{$labels.app}} down
- alert: InsufficientPeers
expr: count(up{job=\"etcd\"} == 0) > (count(up{job=\"etcd\"}) / 2 - 1)
for: 3m
labels:
severity: page
annotations:
description: If one more etcd peer goes down the cluster will be unavailable
summary: etcd cluster small
- alert: EtcdNoMaster
expr: sum(etcd_server_has_leader{app=\"etcd\"}) == 0
for: 1s
labels:
severity: page
annotations:
summary: No ETCD master elected.
- alert: PodRestart
expr: (max_over_time(pod_container_status_restarts_total[5m]) - min_over_time(pod_container_status_restarts_total[5m]))
> 2
for: 1m
labels:
severity: page
annotations:
description: '{{$labels.app}} {{ $labels.container }} restarted {{ $value }}
times in 5m.'
summary: Pod for {{$labels.container}} restarts too often
"""
"prometheus.yml": """
global:
scrape_interval: 15s
rule_files:
- /etc/prometheus/alert.rules
alerting:
alertmanagers:
- scheme: http
static_configs:
- targets:
- alertmanager:9093
scrape_configs:
- job_name: kubernetes-apiservers
kubernetes_sd_configs:
- role: endpoints
scheme: https
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
relabel_configs:
- source_labels:
- __meta_kubernetes_namespace
- __meta_kubernetes_service_name
- __meta_kubernetes_endpoint_port_name
action: keep
regex: default;kubernetes;https
- job_name: kubernetes-nodes
scheme: https
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
kubernetes_sd_configs:
- role: node
relabel_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
- target_label: __address__
replacement: kubernetes.default.svc:443
- source_labels:
- __meta_kubernetes_node_name
regex: (.+)
target_label: __metrics_path__
replacement: /api/v1/nodes/${1}/proxy/metrics
- job_name: kubernetes-cadvisor
scheme: https
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
kubernetes_sd_configs:
- role: node
relabel_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
- target_label: __address__
replacement: kubernetes.default.svc:443
- source_labels:
- __meta_kubernetes_node_name
regex: (.+)
target_label: __metrics_path__
replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor
- job_name: kubernetes-service-endpoints
kubernetes_sd_configs:
- role: endpoints
relabel_configs:
- source_labels:
- __meta_kubernetes_service_annotation_prometheus_io_scrape
action: keep
regex: true
- source_labels:
- __meta_kubernetes_service_annotation_prometheus_io_scheme
action: replace
target_label: __scheme__
regex: (https?)
- source_labels:
- __meta_kubernetes_service_annotation_prometheus_io_path
action: replace
target_label: __metrics_path__
regex: (.+)
- source_labels:
- __address__
- __meta_kubernetes_service_annotation_prometheus_io_port
action: replace
target_label: __address__
regex: ([^:]+)(?::\\d+)?;(\\d+)
replacement: $1:$2
- action: labelmap
regex: __meta_kubernetes_service_label_(.+)
- source_labels:
- __meta_kubernetes_namespace
action: replace
target_label: kubernetes_namespace
- source_labels:
- __meta_kubernetes_service_name
action: replace
target_label: kubernetes_name
- job_name: kubernetes-services
metrics_path: /probe
params:
module:
- http_2xx
kubernetes_sd_configs:
- role: service
relabel_configs:
- source_labels:
- __meta_kubernetes_service_annotation_prometheus_io_probe
action: keep
regex: true
- source_labels:
- __address__
target_label: __param_target
- target_label: __address__
replacement: blackbox-exporter.example.com:9115
- source_labels:
- __param_target
target_label: app
- action: labelmap
regex: __meta_kubernetes_service_label_(.+)
- source_labels:
- __meta_kubernetes_namespace
target_label: kubernetes_namespace
- source_labels:
- __meta_kubernetes_service_name
target_label: kubernetes_name
- job_name: kubernetes-ingresses
metrics_path: /probe
params:
module:
- http_2xx
kubernetes_sd_configs:
- role: ingress
relabel_configs:
- source_labels:
- __meta_kubernetes_ingress_annotation_prometheus_io_probe
action: keep
regex: true
- source_labels:
- __meta_kubernetes_ingress_scheme
- __address__
- __meta_kubernetes_ingress_path
regex: (.+);(.+);(.+)
replacement: ${1}://${2}${3}
target_label: __param_target
- target_label: __address__
replacement: blackbox-exporter.example.com:9115
- source_labels:
- __param_target
target_label: app
- action: labelmap
regex: __meta_kubernetes_ingress_label_(.+)
- source_labels:
- __meta_kubernetes_namespace
target_label: kubernetes_namespace
- source_labels:
- __meta_kubernetes_ingress_name
target_label: kubernetes_name
- job_name: kubernetes-pods
kubernetes_sd_configs:
- role: pod
relabel_configs:
- source_labels:
- __meta_kubernetes_pod_annotation_prometheus_io_scrape
action: keep
regex: true
- source_labels:
- __meta_kubernetes_pod_annotation_prometheus_io_path
action: replace
target_label: __metrics_path__
regex: (.+)
- source_labels:
- __address__
- __meta_kubernetes_pod_annotation_prometheus_io_port
action: replace
regex: ([^:]+)(?::\\d+)?;(\\d+)
replacement: $1:$2
target_label: __address__
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
- source_labels:
- __meta_kubernetes_namespace
action: replace
target_label: kubernetes_namespace
- source_labels:
- __meta_kubernetes_pod_name
action: replace
target_label: kubernetes_pod_name
"""
}
}
kubernetes: {
services: {
prometheus: {
apiVersion: "v1"
kind: "Service"
metadata: {
annotations: {
"prometheus.io/scrape": "true"
}
name: "prometheus"
labels: {
name: "prometheus"
app: "prometheus"
domain: "prod"
component: "mon"
}
}
spec: {
type: "NodePort"
selector: {
name: "prometheus"
app: "prometheus"
domain: "prod"
component: "mon"
}
ports: [{
name: "main"
port: 9090
nodePort: 30900
protocol: "TCP"
}]
}
}
}
deployments: {
prometheus: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: "prometheus"
labels: {
component: "mon"
}
}
spec: {
template: {
metadata: {
labels: {
app: "prometheus"
domain: "prod"
component: "mon"
}
annotations: {
"prometheus.io.scrape": "true"
}
}
spec: {
containers: [{
name: "prometheus"
image: "prom/prometheus:v2.4.3"
args: ["--config.file=/etc/prometheus/prometheus.yml", "--web.external-url=https://prometheus.example.com"]
volumeMounts: [{
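// config-volume carries the "prometheus" ConfigMap; the mountPath below is
// inferred (assumed) from the --config.file=/etc/prometheus/prometheus.yml flag above.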
name: "config-volume"
mountPath: "/etc/prometheus"
}]
ports: [{
name: "web"
containerPort: 9090
}]
}]
volumes: [{
name: "config-volume"
}]
}
}
selector: {
matchLabels: {
app: "prometheus"
}
}
strategy: {
type: "RollingUpdate"
rollingUpdate: {
maxSurge: 0
maxUnavailable: 1
}
}
replicas: 1
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {
prometheus: {
apiVersion: "v1"
kind: "ConfigMap"
metadata: {
name: "prometheus"
labels: {
component: "mon"
}
}
data: {
"alert.rules": """
groups:
- name: rules.yaml
rules:
- alert: InstanceDown
expr: up == 0
for: 30s
labels:
severity: page
annotations:
description: '{{$labels.app}} of job {{ $labels.job }} has been down for more
than 30 seconds.'
summary: Instance {{$labels.app}} down
- alert: InsufficientPeers
expr: count(up{job=\"etcd\"} == 0) > (count(up{job=\"etcd\"}) / 2 - 1)
for: 3m
labels:
severity: page
annotations:
description: If one more etcd peer goes down, the cluster will be unavailable.
summary: etcd cluster small
- alert: EtcdNoMaster
expr: sum(etcd_server_has_leader{app=\"etcd\"}) == 0
for: 1s
labels:
severity: page
annotations:
summary: No etcd leader elected.
- alert: PodRestart
expr: (max_over_time(pod_container_status_restarts_total[5m]) - min_over_time(pod_container_status_restarts_total[5m]))
> 2
for: 1m
labels:
severity: page
annotations:
description: '{{$labels.app}} {{ $labels.container }} restarted {{ $value }}
times in 5m.'
summary: Pod for {{$labels.container}} restarts too often
"""
"prometheus.yml": """
global:
scrape_interval: 15s
rule_files:
- /etc/prometheus/alert.rules
alerting:
alertmanagers:
- scheme: http
static_configs:
- targets:
- alertmanager:9093
scrape_configs:
- job_name: kubernetes-apiservers
kubernetes_sd_configs:
- role: endpoints
scheme: https
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
relabel_configs:
- source_labels:
- __meta_kubernetes_namespace
- __meta_kubernetes_service_name
- __meta_kubernetes_endpoint_port_name
action: keep
regex: default;kubernetes;https
- job_name: kubernetes-nodes
scheme: https
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
kubernetes_sd_configs:
- role: node
relabel_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
- target_label: __address__
replacement: kubernetes.default.svc:443
- source_labels:
- __meta_kubernetes_node_name
regex: (.+)
target_label: __metrics_path__
replacement: /api/v1/nodes/${1}/proxy/metrics
- job_name: kubernetes-cadvisor
scheme: https
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
kubernetes_sd_configs:
- role: node
relabel_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
- target_label: __address__
replacement: kubernetes.default.svc:443
- source_labels:
- __meta_kubernetes_node_name
regex: (.+)
target_label: __metrics_path__
replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor
- job_name: kubernetes-service-endpoints
kubernetes_sd_configs:
- role: endpoints
relabel_configs:
- source_labels:
- __meta_kubernetes_service_annotation_prometheus_io_scrape
action: keep
regex: true
- source_labels:
- __meta_kubernetes_service_annotation_prometheus_io_scheme
action: replace
target_label: __scheme__
regex: (https?)
- source_labels:
- __meta_kubernetes_service_annotation_prometheus_io_path
action: replace
target_label: __metrics_path__
regex: (.+)
- source_labels:
- __address__
- __meta_kubernetes_service_annotation_prometheus_io_port
action: replace
target_label: __address__
regex: ([^:]+)(?::\\d+)?;(\\d+)
replacement: $1:$2
- action: labelmap
regex: __meta_kubernetes_service_label_(.+)
- source_labels:
- __meta_kubernetes_namespace
action: replace
target_label: kubernetes_namespace
- source_labels:
- __meta_kubernetes_service_name
action: replace
target_label: kubernetes_name
- job_name: kubernetes-services
metrics_path: /probe
params:
module:
- http_2xx
kubernetes_sd_configs:
- role: service
relabel_configs:
- source_labels:
- __meta_kubernetes_service_annotation_prometheus_io_probe
action: keep
regex: true
- source_labels:
- __address__
target_label: __param_target
- target_label: __address__
replacement: blackbox-exporter.example.com:9115
- source_labels:
- __param_target
target_label: app
- action: labelmap
regex: __meta_kubernetes_service_label_(.+)
- source_labels:
- __meta_kubernetes_namespace
target_label: kubernetes_namespace
- source_labels:
- __meta_kubernetes_service_name
target_label: kubernetes_name
- job_name: kubernetes-ingresses
metrics_path: /probe
params:
module:
- http_2xx
kubernetes_sd_configs:
- role: ingress
relabel_configs:
- source_labels:
- __meta_kubernetes_ingress_annotation_prometheus_io_probe
action: keep
regex: true
- source_labels:
- __meta_kubernetes_ingress_scheme
- __address__
- __meta_kubernetes_ingress_path
regex: (.+);(.+);(.+)
replacement: ${1}://${2}${3}
target_label: __param_target
- target_label: __address__
replacement: blackbox-exporter.example.com:9115
- source_labels:
- __param_target
target_label: app
- action: labelmap
regex: __meta_kubernetes_ingress_label_(.+)
- source_labels:
- __meta_kubernetes_namespace
target_label: kubernetes_namespace
- source_labels:
- __meta_kubernetes_ingress_name
target_label: kubernetes_name
- job_name: kubernetes-pods
kubernetes_sd_configs:
- role: pod
relabel_configs:
- source_labels:
- __meta_kubernetes_pod_annotation_prometheus_io_scrape
action: keep
regex: true
- source_labels:
- __meta_kubernetes_pod_annotation_prometheus_io_path
action: replace
target_label: __metrics_path__
regex: (.+)
- source_labels:
- __address__
- __meta_kubernetes_pod_annotation_prometheus_io_port
action: replace
regex: ([^:]+)(?::\\d+)?;(\\d+)
replacement: $1:$2
target_label: __address__
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
- source_labels:
- __meta_kubernetes_namespace
action: replace
target_label: kubernetes_namespace
- source_labels:
- __meta_kubernetes_pod_name
action: replace
target_label: kubernetes_pod_name
"""
}
}
}
}
deployment: {}
service: {}
configMap: {}
kubernetes: {
services: {}
deployments: {}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
authproxy: {
name: "authproxy"
kind: "deployment"
replicas: 1
image: "skippy/oauth2_proxy:2.0.1"
args: ["--config=/etc/authproxy/authproxy.cfg"]
expose: {
port: {
client: 4180
}
}
port: {}
arg: {}
env: {}
volume: {
"config-volume": {
name: "config-volume"
mountPath: "/etc/authproxy"
subPath: null
readOnly: false
spec: {
configMap: {
name: "authproxy"
}
}
kubernetes: {}
}
}
label: {
app: "authproxy"
domain: "prod"
component: "proxy"
}
kubernetes: {}
envSpec: {}
}
}
service: {
authproxy: {
name: "authproxy"
port: {
client: {
name: "client"
port: 4180
protocol: "TCP"
}
}
label: {
app: "authproxy"
domain: "prod"
component: "proxy"
}
kubernetes: {}
}
}
configMap: {
authproxy: {
"authproxy.cfg": """
# Google Auth Proxy Config File
## https://github.com/bitly/google_auth_proxy
## <addr>:<port> to listen on for HTTP clients
http_address = \"0.0.0.0:4180\"
## the OAuth Redirect URL.
redirect_url = \"https://auth.example.com/oauth2/callback\"
## the http url(s) of the upstream endpoint. If multiple, routing is based on path
upstreams = [
# frontend
\"http://frontend-waiter:7080/dpr/\",
\"http://frontend-maitred:7080/ui/\",
\"http://frontend-maitred:7080/ui\",
\"http://frontend-maitred:7080/report/\",
\"http://frontend-maitred:7080/report\",
\"http://frontend-maitred:7080/static/\",
# kitchen
\"http://kitchen-chef:8080/visit\",
# infrastructure
\"http://download:7080/file/\",
\"http://download:7080/archive\",
\"http://tasks:7080/tasks\",
\"http://tasks:7080/tasks/\",
]
## pass HTTP Basic Auth, X-Forwarded-User and X-Forwarded-Email information to upstream
pass_basic_auth = true
request_logging = true
## Google Apps Domains to allow authentication for
google_apps_domains = [
\"example.com\",
]
email_domains = [
\"example.com\",
]
## The Google OAuth Client ID, Secret
client_id = \"---\"
client_secret = \"---\"
## Cookie Settings
## Secret - the seed string for secure cookies
## Domain - optional cookie domain to force cookies to (ie: .yourcompany.com)
## Expire - expire timeframe for cookie
cookie_secret = \"won't tell you\"
cookie_domain = \".example.com\"
cookie_https_only = true
"""
}
}
kubernetes: {
services: {
authproxy: {
apiVersion: "v1"
kind: "Service"
metadata: {
name: "authproxy"
labels: {
app: "authproxy"
domain: "prod"
component: "proxy"
}
}
spec: {
selector: {
app: "authproxy"
domain: "prod"
component: "proxy"
}
ports: [{
name: "client"
port: 4180
protocol: "TCP"
}]
}
}
}
deployments: {
authproxy: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: "authproxy"
labels: {
component: "proxy"
}
}
spec: {
template: {
metadata: {
labels: {
app: "authproxy"
domain: "prod"
component: "proxy"
}
}
spec: {
containers: [{
name: "authproxy"
image: "skippy/oauth2_proxy:2.0.1"
args: ["--config=/etc/authproxy/authproxy.cfg"]
volumeMounts: [{
name: "config-volume"
mountPath: "/etc/authproxy"
}]
ports: [{
name: "client"
containerPort: 4180
}]
}]
volumes: [{
name: "config-volume"
}]
}
}
replicas: 1
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {
authproxy: {
apiVersion: "v1"
kind: "ConfigMap"
metadata: {
name: "authproxy"
labels: {
component: "proxy"
}
}
data: {
"authproxy.cfg": """
# Google Auth Proxy Config File
## https://github.com/bitly/google_auth_proxy
## <addr>:<port> to listen on for HTTP clients
http_address = \"0.0.0.0:4180\"
## the OAuth Redirect URL.
redirect_url = \"https://auth.example.com/oauth2/callback\"
## the http url(s) of the upstream endpoint. If multiple, routing is based on path
upstreams = [
# frontend
\"http://frontend-waiter:7080/dpr/\",
\"http://frontend-maitred:7080/ui/\",
\"http://frontend-maitred:7080/ui\",
\"http://frontend-maitred:7080/report/\",
\"http://frontend-maitred:7080/report\",
\"http://frontend-maitred:7080/static/\",
# kitchen
\"http://kitchen-chef:8080/visit\",
# infrastructure
\"http://download:7080/file/\",
\"http://download:7080/archive\",
\"http://tasks:7080/tasks\",
\"http://tasks:7080/tasks/\",
]
## pass HTTP Basic Auth, X-Forwarded-User and X-Forwarded-Email information to upstream
pass_basic_auth = true
request_logging = true
## Google Apps Domains to allow authentication for
google_apps_domains = [
\"example.com\",
]
email_domains = [
\"example.com\",
]
## The Google OAuth Client ID, Secret
client_id = \"---\"
client_secret = \"---\"
## Cookie Settings
## Secret - the seed string for secure cookies
## Domain - optional cookie domain to force cookies to (ie: .yourcompany.com)
## Expire - expire timeframe for cookie
cookie_secret = \"won't tell you\"
cookie_domain = \".example.com\"
cookie_https_only = true
"""
}
}
}
}
deployment: {
goget: {
name: "goget"
kind: "deployment"
replicas: 1
image: "gcr.io/myproj/goget:v0.5.1"
expose: {
port: {
https: 7443
}
}
port: {}
arg: {}
args: []
env: {}
volume: {
"secret-volume": {
name: "secret-volume"
mountPath: "/etc/ssl"
subPath: null
readOnly: false
spec: {
secret: {
secretName: "goget-secrets"
}
}
kubernetes: {}
}
}
label: {
app: "goget"
domain: "prod"
component: "proxy"
}
kubernetes: {}
envSpec: {}
}
}
service: {
goget: {
name: "goget"
port: {
http: {
name: "http"
port: 443
protocol: "TCP"
}
https: {
name: "https"
port: 7443
protocol: "TCP"
}
}
kubernetes: {
spec: {
type: "LoadBalancer"
loadBalancerIP: "1.3.5.7"
}
}
label: {
app: "goget"
domain: "prod"
component: "proxy"
}
}
}
configMap: {}
kubernetes: {
services: {
goget: {
apiVersion: "v1"
kind: "Service"
spec: {
type: "LoadBalancer"
selector: {
app: "goget"
domain: "prod"
component: "proxy"
}
ports: [{
name: "http"
port: 443
protocol: "TCP"
}, {
name: "https"
port: 7443
protocol: "TCP"
}]
loadBalancerIP: "1.3.5.7"
}
metadata: {
name: "goget"
labels: {
app: "goget"
domain: "prod"
component: "proxy"
}
}
}
}
deployments: {
goget: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: "goget"
labels: {
component: "proxy"
}
}
spec: {
template: {
metadata: {
labels: {
app: "goget"
domain: "prod"
component: "proxy"
}
}
spec: {
containers: [{
name: "goget"
image: "gcr.io/myproj/goget:v0.5.1"
args: []
volumeMounts: [{
name: "secret-volume"
mountPath: "/etc/ssl"
}]
ports: [{
name: "https"
containerPort: 7443
}]
}]
volumes: [{
name: "secret-volume"
}]
}
}
replicas: 1
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {}
}
deployment: {
nginx: {
name: "nginx"
kind: "deployment"
replicas: 1
image: "nginx:1.11.10-alpine"
expose: {
port: {
http: 80
https: 443
}
}
port: {}
arg: {}
args: []
env: {}
volume: {
"secret-volume": {
name: "secret-volume"
mountPath: "/etc/ssl"
subPath: null
readOnly: false
spec: {
secret: {
secretName: "proxy-secrets"
}
}
kubernetes: {}
}
"config-volume": {
name: "config-volume"
mountPath: "/etc/nginx/nginx.conf"
subPath: "nginx.conf"
readOnly: false
spec: {
configMap: {
name: "nginx"
}
}
kubernetes: {}
}
}
label: {
app: "nginx"
domain: "prod"
component: "proxy"
}
kubernetes: {}
envSpec: {}
}
}
service: {
nginx: {
name: "nginx"
port: {
http: {
name: "http"
port: 80
protocol: "TCP"
}
https: {
name: "https"
port: 443
protocol: "TCP"
}
}
kubernetes: {
spec: {
type: "LoadBalancer"
loadBalancerIP: "1.3.4.5"
}
}
label: {
app: "nginx"
domain: "prod"
component: "proxy"
}
}
}
configMap: {
nginx: {
"nginx.conf": """
events {
worker_connections 768;
}
http {
sendfile on;
tcp_nopush on;
tcp_nodelay on;
# needs to be high for some download jobs.
keepalive_timeout 400;
# proxy_connect_timeout 300;
proxy_send_timeout 300;
proxy_read_timeout 300;
send_timeout 300;
types_hash_max_size 2048;
include /etc/nginx/mime.types;
default_type application/octet-stream;
access_log /dev/stdout;
error_log /dev/stdout;
# Disable POST body size constraints. We often deal with large
# files; Docker images in particular can be large.
client_max_body_size 0;
upstream goget {
server localhost:7070;
}
# Redirect incoming Google Cloud Storage notifications:
server {
listen 443 ssl;
server_name notify.example.com notify2.example.com;
ssl_certificate /etc/ssl/server.crt;
ssl_certificate_key /etc/ssl/server.key;
# Security enhancements to deal with poodles and the like.
# See https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html
# ssl_ciphers 'AES256+EECDH:AES256+EDH';
ssl_ciphers \"ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA:ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES256-GCM-SHA384:AES128-GCM-SHA256:AES256-SHA256:AES128-SHA256:AES256-SHA:AES128-SHA:DES-CBC3-SHA:HIGH:!aNULL:!eNULL:!EXPORT:!DES:!MD5:!PSK:!RC4\";
# We don't like poodles.
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_session_cache shared:SSL:10m;
# Enable Forward secrecy.
ssl_dhparam /etc/ssl/dhparam.pem;
ssl_prefer_server_ciphers on;
# Enable HSTS.
add_header Strict-Transport-Security max-age=1209600;
# required to avoid HTTP 411: see Issue #1486 (https://github.com/dotcloud/docker/issues/1486)
chunked_transfer_encoding on;
location / {
proxy_pass http://tasks:7080;
proxy_connect_timeout 1;
}
}
server {
listen 80;
listen 443 ssl;
server_name x.example.com example.io;
location ~ \"(/[^/]+)(/.*)?\" {
set $myhost $host;
if ($arg_go-get = \"1\") {
set $myhost \"goget\";
}
proxy_pass http://$myhost$1;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Scheme $scheme;
proxy_connect_timeout 1;
}
location / {
set $myhost $host;
if ($arg_go-get = \"1\") {
set $myhost \"goget\";
}
proxy_pass http://$myhost;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Scheme $scheme;
proxy_connect_timeout 1;
}
}
server {
listen 80;
server_name www.example.com w.example.com;
resolver 8.8.8.8;
location / {
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Server $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Real-IP $remote_addr;
proxy_pass http://$host.default.example.appspot.com/$request_uri;
proxy_redirect http://$host.default.example.appspot.com/ /;
}
}
# Kubernetes URI space. Maps URI paths to specific servers using the
# proxy.
server {
listen 80;
listen 443 ssl;
server_name proxy.example.com;
ssl_certificate /etc/ssl/server.crt;
ssl_certificate_key /etc/ssl/server.key;
# Security enhancements to deal with poodles and the like.
# See https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html
# ssl_ciphers 'AES256+EECDH:AES256+EDH';
ssl_ciphers \"ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA:ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES256-GCM-SHA384:AES128-GCM-SHA256:AES256-SHA256:AES128-SHA256:AES256-SHA:AES128-SHA:DES-CBC3-SHA:HIGH:!aNULL:!eNULL:!EXPORT:!DES:!MD5:!PSK:!RC4\";
# We don't like poodles.
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_session_cache shared:SSL:10m;
# Enable Forward secrecy.
ssl_dhparam /etc/ssl/dhparam.pem;
ssl_prefer_server_ciphers on;
# Enable HSTS.
add_header Strict-Transport-Security max-age=1209600;
if ($ssl_protocol = \"\") {
rewrite ^ https://$host$request_uri? permanent;
}
# required to avoid HTTP 411: see Issue #1486 (https://github.com/dotcloud/docker/issues/1486)
chunked_transfer_encoding on;
location / {
proxy_pass http://kubeproxy:4180;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Scheme $scheme;
proxy_connect_timeout 1;
}
}
server {
# We could add the following line and the connection would still be SSL,
# but it doesn't appear to be necessary. Seems safer this way.
listen 80;
listen 443 default ssl;
server_name ~^(?<sub>.*)\\.example\\.com$;
ssl_certificate /etc/ssl/server.crt;
ssl_certificate_key /etc/ssl/server.key;
# Security enhancements to deal with poodles and the like.
# See https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html
# ssl_ciphers 'AES256+EECDH:AES256+EDH';
ssl_ciphers \"ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA:ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES256-GCM-SHA384:AES128-GCM-SHA256:AES256-SHA256:AES128-SHA256:AES256-SHA:AES128-SHA:DES-CBC3-SHA:HIGH:!aNULL:!eNULL:!EXPORT:!DES:!MD5:!PSK:!RC4\";
# We don't like poodles.
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_session_cache shared:SSL:10m;
# Enable Forward secrecy.
ssl_dhparam /etc/ssl/dhparam.pem;
ssl_prefer_server_ciphers on;
# Enable HSTS.
add_header Strict-Transport-Security max-age=1209600;
if ($ssl_protocol = \"\") {
rewrite ^ https://$host$request_uri? permanent;
}
# required to avoid HTTP 411: see Issue #1486 (https://github.com/dotcloud/docker/issues/1486)
chunked_transfer_encoding on;
location / {
proxy_pass http://authproxy:4180;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Scheme $scheme;
proxy_connect_timeout 1;
}
}
}
"""
}
}
kubernetes: {
services: {
nginx: {
apiVersion: "v1"
kind: "Service"
spec: {
type: "LoadBalancer"
selector: {
app: "nginx"
domain: "prod"
component: "proxy"
}
ports: [{
name: "http"
port: 80
protocol: "TCP"
}, {
name: "https"
port: 443
protocol: "TCP"
}]
loadBalancerIP: "1.3.4.5"
}
metadata: {
name: "nginx"
labels: {
app: "nginx"
domain: "prod"
component: "proxy"
}
}
}
}
deployments: {
nginx: {
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata: {
name: "nginx"
labels: {
component: "proxy"
}
}
spec: {
template: {
metadata: {
labels: {
app: "nginx"
domain: "prod"
component: "proxy"
}
}
spec: {
containers: [{
name: "nginx"
image: "nginx:1.11.10-alpine"
args: []
volumeMounts: [{
name: "secret-volume"
mountPath: "/etc/ssl"
}, {
name: "config-volume"
subPath: "nginx.conf"
mountPath: "/etc/nginx/nginx.conf"
}]
ports: [{
name: "http"
containerPort: 80
}, {
name: "https"
containerPort: 443
}]
}]
volumes: [{
name: "secret-volume"
}, {
name: "config-volume"
}]
}
}
replicas: 1
}
}
}
statefulSets: {}
daemonSets: {}
configMaps: {
nginx: {
apiVersion: "v1"
kind: "ConfigMap"
metadata: {
name: "nginx"
labels: {
component: "proxy"
}
}
data: {
"nginx.conf": """
events {
worker_connections 768;
}
http {
sendfile on;
tcp_nopush on;
tcp_nodelay on;
# needs to be high for some download jobs.
keepalive_timeout 400;
# proxy_connect_timeout 300;
proxy_send_timeout 300;
proxy_read_timeout 300;
send_timeout 300;
types_hash_max_size 2048;
include /etc/nginx/mime.types;
default_type application/octet-stream;
access_log /dev/stdout;
error_log /dev/stdout;
# Disable POST body size constraints. We often deal with large
# files; Docker images in particular can be large.
client_max_body_size 0;
upstream goget {
server localhost:7070;
}
# Redirect incoming Google Cloud Storage notifications:
server {
listen 443 ssl;
server_name notify.example.com notify2.example.com;
ssl_certificate /etc/ssl/server.crt;
ssl_certificate_key /etc/ssl/server.key;
# Security enhancements to deal with poodles and the like.
# See https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html
# ssl_ciphers 'AES256+EECDH:AES256+EDH';
ssl_ciphers \"ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA:ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES256-GCM-SHA384:AES128-GCM-SHA256:AES256-SHA256:AES128-SHA256:AES256-SHA:AES128-SHA:DES-CBC3-SHA:HIGH:!aNULL:!eNULL:!EXPORT:!DES:!MD5:!PSK:!RC4\";
# We don't like poodles.
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_session_cache shared:SSL:10m;
# Enable Forward secrecy.
ssl_dhparam /etc/ssl/dhparam.pem;
ssl_prefer_server_ciphers on;
# Enable HSTS.
add_header Strict-Transport-Security max-age=1209600;
# required to avoid HTTP 411: see Issue #1486 (https://github.com/dotcloud/docker/issues/1486)
chunked_transfer_encoding on;
location / {
proxy_pass http://tasks:7080;
proxy_connect_timeout 1;
}
}
server {
listen 80;
listen 443 ssl;
server_name x.example.com example.io;
location ~ \"(/[^/]+)(/.*)?\" {
set $myhost $host;
if ($arg_go-get = \"1\") {
set $myhost \"goget\";
}
proxy_pass http://$myhost$1;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Scheme $scheme;
proxy_connect_timeout 1;
}
location / {
set $myhost $host;
if ($arg_go-get = \"1\") {
set $myhost \"goget\";
}
proxy_pass http://$myhost;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Scheme $scheme;
proxy_connect_timeout 1;
}
}
server {
listen 80;
server_name www.example.com w.example.com;
resolver 8.8.8.8;
location / {
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Server $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Real-IP $remote_addr;
proxy_pass http://$host.default.example.appspot.com/$request_uri;
proxy_redirect http://$host.default.example.appspot.com/ /;
}
}
# Kubernetes URI space. Maps URI paths to specific servers using the
# proxy.
server {
listen 80;
listen 443 ssl;
server_name proxy.example.com;
ssl_certificate /etc/ssl/server.crt;
ssl_certificate_key /etc/ssl/server.key;
# Security enhancements to deal with poodles and the like.
# See https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html
# ssl_ciphers 'AES256+EECDH:AES256+EDH';
ssl_ciphers \"ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA:ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES256-GCM-SHA384:AES128-GCM-SHA256:AES256-SHA256:AES128-SHA256:AES256-SHA:AES128-SHA:DES-CBC3-SHA:HIGH:!aNULL:!eNULL:!EXPORT:!DES:!MD5:!PSK:!RC4\";
# We don't like poodles.
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_session_cache shared:SSL:10m;
# Enable Forward secrecy.
ssl_dhparam /etc/ssl/dhparam.pem;
ssl_prefer_server_ciphers on;
# Enable HSTS.
add_header Strict-Transport-Security max-age=1209600;
if ($ssl_protocol = \"\") {
rewrite ^ https://$host$request_uri? permanent;
}
# required to avoid HTTP 411: see Issue #1486 (https://github.com/dotcloud/docker/issues/1486)
chunked_transfer_encoding on;
location / {
proxy_pass http://kubeproxy:4180;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Scheme $scheme;
proxy_connect_timeout 1;
}
}
server {
# We could add the following line and the connection would still be SSL,
# but it doesn't appear to be necessary. Seems safer this way.
listen 80;
listen 443 default ssl;
server_name ~^(?<sub>.*)\\.example\\.com$;
ssl_certificate /etc/ssl/server.crt;
ssl_certificate_key /etc/ssl/server.key;
# Security enhancements to deal with poodles and the like.
# See https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html
# ssl_ciphers 'AES256+EECDH:AES256+EDH';
ssl_ciphers \"ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA:ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES256-GCM-SHA384:AES128-GCM-SHA256:AES256-SHA256:AES128-SHA256:AES256-SHA:AES128-SHA:DES-CBC3-SHA:HIGH:!aNULL:!eNULL:!EXPORT:!DES:!MD5:!PSK:!RC4\";
# We don't like poodles.
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_session_cache shared:SSL:10m;
# Enable Forward secrecy.
ssl_dhparam /etc/ssl/dhparam.pem;
ssl_prefer_server_ciphers on;
# Enable HSTS.
add_header Strict-Transport-Security max-age=1209600;
if ($ssl_protocol = \"\") {
rewrite ^ https://$host$request_uri? permanent;
}
# required to avoid HTTP 411: see Issue #1486 (https://github.com/dotcloud/docker/issues/1486)
chunked_transfer_encoding on;
location / {
proxy_pass http://authproxy:4180;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Scheme $scheme;
proxy_connect_timeout 1;
}
}
}
"""
}
}
}
}