for i in range(2, 40)
2.3
kubectl run kubia --image=luksa/kubia --port=8080 --generator=run-pod/v1
kubectl run mongo --image=mongo --port=27017 --generator=run-pod/v1
docker run -itd --name mongo -p 27017:27017 mongo
kubectl run kubia --image=luksa/kubia --port=8080 --generator=run/v1
"without -pod " there will have rc be created
kubectl expose rc kubia --type=LoadBalancer --name kubia-http
kubectl run redis --image=redis --port=6379 --generator=run/v1
kubectl get rc
kubectl scale rc kubia --replicas=3
kubectl get pod -o wide
kubectl cluster-info
3
kubectl get po kubia-zxzij -o yaml
kubia-manual.yaml
888888888888888
apiVersion: v1
kind: Pod
metadata:
  name: kubia-manual
spec:
  containers:
  - image: luksa/kubia
    name: kubia
    ports:
    - containerPort: 8080
      protocol: TCP
88888888888888888
kubectl explain pods    # also works with svc, rc, deployment, ep (one resource at a time)
kubectl explain pod.spec
kubectl create -f kubia-manual.yaml
kubectl apply -f kubia-manual.yaml
kubectl logs kubia-manual
kubectl port-forward kubia-manual 8888:8080
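With the port-forward running, the pod can be hit from a second terminal through the local port:
curl localhost:8888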
kubia-manual-with-labels.yaml
888888888888888
apiVersion: v1
kind: Pod
metadata:
  name: kubia-manual-v2
  labels:
    creation_method: manual
    env: prod
spec:
  containers:
  - image: luksa/kubia
    name: kubia
    ports:
    - containerPort: 8080
      protocol: TCP
88888888888888888
kubectl get po --show-labels
kubectl get po -L creation_method,env
kubectl label po kubia-manual creation_method=manual
kubectl label po kubia-manual-v2 env=debug --overwrite
kubectl get po -l '!env'
kubectl get po -L env    # -L only adds a label column; use -l (as above) for selectors
kubectl label node gke-kubia-85f6-node-0rrx gpu=true
kubectl label node gke-kubia-85f6-node-0rrx gpu-
To remove a label, append the label key followed by a minus sign at the end of the command, as in the line above:
kubia-gpu.yaml
888888888888888888888888
apiVersion: v1
kind: Pod
metadata:
  name: kubia-gpu
  labels:
    creation_method: manual
    env: prod
spec:
  nodeSelector:
    gpu: "true"
  containers:
  - image: luksa/kubia
    name: kubia
8888888888888888888888888
kubectl get node -l kubernetes.io/hostname=r4
kubectl annotate pod kubia-manual mycompany.com/someannotation="foo bar"
kubectl get po --namespace kube-system
888888888888888888888888
apiVersion: v1
kind: Namespace
metadata:
  name: custom-namespace
888888888888888888888888
kubectl create namespace custom-namespace
kubectl create -f kubia-manual.yaml -n custom-namespace
Command to change the current namespace:
more /root/.bashrc
alias k='kubectl'
alias kcd='kubectl config set-context $(kubectl config current-context) --namespace'
kubectl config set-context --current --namespace=ran1
TIP: To quickly switch to a different namespace, you can set up the following alias: alias kcd='kubectl config set-context $(kubectl config current-context) --namespace '. You can then switch between namespaces with kcd some-namespace.
kubectl delete po -l creation_method=manual
kubectl delete ns custom-namespace
kubectl delete all --all
###############################################
4
888888888888888888888
apiVersion: v1
kind: Pod
metadata:
  name: kubia-liveness
  labels:
    creation_method: manual
    env: prod
spec:
  containers:
  - image: luksa/kubia-unhealthy
    name: kubia
    livenessProbe:
      httpGet:
        path: /
        port: 8080
      initialDelaySeconds: 15
8888888888888888888888888888888888888888888
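Once the liveness probe starts failing, the restarts and their reason can be inspected with:
kubectl get po kubia-liveness
kubectl describe po kubia-liveness
kubectl logs kubia-liveness --previous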
kubia-rc.yaml
88888888888888888888888888888888888888
apiVersion: v1
kind: ReplicationController
metadata:
  name: kubia
spec:
  replicas: 3
  selector:
    app: kubia
  template:
    metadata:
      labels:
        app: kubia
        type: special
    spec:
      containers:
      - name: kubia
        image: luksa/kubia
        ports:
        - containerPort: 8080
8888888888888888888888888888888888888888888
kubectl get pods --show-labels
kubectl delete rc kubia --cascade=false
kubia-replicaset.yaml
888888888888888888888888888888888888888888
apiVersion: apps/v1beta2
kind: ReplicaSet
metadata:
  name: kubia
spec:
  replicas: 3
  selector:
    matchLabels:
      app: kubia
  template:
    metadata:
      labels:
        app: kubia
    spec:
      containers:
      - name: kubia
        image: luksa/kubia
8888888888888888888888888888888888888888888888
kubia-replicaset-matchexpressions.yaml
888888888888888888888888888888888888888888888
apiVersion: apps/v1beta2
kind: ReplicaSet
metadata:
  name: kubia
spec:
  replicas: 3
  selector:
    matchExpressions:
    - key: app
      operator: In
      values:
      - kubia
  template:
    metadata:
      labels:
        app: kubia
    spec:
      containers:
      - name: kubia
        image: luksa/kubia
8888888888888888888888888888888888888888888888888
kubectl delete rs kubia
ssd-monitor-daemonset.yaml
888888888888888888888888888888888888888888888888
apiVersion: apps/v1beta2
kind: DaemonSet
metadata:
  name: ssd-monitor
spec:
  selector:
    matchLabels:
      app: ssd-monitor
  template:
    metadata:
      labels:
        app: ssd-monitor
    spec:
      nodeSelector:
        disk: ssd
      containers:
      - name: main
        image: luksa/ssd-monitor
8888888888888888888888888888888888888888888888888888
kubectl label node minikube disk=ssd
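Changing or removing the label later makes the DaemonSet delete its pod from that node again, e.g.:
kubectl label node minikube disk=hdd --overwrite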
exporter.yaml
8888888888888888888888888888888888888888
apiVersion: batch/v1
kind: Job
metadata:
  name: batch-job
spec:
  template:
    metadata:
      labels:
        app: batch-job
    spec:
      restartPolicy: OnFailure
      containers:
      - name: main
        image: luksa/batch-job
888888888888888888888888888888888888888888888
multi-completion-batch-job.yaml
8888888888888888888888888888888888888888888
apiVersion: batch/v1
kind: Job
metadata:
  name: multi-completion-batch-job
spec:
  completions: 5
  parallelism: 2
  template:
    metadata:
      labels:
        app: batch-job
    spec:
      restartPolicy: OnFailure
      containers:
      - name: main
        image: luksa/batch-job
888888888888888888888888888888888888888888888
kubectl scale job multi-completion-batch-job --replicas 3
cronjob.yaml
88888888888888888888888888888888888888888
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: batch-job-every-fifteen-minutes
spec:
  schedule: "0,15,30,45 * * * *"
  jobTemplate:
    spec:
      template:
        metadata:
          labels:
            app: periodic-batch-job
        spec:
          restartPolicy: OnFailure
          containers:
          - name: main
            image: luksa/batch-job
888888888888888888888888888888888888888888888888
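The CronJob and the Jobs it spawns can be watched with:
kubectl get cronjobs
kubectl get jobs --watch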
5
kubia-svc.yaml
8888888888888888888888888888888888888888888
apiVersion: v1
kind: Service
metadata:
  name: kubia
spec:
  ports:
  - port: 80
    targetPort: 8080
  selector:
    app: kubia
888888888888888888888888888888888888888888888888
kubectl get svc
8888888888888888888888888888888888888888888
apiVersion: v1
kind: Service
metadata:
  name: kubia
spec:
  ports:
  - name: http
    port: 80
    targetPort: 8080
  - name: https
    port: 443
    targetPort: 8443
  selector:
    app: kubia
888888888888888888888888888888888888888888888888
kubectl get svc
88888888888888888888888888888888
kubia-rc.yaml (with port name)
88888888888888888888888888888888888888
apiVersion: v1
kind: ReplicationController
metadata:
  name: kubia
spec:
  replicas: 3
  selector:
    app: kubia
  template:
    metadata:
      labels:
        app: kubia
        type: special
    spec:
      containers:
      - name: kubia
        image: luksa/kubia
        ports:
        - name: http
          containerPort: 8080
        - name: https
          containerPort: 8443
88888888888888888888888888888
[root@r4 ~]# k exec kubia-vfz6s env
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
HOSTNAME=kubia-vfz6s
KUBERNETES_SERVICE_HOST=10 0.1
KUBERNETES_PORT=tcp://10 .1:443
KUBIA_PORT_80_TCP_PORT=80
KUBERNETES_SERVICE_PORT_HTTPS=443
KUBERNETES_PORT_443_TCP=tcp://10 0.1:443
KUBIA_SERVICE_HOST=10 4.157
KUBIA_SERVICE_PORT=80
KUBIA_PORT_80_TCP_PROTO=tcp
KUBERNETES_SERVICE_PORT=443
KUBERNETES_PORT_443_TCP_PORT=443
KUBIA_PORT=tcp://1 24.157:80
KUBIA_PORT_80_TCP_ADDR=1 24.157
KUBERNETES_PORT_443_TCP_PROTO=tcp
KUBERNETES_PORT_443_TCP_ADDR=1 4.0.1
KUBIA_PORT_80_TCP=tcp://1 24.157:80
NPM_CONFIG_LOGLEVEL=info
NODE_VERSION=7.9.0
YARN_VERSION=0.22.0
HOME=/root
[root@r4 ~]# k get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubia ClusterIP 1 .81.182 <none> 80/TCP 4m2s
[root@r4 ~]# curl 1 .81.182
You've hit kubia-6ncks
[root@r4 ~]# curl 10. 224.157
<a href="https://10 224.157/">Found</a>.
[root@r4 ~]#
-------------
[root@r4 ~]# k exec kubia-fvf4k env
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
HOSTNAME=kubia-fvf4k
KUBERNETES_PORT_443_TCP=tcp://1 .1:443
KUBERNETES_SERVICE_HOST=1 .1
KUBERNETES_SERVICE_PORT=443
KUBERNETES_SERVICE_PORT_HTTPS=443
KUBERNETES_PORT_443_TCP_PROTO=tcp
KUBIA_SERVICE_HOST=10 1.182
KUBIA_PORT_80_TCP=tcp:// .81.182:80
KUBIA_PORT_80_TCP_ADDR=10 .182
KUBERNETES_PORT=tcp://1 .1:443
KUBERNETES_PORT_443_TCP_ADDR=10 .0.1
KUBIA_PORT=tcp://10 182:80
KUBIA_PORT_80_TCP_PORT=80
KUBERNETES_PORT_443_TCP_PORT=443
KUBIA_SERVICE_PORT=80
KUBIA_PORT_80_TCP_PROTO=tcp
NPM_CONFIG_LOGLEVEL=info
NODE_VERSION=7.9.0
YARN_VERSION=0.22.0
HOME=/root
[root@r4 ~]#
kubia-svc-nodeport.yaml
888888888888888888888888888888888888888888888888
apiVersion: v1
kind: Service
metadata:
  name: kubia-nodeport
spec:
  type: NodePort
  ports:
  - port: 80
    targetPort: 8080
    nodePort: 30123
  selector:
    app: kubia
8888888888888888888888888888888888888888888888888888
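The NodePort service is then reachable on every node's IP at port 30123 (node IP is a placeholder here):
curl http://<node-ip>:30123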
kubia-svc-loadbalancer.yaml
######################################
88888888888888888888888888
apiVersion: v1
kind: Service
metadata:
  name: kubia-loadbalancer
spec:
  externalTrafficPolicy: Local
  type: LoadBalancer
  ports:
  - port: 80
    targetPort: 8080
  selector:
    app: kubia
88888888888888888888888888888888888
kubectl get svc kubia-loadbalancer
kubia-ingress.yaml
8888888888888888888888888888888888888
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: kubia
spec:
  tls:
  - hosts:
    - kubia.example.com
    secretName: tls-secret
  rules:
  - host: kubia.example.com
    http:
      paths:
      - path: /kubia
        backend:
          serviceName: kubia
          servicePort: 80
      - path: /foo
        backend:
          serviceName: bar
          servicePort: 80
8888888888888888888888888888888888888888
[root@r4 work]# k create -f kubia-ingress.yaml
error: error validating "kubia-ingress.yaml": error validating data: [ValidationError(Ingress.spec.rules[0].http): unknown field "backend" in io.k8s.api.extensions.v1beta1.HTTPIngressRuleValue, ValidationError(Ingress.spec.rules[0].http): unknown field "path" in io.k8s.api.extensions.v1beta1.HTTPIngressRuleValue, ValidationError(Ingress.spec.rules[0].http): missing required field "paths" in io.k8s.api.extensions.v1beta1.HTTPIngressRuleValue]; if you choose to ignore these errors, turn validation off with --validate=false
[root@r4 work]#
88888888888888888888888888888888888888888
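The validation error above came from the paths/path/backend entries not being indented under http: in the original file; with the manifest indented as shown it validates. To test the ingress, map kubia.example.com to the ingress controller's IP (for example via /etc/hosts) and request the path:
curl http://kubia.example.com/kubia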
kubectl get nodes -o jsonpath='{.items[*].status.addresses[?(@.type=="ExternalIP")].address}'
6
fortune-pod.yaml
888888888888888888888
apiVersion: v1
kind: Pod
metadata:
  name: fortune
spec:
  containers:
  - image: luksa/fortune
    name: html-generator
    volumeMounts:
    - name: html
      mountPath: /var/htdocs
  - image: nginx:alpine
    name: web-server
    volumeMounts:
    - name: html
      mountPath: /usr/share/nginx/html
      readOnly: true
    ports:
    - containerPort: 80
      protocol: TCP
  volumes:
  - name: html
    emptyDir:
      medium: Memory
######################################
Add an app label to the pod so the Service selector below can match it:
[root@r4 work]# more gitrepo-volume-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: gitrepo-volume-pod
  labels:
    app: gitrepo-volume-pod
spec:
  containers:
  - image: nginx:alpine
    name: web-server
    volumeMounts:
    - name: html
      mountPath: /usr/share/nginx/html
      readOnly: true
    ports:
    - containerPort: 80
      protocol: TCP
  volumes:
  - name: html
    gitRepo:
      repository: https://github.com/luksa/kubia-website-example.git
      revision: master
      directory: .
[root@r4 work]#
#############################################
888888888888888888888888888
kubectl port-forward fortune 8080:80
gitrepo-volume-pod.yaml
88888888888888888888888888888888
apiVersion: v1
kind: Pod
metadata:
  name: gitrepo-volume-pod
spec:
  containers:
  - image: nginx:alpine
    name: web-server
    volumeMounts:
    - name: html
      mountPath: /usr/share/nginx/html
      readOnly: true
    ports:
    - containerPort: 80
      protocol: TCP
  volumes:
  - name: html
    gitRepo:
      repository: https://github.com/luksa/kubia-website-example.git
      revision: master
      directory: .
888888888888888888888888888888888888888888888888
###########################################
[root@r4 work]# more gitrepo-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: gitrepo-volume
spec:
  type: NodePort
  ports:
  - port: 80
    targetPort: 80
    nodePort: 30321
  selector:
    app: gitrepo-volume-pod
[root@r4 work]#
#####################################
k expose po gitrepo-volume-pod
mongodb-pod-nfs.yaml
888888888888888888888888888888888888888888
apiVersion: v1
kind: Pod
metadata:
  name: mongodb
spec:
  volumes:
  - name: mongodb-data
    nfs:
      server: 10. 09
      path: /v
  containers:
  - image: mongo
    name: mongodb
    volumeMounts:
    - name: mongodb-data
      mountPath: /data/db
    ports:
    - containerPort: 27017
      protocol: TCP
8888888888888888888888888888888888888888888888
mongodb-pv-nfs.yaml
88888888888888888888888
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mongodb-pv
spec:
  capacity:
    storage: 5Gi
  accessModes:
  - ReadWriteOnce
  - ReadOnlyMany
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: 10 09
    path: /v
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: wpdata-pv
spec:
  capacity:
    storage: 5Gi
  accessModes:
  - ReadWriteOnce
  - ReadOnlyMany
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: 1 09
    path: /nfsdata/wpdata
88888888888888888888888888888888888888888
mongodb-pvc.yaml
888888888888888888888888
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mongodb-pvc
spec:
  resources:
    requests:
      storage: 2Gi
  accessModes:
  - ReadWriteOnce
  storageClassName: ""
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: wpdata-pvc
spec:
  resources:
    requests:
      storage: 5Gi
  accessModes:
  - ReadWriteOnce
  storageClassName: ""
888888888888888888888888888888
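Whether the claims got bound to the volumes can be checked with:
kubectl get pv
kubectl get pvc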
mongodb-pod-pvc.yaml
888888888888888888888888888888888888888888
apiVersion: v1
kind: Pod
metadata:
  name: mongodb
spec:
  volumes:
  - name: mongodb-data
    persistentVolumeClaim:
      claimName: mongodb-pvc
  containers:
  - image: mongo
    name: mongodb
    volumeMounts:
    - name: mongodb-data
      mountPath: /data/db
    ports:
    - containerPort: 27017
      protocol: TCP
888888888888888888888888888888888888
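A quick persistence check, following the book's MongoDB example (database/collection names are arbitrary; newer mongo images ship mongosh instead of mongo):
kubectl exec -it mongodb -- mongo
> use mystore
> db.foo.insert({name: 'foo'})
> db.foo.find()
Delete and recreate the pod, then run db.foo.find() again; the document should still be on the claim-backed volume.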
kubectl get pods --watch
kubectl get pods -o yaml --watch
77777777777777777777777777777777777777777777
7
######################
kubectl create configmap fortune-config --from-literal=sleep-interval=25
kubectl create configmap myconfigmap --from-literal=foo=bar --from-literal=bar=baz --from-literal=one=two
kubectl create configmap my-config --from-file=config-file.conf
kubectl create configmap my-config --from-file=customkey=config-file.conf
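The created ConfigMaps can be inspected with:
kubectl get configmap fortune-config -o yaml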
fortune-pod-env-configmap.yaml
888888888888888888888888888888
apiVersion: v1
kind: Pod
metadata:
  name: fortune-env-from-configmap
  labels:
    app: fortune-env
spec:
  containers:
  - image: luksa/fortune:env
    name: fortune-env
    ports:
    - containerPort: 80
      protocol: TCP
    env:
    - name: INTERVAL
      valueFrom:
        configMapKeyRef:
          name: fortune-config
          key: sleep-interval
---
apiVersion: v1
kind: Service
metadata:
  name: fortune-env
spec:
  ports:
  - port: 80
    targetPort: 80
  selector:
    app: fortune-env
k create configmap fortune-config --from-literal=sleep-interval=25
-------------------------------------
secret
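The pod below expects the fortune-config ConfigMap to contain a my-nginx-config.conf key and a fortune-https Secret holding the certificate and key. A minimal way to create the secret, following the book's example (the certificate subject is arbitrary):
openssl genrsa -out https.key 2048
openssl req -new -x509 -key https.key -out https.cert -days 3650 -subj /CN=kubia.example.com
kubectl create secret generic fortune-https --from-file=https.key --from-file=https.cert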
fortune-pod-https.yaml
333333333333333333
apiVersion: v1
kind: Pod
metadata:
  name: fortune-https
spec:
  containers:
  - image: luksa/fortune:env
    name: html-generator
    env:
    - name: INTERVAL
      valueFrom:
        configMapKeyRef:
          name: fortune-config
          key: sleep-interval
    volumeMounts:
    - name: html
      mountPath: /var/htdocs
  - image: nginx:alpine
    name: web-server
    volumeMounts:
    - name: html
      mountPath: /usr/share/nginx/html
      readOnly: true
    - name: config
      mountPath: /etc/nginx/conf.d
      readOnly: true
    - name: certs
      mountPath: /etc/nginx/certs/
      readOnly: true
    ports:
    - containerPort: 80
    - containerPort: 443
  volumes:
  - name: html
    emptyDir: {}
  - name: config
    configMap:
      name: fortune-config
      items:
      - key: my-nginx-config.conf
        path: https.conf
  - name: certs
    secret:
      secretName: fortune-https
------------------------------------------
333333333333333333333
8
downward-api-volume.yaml
333333333333333333333333333333333333333
apiVersion: v1
kind: Pod
metadata:
  name: downward
  labels:
    foo: bar
  annotations:
    key1: value1
    key2: |
      multi
      line
      value
spec:
  containers:
  - name: main
    image: busybox
    command: ["sleep", "9999999"]
    resources:
      requests:
        cpu: 15m
        memory: 100Ki
      limits:
        cpu: 100m
        memory: 4Mi
    volumeMounts:
    - name: downward
      mountPath: /etc/downward
  volumes:
  - name: downward
    downwardAPI:
      items:
      - path: "podName"
        fieldRef:
          fieldPath: metadata.name
      - path: "podNamespace"
        fieldRef:
          fieldPath: metadata.namespace
      - path: "labels"
        fieldRef:
          fieldPath: metadata.labels
      - path: "annotations"
        fieldRef:
          fieldPath: metadata.annotations
      - path: "containerCpuRequestMilliCores"
        resourceFieldRef:
          containerName: main
          resource: requests.cpu
          divisor: 1m
      - path: "containerMemoryLimitBytes"
        resourceFieldRef:
          containerName: main
          resource: limits.memory
          divisor: 1
33333333333
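The files exposed through the downwardAPI volume can be listed and read inside the running pod:
kubectl exec downward -- ls -lL /etc/downward
kubectl exec downward -- cat /etc/downward/labels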
curl.yaml
333333333333333333333333333
apiVersion: v1
kind: Pod
metadata:
  name: curl
spec:
  containers:
  - name: main
    image: tutum/curl
    command: ["sleep", "999999"]
333333333333333
curl-with-ambassador.yaml
3333333333333333333333333333333333333
apiVersion: v1
kind: Pod
metadata:
  name: curl-with-ambassador
spec:
  containers:
  - name: main
    image: tutum/curl
    command: ["sleep", "999999"]
  - name: ambassador
    image: luksa/kubectl-proxy:1.6.2
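The main container then reaches the API server through the ambassador's kubectl proxy on localhost:
kubectl exec -it curl-with-ambassador -c main -- bash
curl localhost:8001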
9.2
kubia-rc-and-service-v1.yaml
33333333333333333333333333333
apiVersion: v1
kind: ReplicationController
metadata:
  name: kubia-v1
spec:
  replicas: 3
  template:
    metadata:
      name: kubia
      labels:
        app: kubia
    spec:
      containers:
      - image: luksa/kubia:v1
        name: nodejs
---
apiVersion: v1
kind: Service
metadata:
  name: kubia
spec:
  type: LoadBalancer
  selector:
    app: kubia
  ports:
  - port: 80
    targetPort: 8080
33333333333333333333333333
kubectl rolling-update kubia-v1 kubia-v2 --image=luksa/kubia:v2
9.3.
kubia-deployment-v1.yaml
33333333333333333333
apiVersion: apps/v1beta1
kind: Deployment
metadata:
  name: kubia
spec:
  replicas: 3
  template:
    metadata:
      name: kubia
      labels:
        app: kubia
    spec:
      containers:
      - image: luksa/kubia:v1
        name: nodejs
3333333333333333333333333
kubectl patch deployment kubia -p '{"spec": {"minReadySeconds": 10}}'
'
while true; do curl http://130.211.109.222; done
kubectl set image deployment kubia nodejs=luksa/kubia:v2
kubectl edit       Opens the object's manifest in your default editor. After making changes, saving the file, and exiting the editor, the object is updated.
                   Example: kubectl edit deployment kubia
kubectl patch      Modifies individual properties of an object.
                   Example: kubectl patch deployment kubia -p '{"spec": {"template": {"spec": {"containers": [{"name": "nodejs", "image": "luksa/kubia:v2"}]}}}}'
kubectl apply      Modifies the object by applying property values from a full YAML or JSON file. If the object specified in the YAML/JSON doesn't exist yet, it's created. The file needs to contain the full definition of the resource (it can't include only the fields you want to update, as is the case with kubectl patch).
                   Example: kubectl apply -f kubia-deployment-v2.yaml
kubectl replace    Replaces the object with a new one from a YAML/JSON file. In contrast to the apply command, this command requires the object to exist; otherwise it prints an error.
                   Example: kubectl replace -f kubia-deployment-v2.yaml
kubectl set image  Changes the container image defined in a Pod, ReplicationController's template, Deployment, DaemonSet, Job, or ReplicaSet.
                   Example: kubectl set image deployment kubia nodejs=luksa/kubia:v2
'
kubectl set image deployment kubia nodejs=luksa/kubia:v3
--------------------------------------------
kubectl rollout status deployment kubia
kubectl rollout undo deployment kubia
kubectl rollout history deployment kubia
kubectl rollout undo deployment kubia --to-revision=1
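A rollout can also be paused partway through and resumed later:
kubectl rollout pause deployment kubia
kubectl rollout resume deployment kubia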
kubia-deployment-v3-with-readinesscheck.yaml
33333333333333333333
apiVersion: apps/v1beta1
kind: Deployment
metadata:
  name: kubia
spec:
  replicas: 3
  minReadySeconds: 10
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0
    type: RollingUpdate
  template:
    metadata:
      name: kubia
      labels:
        app: kubia
    spec:
      containers:
      - image: luksa/kubia:v3
        name: nodejs
        readinessProbe:
          periodSeconds: 1
          httpGet:
            path: /
            port: 8080
33333333333333333333333333333333333333333333333333333
kubectl describe deploy kubia
kubectl rollout undo deploy kubia
3333333333333333333333333333333
------------------------
10
Three PVs
persistent-volumes-nfs.yaml
333333333333333333333
kind: List
apiVersion: v1
items:
- apiVersion: v1
  kind: PersistentVolume
  metadata:
    name: pv-a
  spec:
    capacity:
      storage: 1Mi
    accessModes:
    - ReadWriteOnce
    persistentVolumeReclaimPolicy: Recycle
    nfs:
      server: 10 9
      path: /nfsdata/pv-a
- apiVersion: v1
  kind: PersistentVolume
  metadata:
    name: pv-b
  spec:
    capacity:
      storage: 1Mi
    accessModes:
    - ReadWriteOnce
    persistentVolumeReclaimPolicy: Recycle
    nfs:
      server: 1 9
      path: /nfsdata/pv-b
- apiVersion: v1
  kind: PersistentVolume
  metadata:
    name: pv-c
  spec:
    capacity:
      storage: 1Mi
    accessModes:
    - ReadWriteOnce
    persistentVolumeReclaimPolicy: Recycle
    nfs:
      server: 1 9
      path: /nfsdata/pv-c
33333333333333333333333333
kubia-service-headless.yaml
3333333333333333333
apiVersion: v1
kind: Service
metadata:
  name: kubia
spec:
  clusterIP: None
  selector:
    app: kubia
  ports:
  - name: http
    port: 80
333333333333333333333333333333
kubia-statefulset.yaml
333333333333333333333333333
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
  name: kubia
spec:
  serviceName: kubia
  replicas: 2
  template:
    metadata:
      labels:
        app: kubia
    spec:
      containers:
      - name: kubia
        image: luksa/kubia-pet
        ports:
        - name: http
          containerPort: 8080
        volumeMounts:
        - name: data
          mountPath: /var/data
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      resources:
        requests:
          storage: 1Mi
      accessModes:
      - ReadWriteOnce
33333333333333333333333333333333333
kubectl proxy
curl localhost:8001
curl localhost:8001/api/v1/namespaces/ran/pods/kubia-0/proxy
curl -X POST -d "Hey there! This greeting was submitted to kubia-0." localhost:8001/api/v1/namespaces/ran/pods/kubia-0/proxy/
kubia-service-public.yaml
333333333333333333333
apiVersion: v1
kind: Service
metadata:
  name: kubia-public
spec:
  selector:
    app: kubia
  ports:
  - port: 80
    targetPort: 8080
333333333333333333333333
kubectl run -it srvlookup --image=tutum/dnsutils --rm --restart=Never -- dig SRV kubia.default.svc.cluster.local
kubectl edit statefulset kubia