K8S-config

K8S config files

---------- kubelet
[ kubernetes]# cat kubelet.kubeconfig
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: xxx==
    server: https://127.0.0.1:8443
  name: default-cluster
contexts:
- context:
    cluster: default-cluster
    namespace: default
    user: default-auth
  name: default-context
current-context: default-context
kind: Config
preferences: {}
users:
- name: default-auth
  user:
    client-certificate: /etc/kubernetes/cert/kubelet-client-current.pem
    client-key: /etc/kubernetes/cert/kubelet-client-current.pem
[ kubernetes]#
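
Both client-certificate and client-key point at kubelet-client-current.pem, the symlink the kubelet keeps updated when client certificate rotation is enabled (rotateCertificates: true in kubelet-config.yaml below), so this kubeconfig stays valid across certificate renewals. A quick way to inspect the certificate currently behind the symlink (illustrative commands, not part of the captured session; they assume openssl is installed on the node):

ls -l /etc/kubernetes/cert/kubelet-client-current.pem
openssl x509 -in /etc/kubernetes/cert/kubelet-client-current.pem -noout -subject -dates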

[ kubernetes]# cat kubelet-config.yaml
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: "10.2.0.20"
staticPodPath: ""
syncFrequency: 1m
fileCheckFrequency: 20s
httpCheckFrequency: 20s
staticPodURL: ""
port: 10250
readOnlyPort: 0
rotateCertificates: true
serverTLSBootstrap: true
authentication:
  anonymous:
    enabled: false
  webhook:
    enabled: true
  x509:
    clientCAFile: "/etc/kubernetes/cert/ca.pem"
authorization:
  mode: Webhook
registryPullQPS: 0
registryBurst: 20
eventRecordQPS: 0
eventBurst: 20
enableDebuggingHandlers: true
enableContentionProfiling: true
healthzPort: 10248
healthzBindAddress: "10.2.0.20"
clusterDomain: "cluster.local"
clusterDNS:
  - "10.254.0.2"
nodeStatusUpdateFrequency: 10s
nodeStatusReportFrequency: 1m
imageMinimumGCAge: 2m
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
volumeStatsAggPeriod: 1m
kubeletCgroups: ""
systemCgroups: ""
cgroupRoot: ""
cgroupsPerQOS: true
cgroupDriver: cgroupfs
runtimeRequestTimeout: 10m
hairpinMode: promiscuous-bridge
maxPods: 220
podCIDR: "172.30.0.0/16"
podPidsLimit: -1
resolvConf: /etc/resolv.conf
maxOpenFiles: 1000000
kubeAPIQPS: 1000
kubeAPIBurst: 2000
serializeImagePulls: false
evictionHard:
  memory.available: "100Mi"
  nodefs.available: "10%"
  nodefs.inodesFree: "5%"
  imagefs.available: "15%"
evictionSoft: {}
enableControllerAttachDetach: true
failSwapOn: true
containerLogMaxSize: 20Mi
containerLogMaxFiles: 10
systemReserved: {}
kubeReserved: {}
systemReservedCgroup: ""
kubeReservedCgroup: ""
enforceNodeAllocatable: ["pods"]
[ kubernetes]#
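
The kubelet consumes this file through its --config flag. The systemd unit itself is not included in this post, so the start-up line below is only a sketch of how the files above are usually wired together; the paths and the hostname value are assumptions based on what the dumps show:

kubelet \
  --bootstrap-kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig \
  --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
  --config=/etc/kubernetes/kubelet-config.yaml \
  --cert-dir=/etc/kubernetes/cert \
  --hostname-override=k1

Because serverTLSBootstrap is true, the kubelet's serving-certificate CSRs also have to be approved (manually or by an approval controller) before the API server can reach it over TLS.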

---------- kube-scheduler

[ kubernetes]# cat kube-scheduler.kubeconfig
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: xxx==
    server: https://127.0.0.1:8443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: system:kube-scheduler
  name: system:kube-scheduler
current-context: system:kube-scheduler
kind: Config
preferences: {}
users:
- name: system:kube-scheduler
  user:
    client-certificate-data: xxx
    client-key-data: xxx=
[ kubernetes]#
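
The generation steps are not shown in the post; a kubeconfig with this shape is typically built with kubectl config, roughly as below. The certificate file names (ca.pem, kube-scheduler.pem, kube-scheduler-key.pem) are assumptions:

kubectl config set-cluster kubernetes \
  --certificate-authority=ca.pem --embed-certs=true \
  --server=https://127.0.0.1:8443 \
  --kubeconfig=kube-scheduler.kubeconfig
kubectl config set-credentials system:kube-scheduler \
  --client-certificate=kube-scheduler.pem --client-key=kube-scheduler-key.pem \
  --embed-certs=true --kubeconfig=kube-scheduler.kubeconfig
kubectl config set-context system:kube-scheduler \
  --cluster=kubernetes --user=system:kube-scheduler \
  --kubeconfig=kube-scheduler.kubeconfig
kubectl config use-context system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig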

[ kubernetes]# cat kube-scheduler.yaml
apiVersion: kubescheduler.config.k8s.io/v1alpha1
kind: KubeSchedulerConfiguration
bindTimeoutSeconds: 600
clientConnection:
  burst: 200
  kubeconfig: "/etc/kubernetes/kube-scheduler.kubeconfig"
  qps: 100
enableContentionProfiling: false
enableProfiling: true
hardPodAffinitySymmetricWeight: 1
healthzBindAddress: 10.2.0.20:10251
leaderElection:
  leaderElect: true
metricsBindAddress: 10.2.0.20:10251
[ kubernetes]#
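
The scheduler picks this file up via its --config flag; with leaderElect: true several instances can run and only the current lease holder schedules. Illustrative commands (not from the original session) for starting it and probing the healthz/metrics endpoints bound above:

kube-scheduler --config=/etc/kubernetes/kube-scheduler.yaml
curl -s http://10.2.0.20:10251/healthz
curl -s http://10.2.0.20:10251/metrics | head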

---------- kube-proxy

[ kubernetes]# cat kube-proxy.kubeconfig
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: xxx==
    server: https://127.0.0.1:8443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kube-proxy
  name: default
current-context: default
kind: Config
preferences: {}
users:
- name: kube-proxy
  user:
    client-certificate-data: xxx
    client-key-data: xxx=
[ kubernetes]#
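
A quick sanity check of this kubeconfig (illustrative, not part of the captured session) is to dump it back with kubectl and confirm the embedded identity may watch the resources kube-proxy needs; the second command assumes RBAC binds this certificate's identity to the system:node-proxier cluster role:

kubectl config view --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
kubectl --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig auth can-i watch endpoints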

[ kubernetes]# cat kube-proxy-config.yaml
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
clientConnection:
  burst: 200
  kubeconfig: "/etc/kubernetes/kube-proxy.kubeconfig"
  qps: 100
bindAddress: 10.2.0.20
healthzBindAddress: 10.2.0.20:10256
metricsBindAddress: 10.2.0.20:10249
enableProfiling: true
clusterCIDR: 172.30.0.0/16
hostnameOverride: k1
mode: "ipvs"
portRange: ""
iptables:
  masqueradeAll: false
ipvs:
  scheduler: rr
  excludeCIDRs: []
[ kubernetes]#
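
kube-proxy is started with --config pointing at this file. mode: "ipvs" requires the IPVS kernel modules to be loaded on the node, otherwise kube-proxy falls back to iptables mode. Illustrative checks (not part of the original dump):

lsmod | grep -E 'ip_vs|nf_conntrack'
kube-proxy --config=/etc/kubernetes/kube-proxy-config.yaml
curl -s http://10.2.0.20:10249/proxyMode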

---------- kube-controller-manager

[ kubernetes]# cat kube-controller-manager.kubeconfig
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: xxx==
    server: https://127.0.0.1:8443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: system:kube-controller-manager
  name: system:kube-controller-manager
current-context: system:kube-controller-manager
kind: Config
preferences: {}
users:
- name: system:kube-controller-manager
  user:
    client-certificate-data: xxx==
    client-key-data: xxx==
[ kubernetes]#
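
The controller manager authenticates with this file through its --kubeconfig flag, and the same file can be reused for delegated authentication/authorization on its secure port. The surrounding flags are not shown in the post, so this is only a sketch:

kube-controller-manager \
  --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \
  --authentication-kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \
  --authorization-kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \
  --leader-elect=true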

---------- kubelet-bootstrap
[ kubernetes]# cat kubelet-bootstrap.kubeconfig
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: xxx==
    server: https://127.0.0.1:8443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubelet-bootstrap
  name: default
current-context: default
kind: Config
preferences: {}
users:
- name: kubelet-bootstrap
  user:
    token: lfpxxx
[ kubernetes]#
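
The token here is a bootstrap token the kubelet presents on first start (via --bootstrap-kubeconfig) to request its client certificate. The post does not show how the token was created or how the resulting CSRs are handled; the commands below are one common way to do both, and the token description is illustrative:

kubeadm token create --description "kubelet bootstrap token for k1"
kubectl get csr
kubectl certificate approve <csr-name>

Approval can also be automated by binding the bootstrap group to the certificate auto-approval cluster roles instead of approving each CSR by hand.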

---------- audit-policy
[ kubernetes]# cat audit-policy.yaml
apiVersion: audit.k8s.io/v1beta1
kind: Policy
rules:
  # The following requests were manually identified as high-volume and low-risk, so drop them.
  - level: None
    resources:
      - group: ""
        resources:
          - endpoints
          - services
          - services/status
    users:
      - 'system:kube-proxy'
    verbs:
      - watch

  - level: None
    resources:
      - group: ""
        resources:
          - nodes
          - nodes/status
    userGroups:
      - 'system:nodes'
    verbs:
      - get

  - level: None
    namespaces:
      - kube-system
    resources:
      - group: ""
        resources:
          - endpoints
    users:
      - 'system:kube-controller-manager'
      - 'system:kube-scheduler'
      - 'system:serviceaccount:kube-system:endpoint-controller'
    verbs:
      - get
      - update

  - level: None
    resources:
      - group: ""
        resources:
          - namespaces
          - namespaces/status
          - namespaces/finalize
    users:
      - 'system:apiserver'
    verbs:
      - get

  # Don't log HPA fetching metrics.
  - level: None
    resources:
      - group: metrics.k8s.io
    users:
      - 'system:kube-controller-manager'
    verbs:
      - get
      - list

  # Don't log these read-only URLs.
  - level: None
    nonResourceURLs:
      - '/healthz*'
      - /version
      - '/swagger*'

  # Don't log events requests.
  - level: None
    resources:
      - group: ""
        resources:
          - events

  # Node and pod status calls from nodes are high-volume and can be large,
  # so don't log responses for expected updates from nodes.
  - level: Request
    omitStages:
      - RequestReceived
    resources:
      - group: ""
        resources:
          - nodes/status
          - pods/status
    users:
      - kubelet
      - 'system:node-problem-detector'
      - 'system:serviceaccount:kube-system:node-problem-detector'
    verbs:
      - update
      - patch

  - level: Request
    omitStages:
      - RequestReceived
    resources:
      - group: ""
        resources:
          - nodes/status
          - pods/status
    userGroups:
      - 'system:nodes'
    verbs:
      - update
      - patch

  # deletecollection calls can be large, so don't log responses for expected namespace deletions.
  - level: Request
    omitStages:
      - RequestReceived
    users:
      - 'system:serviceaccount:kube-system:namespace-controller'
    verbs:
      - deletecollection

  # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data,
  # so only log at the Metadata level.
  - level: Metadata
    omitStages:
      - RequestReceived
    resources:
      - group: ""
        resources:
          - secrets
          - configmaps
      - group: authentication.k8s.io
        resources:
          - tokenreviews

  # Get responses can be large; skip them.
  - level: Request
    omitStages:
      - RequestReceived
    resources:
      - group: ""
      - group: admissionregistration.k8s.io
      - group: apiextensions.k8s.io
      - group: apiregistration.k8s.io
      - group: apps
      - group: authentication.k8s.io
      - group: authorization.k8s.io
      - group: autoscaling
      - group: batch
      - group: certificates.k8s.io
      - group: extensions
      - group: metrics.k8s.io
      - group: networking.k8s.io
      - group: policy
      - group: rbac.authorization.k8s.io
      - group: scheduling.k8s.io
      - group: settings.k8s.io
      - group: storage.k8s.io
    verbs:
      - get
      - list
      - watch

  # Default level for known APIs.
  - level: RequestResponse
    omitStages:
      - RequestReceived
    resources:
      - group: ""
      - group: admissionregistration.k8s.io
      - group: apiextensions.k8s.io
      - group: apiregistration.k8s.io
      - group: apps
      - group: authentication.k8s.io
      - group: authorization.k8s.io
      - group: autoscaling
      - group: batch
      - group: certificates.k8s.io
      - group: extensions
      - group: metrics.k8s.io
      - group: networking.k8s.io
      - group: policy
      - group: rbac.authorization.k8s.io
      - group: scheduling.k8s.io
      - group: settings.k8s.io
      - group: storage.k8s.io

  # Default level for all other requests.
  - level: Metadata
    omitStages:
      - RequestReceived
[ kubernetes]#
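
This policy is passed to kube-apiserver with --audit-policy-file, and the log backend needs --audit-log-path plus rotation settings. The apiserver unit is not part of this post, so the audit-related excerpt below (with the rest of the apiserver flags omitted) uses assumed paths and retention values:

kube-apiserver \
  --audit-policy-file=/etc/kubernetes/audit-policy.yaml \
  --audit-log-path=/var/log/kubernetes/kube-apiserver-audit.log \
  --audit-log-maxage=15 \
  --audit-log-maxbackup=10 \
  --audit-log-maxsize=100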

---------- encryption-config
[ kubernetes]# cat encryption-config.yaml
kind: EncryptionConfig
apiVersion: v1
resources:
  - resources:
      - secrets
    providers:
      - aescbc:
          keys:
            - name: key1
              secret: BZ1hDxxx
      - identity: {}
[ kubernetes]#
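
kube-apiserver reads this file through its encryption-provider flag (--experimental-encryption-provider-config for the older EncryptionConfig kind shown here; newer releases use --encryption-provider-config with kind EncryptionConfiguration), so Secrets are written AES-CBC-encrypted to etcd while the identity: {} provider still allows reading data stored before encryption was enabled. A new 32-byte key for aescbc is typically generated like this (illustrative, not from the post):

head -c 32 /dev/urandom | base64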
