YurtAppDaemon

Background

In edge scenarios, edge nodes in the same region are typically assigned to the same NodePool, and some system components, such as CoreDNS, then need to be deployed per NodePool. When a NodePool is created, we want these system components to be created automatically, without any manual operations.

YurtAppDaemon ensures that all (or some) NodePools run a workload based on a Deployment or StatefulSet template. As matching NodePools are added to the cluster, the YurtAppDaemon controller creates a sub-Deployment or sub-StatefulSet for each of them, and it keeps those workloads created and updated.

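A quick way to observe this behavior with plain kubectl, once a YurtAppDaemon and a matching NodePool exist (as in the walkthrough below); `<generated-name>` is a placeholder for one of the generated Deployment names:

```bash
# One generated Deployment per matching NodePool (names are generated)
kubectl get deployments.apps -o wide

# Each generated Deployment is expected to carry a nodeSelector that pins
# its Pods to the pool's nodes
kubectl get deployments.apps <generated-name> -o jsonpath='{.spec.template.spec.nodeSelector}'
```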

Usage:

• Create test1 NodePool

```bash
cat <<EOF | kubectl apply -f -
apiVersion: apps.openyurt.io/v1alpha1
kind: NodePool
metadata:
  name: test1
spec:
  selector:
    matchLabels:
      apps.openyurt.io/nodepool: test1
  type: Edge
EOF
```
• Create test2 NodePool

```bash
cat <<EOF | kubectl apply -f -
apiVersion: apps.openyurt.io/v1alpha1
kind: NodePool
metadata:
  name: test2
spec:
  selector:
    matchLabels:
      apps.openyurt.io/nodepool: test2
  type: Edge
EOF
```
• Add nodes to the corresponding NodePool

```bash
kubectl label node cn-beijing.172.23.142.31 apps.openyurt.io/desired-nodepool=test1
kubectl label node cn-beijing.172.23.142.32 apps.openyurt.io/desired-nodepool=test1
kubectl label node cn-beijing.172.23.142.34 apps.openyurt.io/desired-nodepool=test2
kubectl label node cn-beijing.172.23.142.35 apps.openyurt.io/desired-nodepool=test2
```
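A small verification sketch (the `np` short name is the one used later in this walkthrough); the pools should pick up the nodes labeled above:

```bash
# Pool membership is driven by the apps.openyurt.io/desired-nodepool label
kubectl get np
kubectl get nodes -l apps.openyurt.io/desired-nodepool=test1
```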
• Create YurtAppDaemon

```bash
cat <<EOF | kubectl apply -f -
apiVersion: apps.openyurt.io/v1alpha1
kind: YurtAppDaemon
metadata:
  name: daemon-1
  namespace: default
spec:
  selector:
    matchLabels:
      app: daemon-1
  workloadTemplate:
    deploymentTemplate:
      metadata:
        labels:
          app: daemon-1
      spec:
        replicas: 1
        selector:
          matchLabels:
            app: daemon-1
        template:
          metadata:
            labels:
              app: daemon-1
          spec:
            containers:
            - image: nginx:1.18.0
              imagePullPolicy: Always
              name: nginx
  nodepoolSelector:
    matchLabels:
      yurtappdaemon.openyurt.io/type: "nginx"
EOF
```
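At this point no Deployment has been generated yet: the `nodepoolSelector` above only matches NodePools labeled `yurtappdaemon.openyurt.io/type: nginx`, and neither pool carries that label so far. A quick check:

```bash
# Expected to return nothing until a NodePool is labeled in the next step
kubectl get deployments.apps -l app=daemon-1
```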
• Label test1 NodePool

```bash
kubectl label np test1 yurtappdaemon.openyurt.io/type=nginx

# Check the Deployment
kubectl get deployments.apps

# Check the Deployment nodeselector (one way to inspect it; the generated
# Deployment is expected to pin its Pods to the pool's nodes)
kubectl get deployments.apps -l app=daemon-1 -o jsonpath='{.items[*].spec.template.spec.nodeSelector}'

# Check the Pod
kubectl get pod -l app=daemon-1 -o wide
```
• Label test2 NodePool

```bash
kubectl label np test2 yurtappdaemon.openyurt.io/type=nginx

# Check the Deployment
kubectl get deployments.apps

# Check the Deployment nodeselector
kubectl get deployments.apps -l app=daemon-1 -o jsonpath='{.items[*].spec.template.spec.nodeSelector}'

# Check the Pod
kubectl get pod -l app=daemon-1 -o wide
```
• Update YurtAppDaemon

```bash
# Change yurtappdaemon workloadTemplate replicas to 2
# Change yurtappdaemon workloadTemplate image to nginx:1.19.0
kubectl edit yurtappdaemon daemon-1   # or patch non-interactively; see the sketch after this list

# Check the Pod
kubectl get pod -l app=daemon-1 -o wide
```
• Remove NodePool labels

```bash
# Remove the nodepool test1 label
kubectl label np test1 yurtappdaemon.openyurt.io/type-

# Check the Deployment (the test1 Deployment is expected to be removed)
kubectl get deployments.apps

# Check the Pod
kubectl get pod -l app=daemon-1 -o wide

# Remove the nodepool test2 label
kubectl label np test2 yurtappdaemon.openyurt.io/type-

# Check the Deployment
kubectl get deployments.apps

# Check the Pod
kubectl get pod -l app=daemon-1 -o wide
```
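For the update step above, a non-interactive alternative to `kubectl edit` is a JSON patch; a minimal sketch, assuming the `daemon-1` example from this walkthrough (the field paths follow its `workloadTemplate`):

```bash
# Bump the per-pool replicas to 2 and roll the image to nginx:1.19.0
kubectl patch yurtappdaemon daemon-1 --type='json' -p='[
  {"op": "replace", "path": "/spec/workloadTemplate/deploymentTemplate/spec/replicas", "value": 2},
  {"op": "replace", "path": "/spec/workloadTemplate/deploymentTemplate/spec/template/spec/containers/0/image", "value": "nginx:1.19.0"}
]'
```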

Example for deploying CoreDNS

This example uses YurtAppDaemon together with service topology to solve DNS resolution problems across NodePools: YurtAppDaemon runs a CoreDNS Deployment in every matching NodePool, and the `openyurt.io/topologyKeys: openyurt.io/nodepool` annotation on the kube-dns Service (see below) keeps DNS queries inside the client's own pool.

• Create NodePool

```bash
cat <<EOF | kubectl apply -f -
apiVersion: apps.openyurt.io/v1alpha1
kind: NodePool
metadata:
  name: hangzhou
spec:
  selector:
    matchLabels:
      apps.openyurt.io/nodepool: hangzhou
  taints:
  - effect: NoSchedule
    key: node-role.openyurt.io/edge
  type: Edge
EOF
```
• Add label to NodePool

```bash
kubectl label np hangzhou yurtappdaemon.openyurt.io/type=coredns
```
• Deploy CoreDNS

```bash
cat <<EOF | kubectl apply -f -
apiVersion: apps.openyurt.io/v1alpha1
kind: YurtAppDaemon
metadata:
  name: coredns
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: kube-dns
  workloadTemplate:
    deploymentTemplate:
      metadata:
        labels:
          k8s-app: kube-dns
      spec:
        replicas: 2
        selector:
          matchLabels:
            k8s-app: kube-dns
        template:
          metadata:
            labels:
              k8s-app: kube-dns
          spec:
            volumes:
            - name: config-volume
              configMap:
                name: coredns
                items:
                - key: Corefile
                  path: Corefile
            dnsPolicy: Default
            serviceAccount: coredns
            serviceAccountName: coredns
            containers:
            - args:
              - -conf
              - /etc/coredns/Corefile
              image: k8s.gcr.io/coredns:1.6.7
              imagePullPolicy: IfNotPresent
              name: coredns
              resources:
                limits:
                  memory: 170Mi
                requests:
                  cpu: 100m
                  memory: 70Mi
              securityContext:
                allowPrivilegeEscalation: false
                capabilities:
                  add:
                  - NET_BIND_SERVICE
                  drop:
                  - all
                readOnlyRootFilesystem: true
              livenessProbe:
                failureThreshold: 5
                httpGet:
                  path: /health
                  port: 8080
                  scheme: HTTP
                initialDelaySeconds: 60
                periodSeconds: 10
                successThreshold: 1
                timeoutSeconds: 5
              volumeMounts:
              - mountPath: /etc/coredns
                name: config-volume
                readOnly: true
  nodepoolSelector:
    matchLabels:
      yurtappdaemon.openyurt.io/type: "coredns"
---
apiVersion: v1
kind: Service
metadata:
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
    openyurt.io/topologyKeys: openyurt.io/nodepool
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: KubeDNS
  name: kube-dns
spec:
  clusterIP: __kubernetes-coredns-ip__ # Replace with the cluster's kubernetes DNS service IP
  ports:
  - name: dns
    port: 53
    protocol: UDP
    targetPort: 53
  - name: dns-tcp
    port: 53
    protocol: TCP
    targetPort: 53
  - name: metrics
    port: 9153
    protocol: TCP
    targetPort: 9153
  selector:
    k8s-app: kube-dns
  sessionAffinity: None
  type: ClusterIP
---
apiVersion: v1
data:
  Corefile: |
    .:53 {
        errors
        health {
            lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            pods insecure
            fallthrough in-addr.arpa ip6.arpa
            ttl 30
        }
        prometheus :9153
        forward . /etc/resolv.conf
        cache 30
        loop
        reload
        loadbalance
    }
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
EOF
```
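Two helper commands around this manifest; a sketch, assuming the cluster's default kube-dns Service already exists:

```bash
# Look up the cluster DNS service IP to substitute for __kubernetes-coredns-ip__
kubectl get svc kube-dns -n kube-system -o jsonpath='{.spec.clusterIP}'

# After applying and labeling pools, one CoreDNS Deployment per matching
# NodePool is expected in kube-system
kubectl get deployments.apps -n kube-system -l k8s-app=kube-dns
```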