同步操作将从 atompi/install-single-master-K8s 强制同步,此操作会覆盖自 Fork 仓库以来所做的任何修改,且无法恢复!!!
确定后同步将在后台操作,完成时将刷新页面,请耐心等待。
两个命令从空白系统到单 master 节点 K8s 集群。
支持操作系统: CentOS 7+
支持 K8s 版本: v1.18.0+
单 master 节点,使用 kubeadm 快速部署,使用 calico 作为 CNI 插件。生产环境 HA 部署见:Prod-K8S-HA-Installer
# Three parameters: $host_name $host_ip $name_server
# Hostname to assign to this node
host_name=k8s-w01
# Change this to the machine's own IP address
host_ip=10.0.0.234
# DNS server IP
name_server=192.168.1.60
# Quote the expansions so empty/whitespace values cannot shift positional args
bash pre_reboot.sh "$host_name" "$host_ip" "$name_server"
# Three parameters: $docker_version $kubernetes_version $docker_server
docker_version=20.10.7
kubernetes_version=1.21.3
docker_server=hub.gitee.cc
# Quote the expansions so empty/whitespace values cannot shift positional args
bash post_reboot.sh "$docker_version" "$kubernetes_version" "$docker_server"
注意: 如果已经存在kubernetes集群,执行完上面两个脚本之后可以加入到集群,命令如下:
在master节点上执行,可获取kubeadm join 命令及参数
kubeadm token create --print-join-command
kubeadm join apiserver.demo:6443 --token mpfjma.4vjjg8flqihor4vt --discovery-token-ca-cert-hash sha256:6f7a8e40a810323672de5eee6f4d19aa2dbdb38411845a1bf5dd63485c43d303
在worker节点上执行如下
kubeadm join apiserver.demo:6443 --token mpfjma.4vjjg8flqihor4vt --discovery-token-ca-cert-hash sha256:6f7a8e40a810323672de5eee6f4d19aa2dbdb38411845a1bf5dd63485c43d303
## master查看所需的镜像
[root@k8smaster1 install-single-master-K8s]# kubeadm config images list
k8s.gcr.io/kube-apiserver:v1.21.3
k8s.gcr.io/kube-controller-manager:v1.21.3
k8s.gcr.io/kube-scheduler:v1.21.3
k8s.gcr.io/kube-proxy:v1.21.3
k8s.gcr.io/pause:3.4.1
k8s.gcr.io/etcd:3.4.13-0
k8s.gcr.io/coredns/coredns:v1.8.0
# Prepare the coredns image: pull it from the Aliyun mirror, then re-tag it
# under the nested coredns/coredns path, because the required image list
# (see `kubeadm config images list` above) uses k8s.gcr.io/coredns/coredns,
# so kubeadm will look for <image-repository>/coredns/coredns:v1.8.0.
docker pull registry.cn-hangzhou.aliyuncs.com/k8sos/coredns:v1.8.0
docker tag registry.cn-hangzhou.aliyuncs.com/k8sos/coredns:v1.8.0 registry.cn-hangzhou.aliyuncs.com/k8sos/coredns/coredns:v1.8.0
# 手动初始化时使用阿里云镜像
# Manually initialize the control plane, pulling component images from the
# Aliyun mirror instead of k8s.gcr.io.
#   --control-plane-endpoint : stable DNS name for the API server
#   --image-repository       : registry to pull control-plane images from
#   --service-cidr           : virtual IP range for Services
#   --pod-network-cidr       : pod IP range (must match the CNI configuration)
#   --apiserver-advertise-address 0.0.0.0 : advertise on the default interface
#   --upload-certs           : upload control-plane certs so more masters can join
kubeadm init \
--control-plane-endpoint "kube-gitee-go.gitee.cc" \
--image-repository=registry.cn-hangzhou.aliyuncs.com/k8sos \
--kubernetes-version=v1.21.3 \
--service-cidr=10.96.0.0/12 \
--service-dns-domain "cluster.local" \
--pod-network-cidr=10.244.0.0/16 \
--apiserver-advertise-address "0.0.0.0" \
--apiserver-bind-port 6443 \
--upload-certs
将镜像上传到内网的harbor上
docker login hub.gitee.cc -uxx -pxxxx
# 使用脚本批量上传
sh push_all_images.sh
从内网仓库下载镜像同时使用脚本初始化集群
# Three parameters:
# $control_plane_endpoint - stable address (host:port) of the API server
# $private_registry_host  - internal registry that mirrors the K8s images
# $kubernetes_version     - version tag to install, e.g. v1.21.3
control_plane_endpoint="kube-gitee-go.gitee.cc:6443"
private_registry_host=hub.gitee.cc
kubernetes_version=v1.21.3
# Quote the expansions so empty/whitespace values cannot shift positional args
bash ./init_cluster.sh "${control_plane_endpoint}" "${private_registry_host}" "${kubernetes_version}"
# 注释 脚本执行的如下命令
# The command executed by init_cluster.sh: identical to the manual init above,
# except images are pulled from the internal Harbor registry
# (hub.gitee.cc/google_containers) instead of the Aliyun mirror.
kubeadm init \
--control-plane-endpoint "kube-gitee-go.gitee.cc" \
--image-repository=hub.gitee.cc/google_containers \
--kubernetes-version=v1.21.3 \
--service-cidr=10.96.0.0/12 \
--service-dns-domain "cluster.local" \
--pod-network-cidr=10.244.0.0/16 \
--apiserver-advertise-address "0.0.0.0" \
--apiserver-bind-port 6443 \
--upload-certs
使用脚本init_cluster.sh
初始化之后日志、calico 网络插件、和node脚本信息输出在/root/kube_src
目录下
[root@k8smaster1 kube_src]# ll /root/kube_src/
total 44
-rw-r--r-- 1 root root 24920 Mar 27 09:20 calico-typha.yaml
-rw-r--r-- 1 root root 5589 Mar 27 09:19 kubeadm-init.log
-rw-r--r-- 1 root root 351 Mar 27 09:20 kubeadm-join-master.sh
-rw-r--r-- 1 root root 185 Mar 27 09:20 kubeadm-join-worker.sh
[root@k8smaster1 kube_src]# cat kubeadm-join-worker.sh
kubeadm join kube-apiserver-go.gitee.cc:6443 --token ivtmnz.5r5kyk7nin4put80 --discovery-token-ca-cert-hash sha256:e3501b4a21e8a59bed0f45001d2c106be8781673d1586b8401edb368e0d98833
在其他worker节点执行如上命令加入集群。
查看集群状态
[root@k8smaster1 kube_src]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8smaster1 Ready master 3m44s v1.19.9
k8snode1 Ready <none> 54s v1.19.9
k8snode2 Ready <none> 53s v1.19.9
增加命令提示
# Install bash-completion and enable kubectl tab-completion for the current
# shell; also persist it for future logins via ~/.bashrc.
yum install -y bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc
# Add a 'k' alias for kubectl (edit ~/.bashrc and add the alias line below)
vim ~/.bashrc
alias k='kubectl'
# Tab-completion only works for the full 'kubectl' command, not the 'k'
# alias; fix it by loading a completion script rewritten for 'k':
source <(kubectl completion bash | sed 's/kubectl/k/g') # also add this to .bashrc
# Tear down any previous kubeadm state on this node, then remove the stale
# kubectl config directory.
kubeadm reset
# "${HOME:?}" aborts (instead of expanding to "/.kube") if HOME is unset;
# '--' stops option parsing for safety.
rm -rf -- "${HOME:?}/.kube"
检查 master 初始化结果
# 只在 master 节点执行
# 执行如下命令,等待 3-10 分钟,直到所有的容器组处于 Running 状态
watch kubectl get pod -n kube-system -o wide
# 查看 master 节点初始化结果
kubectl get nodes -o wide
获得 join命令参数,在master 节点操作
# 只在 master 节点执行
kubeadm token create --print-join-command
有效时间
该 token 的有效时间为 2 个小时,2小时内,您可以使用此 token 初始化任意数量的 worker 节点。
在任意master节点上执行kubectl top命令
# kubectl top node
error: Metrics API not available
COPY
发现top指令无法取得Metrics,这表示Kubernetes 丛集没有安装Heapster或是Metrics Server 来提供Metrics API给top指令取得资源使用量。
安装metric-server组件
git clone https://gitee.com/cainiao555/metrics-server.git
kubectl create -f metrics-server/
方式1
# 只在 master 节点执行
kubectl apply -f https://kuboard.cn/install-script/v1.17.x/nginx-ingress.yaml
或者
kubectl apply -f https://kuboard.cn/install-script/v1.19.x/nginx-ingress.yaml
卸载
只在您想选择其他 Ingress Controller 的情况下卸载
# 只在 master 节点执行
kubectl delete -f https://kuboard.cn/install-script/v1.19.x/nginx-ingress.yaml
定制化ingress
# 如果打算用于生产环境,请参考 https://github.com/nginxinc/kubernetes-ingress/blob/v1.5.5/docs/installation.md 并根据您自己的情况做进一步定制
查看ingress运行状态
[root@k8smaster1 kube_src]# kubectl get pods --all-namespaces |grep nginx
nginx-ingress nginx-ingress-rnsg2 1/1 Running 0 53m
nginx-ingress nginx-ingress-rxwvc 1/1 Running 0 53m
[root@k8smaster1 kube_src]# kubectl get pod -n nginx-ingress
NAME READY STATUS RESTARTS AGE
nginx-ingress-rnsg2 1/1 Running 0 54m
nginx-ingress-rxwvc 1/1 Running 0 54m
方式2
Ingress 是 Kubernetes 中的一个抽象资源,其功能是透过 Web Server 的 Virtual Host 概念以域名(Domain Name)方式转发到内部 Service,这避免了使用 Service 中的 NodePort 与 LoadBalancer 类型所带来的限制(如 Port 数量上限),而实现 Ingress 功能则是透过 Ingress Controller来达成,它会负责监听 Kubernetes API中的 Ingress 与 Service 资源物件,并在发生资源变化时,依据资源预期的结果来设定 Web Server。另外Ingress Controller 有许多实现可以选择:
部署 ingress-nginx
# Download the bare-metal ingress-nginx v0.44.0 manifest, rewrite the image
# repository from k8s.gcr.io to the Aliyun mirror, strip the @sha256 digests
# (they no longer match the mirrored images), then apply and wait for the
# controller pod to become Ready.
wget https://cdn.jsdelivr.net/gh/kubernetes/ingress-nginx@controller-v0.44.0/deploy/static/provider/baremetal/deploy.yaml -O ingress-nginx.yaml
sed -i -e 's#k8s.gcr.io/ingress-nginx#registry.cn-hangzhou.aliyuncs.com/kainstall#g' \
-e 's#@sha256:.*$##g' ingress-nginx.yaml
kubectl apply -f ingress-nginx.yaml
kubectl wait --namespace ingress-nginx --for=condition=ready pods --selector=app.kubernetes.io/component=controller --timeout=60s
pod/ingress-nginx-controller-67848f7b-2gxzb condition met
COPY
官方默认加上了 admission
功能,而我们的 apiserver 使用宿主机的dns,不是coredns,所以连接不上 ingress-nginx Controller 的 service 地址,这里我们把 admission
准入钩子去掉,使我们创建 ingress 资源时,不去验证Controller。
admission webhook 的作用我简单的总结下,当用户的请求到达 k8s apiserver 后,apiserver 根据
MutatingWebhookConfiguration
和ValidatingWebhookConfiguration
的配置,先调用MutatingWebhookConfiguration
去修改用户请求的配置文件,最后会调用ValidatingWebhookConfiguration
来验证这个修改后的配置文件是否合法。
kubectl delete -A ValidatingWebhookConfiguration ingress-nginx-admission
Dashboard 是Kubernetes官方开发的基于Web的仪表板,目的是提升管理Kubernetes集群资源便利性,并以资源视觉化方式,来让人更直觉的看到整个集群资源状态。
部署 dashboard
wget https://cdn.jsdelivr.net/gh/kubernetes/dashboard@v2.2.0/aio/deploy/recommended.yaml -O dashboard.yaml
kubectl apply -f dashboard.yaml
部署 ingress
# Expose the dashboard Service through ingress-nginx with TLS/SSL passthrough.
# (Indentation restored: the YAML below is invalid without it.)
cat << EOF | kubectl apply -f -
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  annotations:
    kubernetes.io/ingress.class: nginx
    nginx.ingress.kubernetes.io/secure-backends: 'true'
    nginx.ingress.kubernetes.io/backend-protocol: 'HTTPS'
    nginx.ingress.kubernetes.io/ssl-passthrough: 'true'
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  tls:
    - hosts:
        - kubernetes-dashboard.cluster.local
      secretName: kubernetes-dashboard-certs
  rules:
    - host: kubernetes-dashboard.cluster.local
      http:
        paths:
          - path: /
            backend:
              serviceName: kubernetes-dashboard
              servicePort: 443
EOF
COPY
创建 sa,使用 sa 的 token 进行登录 dashboard
# Create an admin ServiceAccount for dashboard login
kubectl create serviceaccount kubernetes-dashboard-admin-sa -n kubernetes-dashboard
# Bind it to the built-in cluster-admin ClusterRole
kubectl create clusterrolebinding kubernetes-dashboard-admin-sa --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:kubernetes-dashboard-admin-sa -n kubernetes-dashboard
# Extract and print the SA's bearer token for the dashboard login screen
kubectl describe secrets $(kubectl describe sa kubernetes-dashboard-admin-sa -n kubernetes-dashboard | awk '/Tokens/ {print $2}') -n kubernetes-dashboard | awk '/token:/{print $2}'
eyJhbGciOiJSUzI1NiIsImtpZCI6IkZqLVpEbzQxNzR3ZGJ5MUlpalE5V1pVM0phRVg0UlhCZ3pwdnY1Y0lEZGcifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC1hZG1pbi1zYS10b2tlbi1sOXhiaCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC1hZG1pbi1zYSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6ImMxYWZiYmEyLTQyMzktNGM3Yy05NjBlLThiZTkwNDY0MzY5MCIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDprdWJlcm5ldGVzLWRhc2hib2FyZC1hZG1pbi1zYSJ9.fbZodynYBF8QQOvwj_lzU1wxKiD0HE1CWiyAvp79y9Uu2uQerRMPEuT6KFwFLZ9Pj3be_HTbrDN88im3s-Q2ARpolSACRexMM_nJ2u4pc3MXNEf6e7AJUHB4JnbTsIn5RCSwA8kjYFlWKxX8s1Q8pSKUy_21aMYxuBaqPhzQiuu9RmPBmHkNSYWVncgiPqZWaaadI_l53Jj0KjTMLahG7fqVt2ioTp1ZsIZNaQdNdh8Gzn-SuFCIrNN5oR3bdWNyxbv0OGxrKBHqlVs_8V46ygBc1lyGfpKcA59Wq8-FtIc3zzx531Ix6fDvouJuqHsMxu9VCOFG5mjyYzdsQgemIA
COPY
获取 dashboard 的 ingres 连接地址
echo https://$(kubectl get node -o jsonpath='{range .items[*]}{ .status.addresses[?(@.type=="InternalIP")].address} {.status.conditions[?(@.status == "True")].status}{"\n"}{end}' | awk '{if($2=="True")a=$1}END{print a}'):$(kubectl get svc --all-namespaces -o go-template="{{range .items}}{{if eq .metadata.name \"ingress-nginx-controller\" }}{{range.spec.ports}}{{if eq .port "443"}}{{.nodePort}}{{end}}{{end}}{{end}}{{end}}")
https://192.168.77.145:37454
将 host 绑定后,使用token 进行登录
192.168.77.145 kubernetes-dashboard.cluster.local
https://kubernetes-dashboard.cluster.local:37454
部署应用
# Deploy a demo application (traefik/whoami, 2 replicas), a ClusterIP Service
# in front of it, and an Ingress rule routing app.demo.com to the Service.
# (Indentation restored: the YAML below is invalid without it.)
cat <<EOF | kubectl apply -f -
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ingress-demo-app
  labels:
    app: ingress-demo-app
spec:
  replicas: 2
  selector:
    matchLabels:
      app: ingress-demo-app
  template:
    metadata:
      labels:
        app: ingress-demo-app
    spec:
      containers:
        - name: whoami
          image: traefik/whoami:v1.6.1
          ports:
            - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: ingress-demo-app
spec:
  type: ClusterIP
  selector:
    app: ingress-demo-app
  ports:
    - name: http
      port: 80
      targetPort: 80
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: ingress-demo-app
  annotations:
    kubernetes.io/ingress.class: nginx
spec:
  rules:
    - host: app.demo.com
      http:
        paths:
          - path: /
            backend:
              serviceName: ingress-demo-app
              servicePort: 80
EOF
获取应用的pods
# kubectl get pods -l app=ingress-demo-app
NAME READY STATUS RESTARTS AGE
ingress-demo-app-694bf5d965-69v42 1/1 Running 0 68s
ingress-demo-app-694bf5d965-7qt5p 1/1 Running 0 68s
通过 ingress 访问
echo http://$(kubectl get node -o jsonpath='{range .items[*]}{ .status.addresses[?(@.type=="InternalIP")].address} {.status.conditions[?(@.status == "True")].status}{"\n"}{end}' | awk '{if($2=="True")a=$1}END{print a}'):$(kubectl get svc --all-namespaces -o go-template="{{range .items}}{{if eq .metadata.name \"ingress-nginx-controller\" }}{{range.spec.ports}}{{if eq .port "80"}}{{.nodePort}}{{end}}{{end}}{{end}}{{end}}")
http://192.168.77.145:40361
kubectl get pods -n ingress-nginx -l app.kubernetes.io/component=controller -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
ingress-nginx-controller-67848f7b-sx7mf 1/1 Running 0 97m 10.244.5.6 k8s-worker-node3 <none> <none>
# curl -H 'Host:app.demo.com' http://192.168.77.145:40361
Hostname: ingress-demo-app-694bf5d965-7qt5p
IP: 127.0.0.1
IP: 10.244.5.8
RemoteAddr: 10.244.5.6:38674
GET / HTTP/1.1
Host: app.demo.com
User-Agent: curl/7.64.0
Accept: */*
X-Forwarded-For: 10.244.5.1
X-Forwarded-Host: app.demo.com
X-Forwarded-Port: 80
X-Forwarded-Proto: http
X-Real-Ip: 10.244.5.1
X-Request-Id: 90f14481aacd9ab5a1ef20d6113ddbe0
X-Scheme: http
从 whoami 应用返回的信息可以看到,我们通过 ingress 访问到了 whoami app。
Kubectl客户端工具上传到/usr/local/bin下
# On the 192.168.1.46 machine: prepare the kubeconfig directory
mkdir -p /root/.kube/
# On the master: copy admin.conf over to the .46 machine
scp /etc/kubernetes/admin.conf root@192.168.1.46:/root/.kube/
# Back on the .46 machine: rename it to the default kubeconfig name
cd /root/.kube/ && mv admin.conf config
部署NFS服务器
yum -y install nfs-utils rpcbind
[root@Gitee-Go app]# cat /etc/exports
/data/nfs 192.168.1.0/24(rw,sync,insecure,no_subtree_check,no_root_squash)
service rpcbind restart
service nfs restart
showmount -e localhost
Node节点部署NFS客户端
yum -y install nfs-utils
systemctl restart nfs
# 测试到NFS服务器的连接
showmount -e 192.168.1.46
下载NFS存储分配器
git clone https://github.com.cnpmjs.org/kubernetes-retired/external-storage.git
cd external-storage/nfs-client/deploy
vim deployment.yaml
修改文件中的部分配置,然后保存。
deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: quay.io/external_storage/nfs-client-provisioner:latest
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            # must match the StorageClass 'provisioner' field
            - name: PROVISIONER_NAME
              value: managed-nfs-storage
            # NFS server exporting /data/nfs (see /etc/exports above)
            - name: NFS_SERVER
              value: 192.168.1.46
            - name: NFS_PATH
              value: /data/nfs
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.1.46
            path: /data/nfs
class.yaml中的provisioner要与deployment.yaml中一致
class.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
# or choose another name; must match the deployment's PROVISIONER_NAME env value
provisioner: managed-nfs-storage
parameters:
  archiveOnDelete: "false"
接下来,执行以下命令,创建NFS存储分配器的相关资源。
kubectl apply -f external-storage/nfs-client/deploy/
kubectl get deployment
test-claim.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-claim
  annotations:
    # request dynamic provisioning from the managed-nfs-storage class
    volume.beta.kubernetes.io/storage-class: "managed-nfs-storage"
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Mi
test-pod.yaml
kind: Pod
apiVersion: v1
metadata:
  name: test-pod
spec:
  containers:
    - name: test-pod
      image: gcr.io/google_containers/busybox:1.24
      command:
        - "/bin/sh"
      args:
        - "-c"
        # writes a marker file into the NFS-backed volume, then exits
        - "touch /mnt/SUCCESS && exit 0 || exit 1"
      volumeMounts:
        - name: nfs-pvc
          mountPath: "/mnt"
  restartPolicy: "Never"
  volumes:
    - name: nfs-pvc
      persistentVolumeClaim:
        claimName: test-claim
查看集群中storageclass信息
$ k get sc
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
cds1 csi-cdsplugin Delete Immediate false 5s
$ k describe storageclass cds1
Name: cds1
IsDefaultClass: No
Annotations: kubectl.kubernetes.io/last-applied-configuration={"apiVersion":"storage.k8s.io/v1","kind":"StorageClass","metadata":{"annotations":{},"name":"cds1"},"parameters":{"paymentTiming":"Postpaid","reservationLength":"","storageType":"hdd"},"provisioner":"csi-cdsplugin","reclaimPolicy":"Delete"}
Provisioner: csi-cdsplugin
Parameters: paymentTiming=Postpaid,reservationLength=,storageType=hdd
AllowVolumeExpansion: <unset>
MountOptions: <none>
ReclaimPolicy: Delete
VolumeBindingMode: Immediate
Events: <none>
在kubernetes1.20.1版本以上,出现一个bug
1.20.4版本,解决方法:在 /etc/kubernetes/manifests/kube-apiserver.yaml 中添加 "--feature-gates=RemoveSelfLink=false"
参考文献: https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner/issues/25 https://stackoverflow.com/questions/65376314/kubernetes-nfs-provider-selflink-was-empty
参考文献:
ansible部署方式 https://gitee.com/oschina/CI-gitee-Docs/blob/master/install-k8s-ha.md
https://lework.github.io/2021/04/03/debian-kubeadm-install/#%E9%83%A8%E7%BD%B2-whoami-app
此处可能存在不合适展示的内容,页面不予展示。您可通过相关编辑功能自查并修改。
如您确认内容无涉及 不当用语 / 纯广告导流 / 暴力 / 低俗色情 / 侵权 / 盗版 / 虚假 / 无价值内容或违法国家有关法律法规的内容,可点击提交进行申诉,我们将尽快为您处理。