同步操作将从 zhuang kang/kubeode 强制同步,此操作会覆盖自 Fork 仓库以来所做的任何修改,且无法恢复!!!
确定后同步将在后台操作,完成时将刷新页面,请耐心等待。
一键安装命令(要求centos7系统为新装系统无任何软件环境可联网) 不推荐git下来仓库大概1.5gb左右比较大,可以直接下载离线包 ##一键安装通道01(默认走家庭宽带普通通道---不稳定不推荐)
# Retry the download until curl succeeds (print the retry notice BEFORE sleeping,
# so the user sees it immediately), then unpack and run the installer.
while true; do rm -f K8s_1.0.tar*; curl -o K8s_1.0.tar http://www.linuxtools.cn:42344/K8s_1.0.tar && break; echo 网络错误正在重试下载 >&2; sleep 5; done && tar -xzvf K8s_1.0.tar && cd K8s/ && sh install.sh
##一键安装通道02(走群友无私赞助电信机房专线服务器--高速稳定下载----强烈推荐)
# Same retry loop as channel 01, pointed at the donated datacenter mirror.
# Fixed: idiomatic `while true`, plain `break`, and the retry message is
# printed before the 5-second back-off instead of after it.
while true; do rm -f K8s_1.0.tar*; curl -o K8s_1.0.tar http://117.27.146.72:42344/K8s_1.0.tar && break; echo 网络错误正在重试下载 >&2; sleep 5; done && tar -xzvf K8s_1.0.tar && cd K8s/ && sh install.sh
[root@k8s-master-db2 ~]#
[root@k8s-master-db2 ~]# sh K8s/shell_01/Check02.sh
==============master节点健康检测 kube-apiserver kube-controller-manager kube-scheduler etcd kubelet kube-proxy docker==================
192.168.123.30 | CHANGED | rc=0 >>
active active active active active active active
===============================================note节点监控检测 etcd kubelet kube-proxy docker===============================================
192.168.123.33 | CHANGED | rc=0 >>
active active active active
192.168.123.31 | CHANGED | rc=0 >>
active active active active
192.168.123.34 | CHANGED | rc=0 >>
active active active active
192.168.123.35 | CHANGED | rc=0 >>
active active active active
192.168.123.32 | CHANGED | rc=0 >>
active active active active
192.168.123.36 | CHANGED | rc=0 >>
active active active active
192.168.123.37 | CHANGED | rc=0 >>
active active active active
192.168.123.38 | CHANGED | rc=0 >>
active active active active
192.168.123.39 | CHANGED | rc=0 >>
active active active active
===============================================监测csr,cs,pvc,pv,storageclasses===============================================
NAME STATUS MESSAGE ERROR
componentstatus/scheduler Healthy ok
componentstatus/controller-manager Healthy ok
componentstatus/etcd-8 Healthy {"health":"true"}
componentstatus/etcd-2 Healthy {"health":"true"}
componentstatus/etcd-4 Healthy {"health":"true"}
componentstatus/etcd-1 Healthy {"health":"true"}
componentstatus/etcd-9 Healthy {"health":"true"}
componentstatus/etcd-3 Healthy {"health":"true"}
componentstatus/etcd-5 Healthy {"health":"true"}
componentstatus/etcd-0 Healthy {"health":"true"}
componentstatus/etcd-7 Healthy {"health":"true"}
componentstatus/etcd-6 Healthy {"health":"true"}
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
persistentvolumeclaim/gluster1-test Bound pvc-320524b5-e0db-11e9-b631-000c291569a0 1Gi RWX gluster-heketi 3d4h
persistentvolumeclaim/my-grafana Bound pvc-3915480c-e0db-11e9-b631-000c291569a0 10Gi RWO gluster-heketi 3d4h
persistentvolumeclaim/my-mysql Bound pvc-bde4543d-e0f5-11e9-b631-000c291569a0 8Gi RWO gluster-heketi 3d1h
persistentvolumeclaim/my-prometheus-prometheus-alertmanager Bound pvc-376a9230-e0db-11e9-b631-000c291569a0 2Gi RWO gluster-heketi 3d4h
persistentvolumeclaim/my-prometheus-prometheus-server Bound pvc-376b29bc-e0db-11e9-b631-000c291569a0 8Gi RWO gluster-heketi 3d4h
persistentvolumeclaim/redis-data-my-redis-master-0 Pending 3d
persistentvolumeclaim/redis-data-my-redis-slave-0 Pending 3d
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
persistentvolume/pvc-320524b5-e0db-11e9-b631-000c291569a0 1Gi RWX Delete Bound default/gluster1-test gluster-heketi 3d4h
persistentvolume/pvc-376a9230-e0db-11e9-b631-000c291569a0 2Gi RWO Delete Bound default/my-prometheus-prometheus-alertmanager gluster-heketi 3d4h
persistentvolume/pvc-376b29bc-e0db-11e9-b631-000c291569a0 8Gi RWO Delete Bound default/my-prometheus-prometheus-server gluster-heketi 3d4h
persistentvolume/pvc-3915480c-e0db-11e9-b631-000c291569a0 10Gi RWO Delete Bound default/my-grafana gluster-heketi 3d4h
persistentvolume/pvc-bde4543d-e0f5-11e9-b631-000c291569a0 8Gi RWO Delete Bound default/my-mysql gluster-heketi 3d1h
NAME PROVISIONER AGE
storageclass.storage.k8s.io/gluster-heketi kubernetes.io/glusterfs 3d4h
===============================================监测node节点labels===============================================
NAME STATUS ROLES AGE VERSION LABELS
192.168.123.30 Ready master 3d4h v1.14.4 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,dashboard=master,kubernetes.io/arch=amd64,kubernetes.io/hostname=192.168.123.30,kubernetes.io/os=linux,node-role.kubernetes.io/master=master,storagenode=glusterfs
192.168.123.31 Ready node 3d4h v1.14.4 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=192.168.123.31,kubernetes.io/os=linux,node-role.kubernetes.io/node=node,storagenode=glusterfs
192.168.123.32 Ready node 3d4h v1.14.4 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=192.168.123.32,kubernetes.io/os=linux,node-role.kubernetes.io/node=node,storagenode=glusterfs
192.168.123.33 Ready node 3d4h v1.14.4 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=192.168.123.33,kubernetes.io/os=linux,node-role.kubernetes.io/node=node,storagenode=glusterfs
192.168.123.34 Ready node 3d4h v1.14.4 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=192.168.123.34,kubernetes.io/os=linux,node-role.kubernetes.io/node=node,storagenode=glusterfs
192.168.123.35 Ready node 3d4h v1.14.4 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=192.168.123.35,kubernetes.io/os=linux,node-role.kubernetes.io/node=node,storagenode=glusterfs
192.168.123.36 Ready node 3d4h v1.14.4 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=192.168.123.36,kubernetes.io/os=linux,node-role.kubernetes.io/node=node,storagenode=glusterfs
192.168.123.37 Ready node 3d4h v1.14.4 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=192.168.123.37,kubernetes.io/os=linux,node-role.kubernetes.io/node=node,storagenode=glusterfs
192.168.123.38 Ready node 3d4h v1.14.4 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=192.168.123.38,kubernetes.io/os=linux,node-role.kubernetes.io/node=node,storagenode=glusterfs
192.168.123.39 Ready node 3d4h v1.14.4 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=192.168.123.39,kubernetes.io/os=linux,node-role.kubernetes.io/node=node,storagenode=glusterfs
===============================================监测coredns是否正常工作===============================================
coredns-57656b67bb-s6bcj 1/1 Running 0 3d4h
Server: 10.0.0.2
Address 1: 10.0.0.2 kube-dns.kube-system.svc.cluster.local
Name: kubernetes
Address 1: 10.0.0.1 kubernetes.default.svc.cluster.local
pod "dns-test" deleted
===============================================监测,pods状态===============================================
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
default my-grafana-986448dfb-rvgw5 1/1 Running 0 3d4h 172.17.71.2 192.168.123.32 <none> <none>
default my-mysql-6bdb66578-5p6j2 2/2 Running 0 3d1h 172.17.71.3 192.168.123.32 <none> <none>
default my-prometheus-prometheus-alertmanager-79dfbddd64-x4l4h 2/2 Running 0 3d4h 172.17.40.2 192.168.123.33 <none> <none>
default my-prometheus-prometheus-kube-state-metrics-64dcd5d669-zwmvj 1/1 Running 0 3d4h 172.17.46.2 192.168.123.34 <none> <none>
default my-prometheus-prometheus-node-exporter-78q8k 1/1 Running 0 3d4h 192.168.123.31 192.168.123.31 <none> <none>
default my-prometheus-prometheus-node-exporter-7gkn4 1/1 Running 0 3d4h 192.168.123.38 192.168.123.38 <none> <none>
default my-prometheus-prometheus-node-exporter-bmr8b 1/1 Running 0 3d4h 192.168.123.33 192.168.123.33 <none> <none>
default my-prometheus-prometheus-node-exporter-f8q5g 1/1 Running 0 3d4h 192.168.123.36 192.168.123.36 <none> <none>
default my-prometheus-prometheus-node-exporter-jtg5f 1/1 Running 0 3d4h 192.168.123.35 192.168.123.35 <none> <none>
default my-prometheus-prometheus-node-exporter-mh2j2 1/1 Running 0 3d4h 192.168.123.34 192.168.123.34 <none> <none>
default my-prometheus-prometheus-node-exporter-mm5jf 1/1 Running 0 3d4h 192.168.123.37 192.168.123.37 <none> <none>
default my-prometheus-prometheus-node-exporter-rjjtf 1/1 Running 0 3d4h 192.168.123.32 192.168.123.32 <none> <none>
default my-prometheus-prometheus-node-exporter-v7ppl 1/1 Running 0 3d4h 192.168.123.39 192.168.123.39 <none> <none>
default my-prometheus-prometheus-node-exporter-vztqv 1/1 Running 0 3d4h 192.168.123.30 192.168.123.30 <none> <none>
default my-prometheus-prometheus-pushgateway-76d96d955d-6cpjj 1/1 Running 0 3d4h 172.17.88.4 192.168.123.31 <none> <none>
default my-prometheus-prometheus-server-558dc894b5-fzw95 2/2 Running 0 3d4h 172.17.101.3 192.168.123.30 <none> <none>
kube-system coredns-57656b67bb-s6bcj 1/1 Running 0 3d4h 172.17.88.3 192.168.123.31 <none> <none>
kube-system kubernetes-dashboard-5b5697d4-txgfw 1/1 Running 0 3d4h 172.17.101.2 192.168.123.30 <none> <none>
kube-system tiller-deploy-7f4d76c4b6-vcqgw 1/1 Running 0 3d4h 172.17.88.2 192.168.123.31 <none> <none>
===============================================监测node节点状态===============================================
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
192.168.123.30 Ready master 3d4h v1.14.4 192.168.123.30 <none> CentOS Linux 7 (Core) 3.10.0-957.el7.x86_64 docker://18.9.7
192.168.123.31 Ready node 3d4h v1.14.4 192.168.123.31 <none> CentOS Linux 7 (Core) 3.10.0-957.el7.x86_64 docker://18.9.7
192.168.123.32 Ready node 3d4h v1.14.4 192.168.123.32 <none> CentOS Linux 7 (Core) 3.10.0-957.el7.x86_64 docker://18.9.7
192.168.123.33 Ready node 3d4h v1.14.4 192.168.123.33 <none> CentOS Linux 7 (Core) 3.10.0-957.el7.x86_64 docker://18.9.7
192.168.123.34 Ready node 3d4h v1.14.4 192.168.123.34 <none> CentOS Linux 7 (Core) 3.10.0-957.el7.x86_64 docker://18.9.7
192.168.123.35 Ready node 3d4h v1.14.4 192.168.123.35 <none> CentOS Linux 7 (Core) 3.10.0-957.el7.x86_64 docker://18.9.7
192.168.123.36 Ready node 3d4h v1.14.4 192.168.123.36 <none> CentOS Linux 7 (Core) 3.10.0-957.el7.x86_64 docker://18.9.7
192.168.123.37 Ready node 3d4h v1.14.4 192.168.123.37 <none> CentOS Linux 7 (Core) 3.10.0-957.el7.x86_64 docker://18.9.7
192.168.123.38 Ready node 3d4h v1.14.4 192.168.123.38 <none> CentOS Linux 7 (Core) 3.10.0-957.el7.x86_64 docker://18.9.7
192.168.123.39 Ready node 3d4h v1.14.4 192.168.123.39 <none> CentOS Linux 7 (Core) 3.10.0-957.el7.x86_64 docker://18.9.7
================================================监测helm版本================================================
Client: &version.Version{SemVer:"v2.14.3", GitCommit:"0e7f3b6637f7af8fcfddb3d2941fcc7cbebb0085", GitTreeState:"clean"}
Server: &version.Version{SemVer:"v2.14.3", GitCommit:"0e7f3b6637f7af8fcfddb3d2941fcc7cbebb0085", GitTreeState:"clean"}
[root@k8s-master-db2 ~]#
====
**
#如果不需要使用v1.14.0 v1.15.0直接默认一键安装即可。master分支默认的是v1.14.4
链接:https://pan.baidu.com/s/1Sb8WH_z-dUI8z2vLEYWa_w 提取码:0eyz
放入前务必执行以下操作
rm -fv K8s/Software_package/kubernetes-server-linux-amd64.tar.a*
#可选执行-----替换第三方yum源
# Clear out the existing yum repo definitions before installing the Aliyun ones.
# BUG FIX: the original line was `rm -fv rm -f /etc/yum.repos.d/*` — the stray
# duplicated `rm -f` made the command also try to delete a file literally named
# `rm` in the current directory (silently, because of -f).
rm -fv /etc/yum.repos.d/*
# Retry each repo download until it succeeds.
# Fixed: the original loops had no delay between attempts, so a persistent
# network failure busy-looped and hammered the mirror; a 5-second back-off
# (matching the install one-liners above) is added between retries.
while true; do curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo && break; sleep 5; done
while true; do curl -o /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo && break; sleep 5; done
华丽分界线。。。。。。。。。。。。。。
一键安装命令(要求centos7系统为新装系统无任何软件环境可联网) 不推荐git下来仓库大概1.5gb左右比较大,可以直接下载离线包
##一键安装通道01(走私有服务器高速通道)
# Retry the download until curl succeeds, then unpack and run the installer.
# Fixed: idiomatic `while true`, plain `break`, and the retry message is
# printed before the 5-second back-off instead of after it.
while true; do rm -f K8s_1.0.tar*; curl -o K8s_1.0.tar http://www.linuxtools.cn:42344/K8s_1.0.tar && break; echo 网络错误正在重试下载 >&2; sleep 5; done && tar -xzvf K8s_1.0.tar && cd K8s/ && sh install.sh
##一键安装通道02(走码云服务器)
#临时弃用
https://www.bilibili.com/video/av57242055?from=search&seid=4003077921686184728
网络 | 系统 | 内核版本 | IP获取方式 | docker版本 | Kubernetes版本 | K8s集群安装方式 |
---|---|---|---|---|---|---|
桥接模式 | 新装CentOS7.6.1810 (Core) | 3.10.0-957.el7.x86_64 | 手动设置固定IP(不能dhcp获取所有节点) | 18.06.1-ce | v1.14.4 | 二进制包安装 |
# Manual (step-by-step) install: remove any partial archive, download from ONE
# of the two channels below (they fetch the same file to the same path — run
# only one), then unpack and start the installer.
rm -f K8s_1.0.tar*
# Download channel 01: ordinary home-broadband endpoint (slower).
curl -o K8s_1.0.tar http://www.linuxtools.cn:42344/K8s_1.0.tar
# Download channel 02: donated datacenter leased-line server — fast and stable, strongly recommended.
curl -o K8s_1.0.tar http://117.27.146.72:42344/K8s_1.0.tar
tar -xzvf K8s_1.0.tar
cd K8s/ && sh install.sh
===
2019-10-19
修复一些bug+内核优化
2019-10-10
修复时区问题,所有pod默认中国上海时区
2019-9-16
2019-9-27
2019-9-25
2019-9-13
2019-8-26 1 新增node节点批量增删 2 新增glusterfs分布式复制卷---持久化存储(集群版4台及以上自动内置部署)
2019-7-11 修复部分环境IP取值不精确导致etcd安装失败的问题
2019-7-10
2019-7-1
新增单机版 web图形化控制台dashboard K8s单机版安装完毕,web控制界面dashboard地址为 http://IP:42345
此处可能存在不合适展示的内容,页面不予展示。您可通过相关编辑功能自查并修改。
如您确认内容无涉及 不当用语 / 纯广告导流 / 暴力 / 低俗色情 / 侵权 / 盗版 / 虚假 / 无价值内容或违法国家有关法律法规的内容,可点击提交进行申诉,我们将尽快为您处理。