#SELinux and firewalld must be disabled on all servers
systemctl stop firewalld
systemctl disable firewalld
setenforce 0
sed -i '7 s/enforcing/disabled/' /etc/selinux/config
sed -n '7p' /etc/selinux/config
sed -i "17a GATEWAY=192.168.100.2\nDNS1=192.168.100.2" /etc/sysconfig/network-scripts/ifcfg-ens32
systemctl restart network
~
SELINUX=disabled
~
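#Quick sanity check (a sketch): confirm SELinux is no longer enforcing and firewalld is stopped on every server
getenforce
systemctl is-active firewalld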
#Perform the following steps on master01
#Use the prepared cfssl.sh script to download the three official self-signed certificate tools
vim cfssl.sh
~
curl -L https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -o /usr/local/bin/cfssl
curl -L https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -o /usr/local/bin/cfssljson
curl -L https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 -o /usr/local/bin/cfssl-certinfo
chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson /usr/local/bin/cfssl-certinfo
~
sh cfssl.sh
#Alternatively, if the three binaries were downloaded in advance, install them manually:
===================================================================================
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
mv cfssl_linux-amd64 /usr/local/bin/cfssl
chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson /usr/local/bin/cfssl-certinfo
===================================================================================
#After running the script, three new commands are available; these are the tools used to issue certificates
which cfssl cfssljson cfssl-certinfo
~
/usr/local/bin/cfssl
/usr/local/bin/cfssljson
/usr/local/bin/cfssl-certinfo
~
#According to the cluster plan, prepare the etcd certificates for all cluster nodes on master01
#Use the prepared etcd-cert.sh script to issue certificates for all etcd nodes
vim etcd-cert.sh
~
#!/bin/bash
#Create the CA configuration file in JSON format
cat > ca-config.json <<FOF
{
"signing": {
"default": {
"expiry":"87600h"
},
"profiles": {
"www": {
"expiry": "87600h",
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
]
}
}
}
}
FOF
#Create the CA certificate signing request (CSR) in JSON format
cat > ca-csr.json <<FOF
{
"CN": "etcd CA",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "Beijing",
"ST": "Beijing"
}
]
}
FOF
#Initialize the CA: generate the self-signed CA certificate and key
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
#Create the server certificate signing request in JSON format
cat > server-csr.json <<FOF
{
"CN": "etcd",
"hosts": [
"192.168.100.101",
"192.168.100.102",
"192.168.100.103"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing"
}
]
}
FOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server
~
#Run the etcd-cert.sh script to generate the CA and server certificates
sh etcd-cert.sh
#Create a directory named etcd-cert and move all generated certificates into it
mkdir etcd-cert
mv ca* etcd-cert
mv server* etcd-cert
ls etcd-cert
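#Optional check (a sketch): inspect the issued server certificate with cfssl-certinfo and confirm the three etcd node IPs appear in the "sans" list
cfssl-certinfo -cert etcd-cert/server.pem | grep -A 5 '"sans"'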
##Deploy the etcd database cluster
#Perform the following steps on master01
wget https://github.com/etcd-io/etcd/releases/download/v3.3.12/etcd-v3.3.12-linux-amd64.tar.gz
tar xf etcd-v3.3.12-linux-amd64.tar.gz
ls etcd-v3.3.12-linux-amd64
#Create the etcd program directories and copy the etcd and etcdctl binaries into the bin directory
mkdir -p /opt/etcd/{bin,cfg,ssl}
cd etcd-v3.3.12-linux-amd64
mv etcd etcdctl /opt/etcd/bin/
ls /opt/etcd/bin/
cd
#Use the prepared etcd.sh script to generate the etcd configuration file and systemd unit
vim etcd.sh
~
#!/bin/bash
# example: ./etcd.sh etcd01 192.168.100.101 etcd02=https://192.168.100.102:2380,etcd03=https://192.168.100.103:2380
ETCD_NAME=$1
ETCD_IP=$2
ETCD_CLUSTER=$3
WORK_DIR=/opt/etcd
cat <<FOF > $WORK_DIR/cfg/etcd
#[Member]
ETCD_NAME="${ETCD_NAME}"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://${ETCD_IP}:2380"
ETCD_LISTEN_CLIENT_URLS="https://${ETCD_IP}:2379"
#[Clustering] #cluster membership settings
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://${ETCD_IP}:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://${ETCD_IP}:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://${ETCD_IP}:2380,${ETCD_CLUSTER}"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
FOF
cat <<FOF >/usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=${WORK_DIR}/cfg/etcd
ExecStart=${WORK_DIR}/bin/etcd \
--name=\${ETCD_NAME} \
--data-dir=\${ETCD_DATA_DIR} \
--listen-peer-urls=\${ETCD_LISTEN_PEER_URLS} \
--listen-client-urls=\${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \
--advertise-client-urls=\${ETCD_ADVERTISE_CLIENT_URLS} \
--initial-advertise-peer-urls=\${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
--initial-cluster=\${ETCD_INITIAL_CLUSTER} \
--initial-cluster-token=\${ETCD_INITIAL_CLUSTER_TOKEN} \
--initial-cluster-state=new \
--cert-file=${WORK_DIR}/ssl/server.pem \
--key-file=${WORK_DIR}/ssl/server-key.pem \
--peer-cert-file=${WORK_DIR}/ssl/server.pem \
--peer-key-file=${WORK_DIR}/ssl/server-key.pem \
--trusted-ca-file=${WORK_DIR}/ssl/ca.pem \
--peer-trusted-ca-file=${WORK_DIR}/ssl/ca.pem
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
FOF
systemctl daemon-reload
systemctl enable etcd
systemctl restart etcd
~
chmod +x etcd.sh
./etcd.sh etcd01 192.168.100.101 etcd02=https://192.168.100.102:2380,etcd03=https://192.168.100.103:2380
#The etcd service fails to start at this point; ignore it for now
#Check the etcd service status: it failed to start, and we will not deal with that here yet
systemctl status etcd
#Copy the etcd SSL certificates into the /opt/etcd/ssl directory
cp etcd-cert/ca*pem /opt/etcd/ssl/
cp etcd-cert/server*pem /opt/etcd/ssl/
ls /opt/etcd/ssl/
#Start etcd again (this will hang for a long time because it cannot reach the other two nodes yet)
systemctl start etcd
#Now deploy the other two etcd nodes; on master01, perform the following steps
#Copy the entire /opt/etcd directory from master01 to the other two nodes
scp -r /opt/etcd/ 192.168.100.102:/opt
scp -r /opt/etcd/ 192.168.100.103:/opt
#Copy the /usr/lib/systemd/system/etcd.service unit file from master01 to the other nodes
scp /usr/lib/systemd/system/etcd.service 192.168.100.102:/usr/lib/systemd/system/
scp /usr/lib/systemd/system/etcd.service 192.168.100.103:/usr/lib/systemd/system/
#Perform the following steps on master02 (192.168.100.102)
cd /opt/etcd/
vim cfg/etcd
~
#[Member]
ETCD_NAME="etcd02"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.100.102:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.100.102:2379"
#[Clustering] #cluster membership settings
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.100.102:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.100.102:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://192.168.100.101:2380,etcd02=https://192.168.100.102:2380,etcd03=https://192.168.100.103:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
~
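#The manual edit above can also be scripted; a sed sketch (run on the target node, adjusting the node name and IP accordingly):
sed -i -e 's/^ETCD_NAME=.*/ETCD_NAME="etcd02"/' \
    -e '/^ETCD_INITIAL_CLUSTER=/!s/192\.168\.100\.101/192.168.100.102/g' \
    /opt/etcd/cfg/etcd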
#Perform the following steps on node01 (192.168.100.103)
cd /opt/etcd/
vim cfg/etcd
~
#[Member]
ETCD_NAME="etcd03"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.100.103:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.100.103:2379"
#[Clustering] #cluster membership settings
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.100.103:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.100.103:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://192.168.100.101:2380,etcd02=https://192.168.100.102:2380,etcd03=https://192.168.100.103:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
~
#Start the etcd service on master02 (192.168.100.102)
systemctl start etcd
systemctl status etcd
#Start the etcd service on node01 (192.168.100.103)
systemctl start etcd
systemctl status etcd
#Run a cluster health/connectivity test from master01
systemctl restart etcd
/opt/etcd/bin/etcdctl --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.100.101:2379,https://192.168.100.102:2379,https://192.168.100.103:2379" cluster-health
~
member 601d277a027bd76d is healthy: got healthy result from https://192.168.100.101:2379
member 98ab0e6b3b7cb99b is healthy: got healthy result from https://192.168.100.102:2379
member e31a433e4d78d916 is healthy: got healthy result from https://192.168.100.103:2379
cluster is healthy
~
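#The same health check via the etcd v3 API (a sketch; etcd 3.3 ships both API versions):
ETCDCTL_API=3 /opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.100.101:2379,https://192.168.100.102:2379,https://192.168.100.103:2379" endpoint health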
##Install Docker on the Node nodes
#Perform the following steps on node01 (repeat the same steps on the other Node node)
#Switch from the local repo back to the online CentOS repos (the official repo files were stashed in the a/ directory earlier)
cd /etc/yum.repos.d/
mv local.repo a/
mv a/C* .
ls
CentOS-CR.repo CentOS-fasttrack.repo CentOS-Sources.repo
CentOS-Base.repo CentOS-Debuginfo.repo CentOS-Media.repo CentOS-Vault.repo
~
yum -y install yum-utils device-mapper-persistent-data lvm2
curl https://download.docker.com/linux/centos/docker-ce.repo -o /etc/yum.repos.d/docker-ce.repo
yum -y install docker-ce
systemctl start docker
systemctl enable docker
systemctl status docker
#Configure a domestic (China) Docker registry mirror
echo -e '{\n"registry-mirrors":[ "https://registry.docker-cn.com" ]\n}' > /etc/docker/daemon.json
systemctl daemon-reload
systemctl restart docker
docker version
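#Optional check (a sketch): confirm the registry mirror from daemon.json is active
docker info | grep -A 1 "Registry Mirrors"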
##Deploy the Flannel container network
#Perform the following steps on node01
#Download the flannel binary release
wget https://github.com/coreos/flannel/releases/download/v0.11.0/flannel-v0.11.0-linux-amd64.tar.gz
#Create the Kubernetes program directories
mkdir -p /opt/kubernetes/{bin,cfg,ssl}
tar xf flannel-v0.11.0-linux-amd64.tar.gz
#Copy the flanneld and mk-docker-opts.sh binaries into /opt/kubernetes/bin
mv flanneld mk-docker-opts.sh /opt/kubernetes/bin/
#On master01 (a member of the etcd cluster), write the Docker subnet configuration into etcd
/opt/etcd/bin/etcdctl --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.100.101:2379,https://192.168.100.102:2379,https://192.168.100.103:2379" set /coreos.com/network/config '{"Network":"172.17.0.0/16","Backend":{"Type":"vxlan"}}'
#On master01, read the subnet configuration back from etcd to confirm it was written
/opt/etcd/bin/etcdctl --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.100.101:2379,https://192.168.100.102:2379,https://192.168.100.103:2379" get /coreos.com/network/config
#Copy the certificates from master01 to the /opt/etcd/ssl directory on node01
cd /root/etcd-cert
scp ca.pem server*pem 192.168.100.103:/opt/etcd/ssl/
#On node01, verify the certificates were copied
cd /opt/etcd/ssl/
ls
#Write the flannel configuration script directly on node01
vim flannel.sh
~
#!/bin/bash
ETCD_ENDPOINTS=${1:-"https://192.168.100.101:2379,https://192.168.100.102:2379,https://192.168.100.103:2379"} #replace with your own etcd cluster endpoints
cat <<FOF >/opt/kubernetes/cfg/flanneld
FLANNEL_OPTIONS="--etcd-endpoints=${ETCD_ENDPOINTS} \
-etcd-cafile=/opt/etcd/ssl/ca.pem \
-etcd-certfile=/opt/etcd/ssl/server.pem \
-etcd-keyfile=/opt/etcd/ssl/server-key.pem"
FOF
cat <<FOF >/usr/lib/systemd/system/flanneld.service
[Unit]
Description=Flanneld overlay address etcd agent
After=network-online.target network.target
Before=docker.service
[Service]
Type=notify
EnvironmentFile=/opt/kubernetes/cfg/flanneld
ExecStart=/opt/kubernetes/bin/flanneld --ip-masq \$FLANNEL_OPTIONS
ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=on-failure
[Install]
WantedBy=multi-user.target
FOF
cat <<FOF >/usr/lib/systemd/system/docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=/run/flannel/subnet.env
ExecStart=/usr/bin/dockerd \$DOCKER_NETWORK_OPTIONS
ExecReload=/bin/kill -s HUP \$MAINPID
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
Delegate=yes
KillMode=process
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
FOF
systemctl daemon-reload
systemctl enable flanneld
systemctl restart flanneld
systemctl restart docker
systemctl enable docker
~
#Run the flannel.sh script on node01 (stop the docker service first, because the script rewrites the dockerd systemd unit file)
systemctl stop docker
systemctl disable docker
chmod +x flannel.sh
./flannel.sh
#Check that the etcd, flanneld, and docker services on node01 started correctly
systemctl status etcd | grep Active
systemctl status flanneld | grep Active
systemctl status docker | grep Active
#View the subnet information file generated on node01
cat /run/flannel/subnet.env
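#Optional check (a sketch): docker0 should now sit inside the flannel subnet reported in subnet.env
ip -4 addr show flannel.1 | grep inet
ip -4 addr show docker0 | grep inet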
##Deploy flannel on node02
#Perform the following on node01
#Copy the certificate directory to the other Node node
scp -r /opt/etcd/ 192.168.100.104:/opt/
#Copy flannel.sh from node01 to node02
scp flannel.sh 192.168.100.104:~/
#Perform the following steps on node02
tar xf flannel-v0.11.0-linux-amd64.tar.gz
mkdir -p /opt/kubernetes/{bin,cfg,ssl}
mv flanneld mk-docker-opts.sh /opt/kubernetes/bin/
ls /opt/kubernetes/bin/
systemctl stop docker
systemctl disable docker
./flannel.sh
systemctl status flanneld | grep Active
systemctl status docker | grep Active
cat /run/flannel/subnet.env
#From node01, test connectivity to node02's docker0 address (172.17.67.1)
ping 172.17.67.1
#From node02, test connectivity to node01's docker0 address (172.17.94.1)
ping 172.17.94.1
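#Optional container-level test from node01 (a sketch; assumes the busybox image can be pulled):
docker run --rm busybox ping -c 3 172.17.67.1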
#On master01, review the prepared k8s-cert.sh certificate script
vim k8s-cert.sh
~
#!/bin/bash
cat > ca-config.json <<FOF
{
"signing": {
"default": {
"expiry": "87600h"
},
"profiles": {
"kubernetes": {
"expiry": "87600h",
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
]
}
}
}
}
FOF
cat > ca-csr.json <<FOF
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "Beijing",
"ST": "Beijing",
"O": "k8s",
"OU": "System"
}
]
}
FOF
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
#-----------------------------
cat > server-csr.json <<FOF
{
"CN": "kubernetes",
"hosts": [
"10.0.0.1",
"127.0.0.1",
"192.168.100.105",
"192.168.100.106",
"192.168.100.101",
"192.168.100.102",
"192.168.100.100",
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
FOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
#--------------------------------------------
cat > admin-csr.json <<FOF
{
"CN": "admin",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "system:masters",
"OU": "System"
}
]
}
FOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
#------------------------------------------
cat > kube-proxy-csr.json <<FOF
{
"CN": "system:kube-proxy",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
FOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
~
#On master01, create a directory for the Kubernetes certificate files
#Copy the k8s-cert.sh script into the k8s-cert directory and run it to generate the certificates
mkdir k8s-cert
cp k8s-cert.sh k8s-cert/
cd k8s-cert
chmod +x k8s-cert.sh
./k8s-cert.sh
ls
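#Optional check (a sketch): confirm the apiserver certificate carries the expected IPs and DNS names in its SAN list
openssl x509 -in server.pem -noout -text | grep -A 1 "Subject Alternative Name"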
##Deploy the Master01 components (apiserver, controller-manager, scheduler)
#On master01, download the Kubernetes server binary package, version v1.12.1
wget https://dl.k8s.io/v1.12.1/kubernetes-server-linux-amd64.tar.gz
#Create the Kubernetes program directories
mkdir -p /opt/kubernetes/{bin,cfg,ssl}
tar xf kubernetes-server-linux-amd64.tar.gz
cd kubernetes/server/bin/
cp kube-apiserver kubectl kube-controller-manager kube-scheduler /opt/kubernetes/bin/
ls /opt/kubernetes/bin/
#First make sure the required certificates are present under /opt/etcd/ssl
ls /opt/etcd/ssl/
#Copy the Kubernetes certificates into the target directory
cd /root/k8s-cert
cp ca.pem ca-key.pem server*.pem /opt/kubernetes/ssl/
ls /opt/kubernetes/ssl/
cd
#Review the prepared apiserver.sh script
vim apiserver.sh
~
#!/bin/bash
MASTER_ADDRESS=$1
ETCD_SERVERS=$2
WORK_DIR=/opt/kubernetes
cat <<EOF >${WORK_DIR}/cfg/kube-apiserver
KUBE_APISERVER_OPTS="--logtostderr=false \\
--log-dir=${WORK_DIR}/logs \\
--v=4 \\
--etcd-servers=${ETCD_SERVERS} \\
--bind-address=${MASTER_ADDRESS} \\
--secure-port=6443 \\
--advertise-address=${MASTER_ADDRESS} \\
--allow-privileged=true \\
--service-cluster-ip-range=10.0.0.0/24 \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \\
--authorization-mode=RBAC,Node \\
--kubelet-https=true \\
--enable-bootstrap-token-auth \\
--token-auth-file=${WORK_DIR}/cfg/token.csv \\
--service-node-port-range=30000-50000 \\
--tls-cert-file=${WORK_DIR}/ssl/server.pem \\
--tls-private-key-file=${WORK_DIR}/ssl/server-key.pem \\
--client-ca-file=${WORK_DIR}/ssl/ca.pem \\
--service-account-key-file=${WORK_DIR}/ssl/ca-key.pem \\
--etcd-cafile=/opt/etcd/ssl/ca.pem \\
--etcd-certfile=/opt/etcd/ssl/server.pem \\
--etcd-keyfile=/opt/etcd/ssl/server-key.pem"
EOF
cat <<EOF >/usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-${WORK_DIR}/cfg/kube-apiserver
ExecStart=${WORK_DIR}/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kube-apiserver
systemctl restart kube-apiserver
~
#Use the script to generate the kube-apiserver configuration file and systemd unit
chmod +x apiserver.sh
./apiserver.sh 192.168.100.101 https://192.168.100.101:2379,https://192.168.100.102:2379,https://192.168.100.103:2379
#View the kube-apiserver configuration file
cat /opt/kubernetes/cfg/kube-apiserver
#Do not start kube-apiserver yet: the token.csv authentication file referenced in its configuration has not been generated
#Generate a random token (16 bytes, shown as 32 hex characters) to use as the bootstrap token
head -c 16 /dev/urandom | od -An -t x | tr -d ' ' > /opt/kubernetes/cfg/token.csv
#Edit token.csv and append the user name, UID, and group, so the file looks like this:
vim /opt/kubernetes/cfg/token.csv
~
df3334281501df44c2bea4db952c1ee8,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
~
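#Equivalent one-step version (a sketch): generate the token and write the complete token.csv without the manual vim edit
BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
echo "${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,\"system:kubelet-bootstrap\"" > /opt/kubernetes/cfg/token.csv
cat /opt/kubernetes/cfg/token.csv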
#Start kube-apiserver
systemctl daemon-reload
systemctl restart kube-apiserver
ps -ef | grep kube-apiserver
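#Optional check (a sketch): the insecure local port (8080 by default in v1.12) should answer health probes
curl http://127.0.0.1:8080/healthz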
##Deploy the kube-controller-manager component
#On master01, review the prepared controller-manager.sh script
vim controller-manager.sh
~
#!/bin/bash
MASTER_ADDRESS=$1
cat <<FOF >/opt/kubernetes/cfg/kube-controller-manager
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=true \\
--v=4 \\
--master=${MASTER_ADDRESS}:8080 \\
--leader-elect=true \\
--address=127.0.0.1 \\
--service-cluster-ip-range=10.0.0.0/24 \\
--cluster-name=kubernetes \\
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--root-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--experimental-cluster-signing-duration=87600h0m0s"
FOF
cat <<FOF >/usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-controller-manager
ExecStart=/opt/kubernetes/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
FOF
systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl start kube-controller-manager
~
chmod +x controller-manager.sh
./controller-manager.sh 127.0.0.1
ps -ef | grep kube
##Deploy the kube-scheduler component
#On master01, review the prepared scheduler.sh script
vim scheduler.sh
~
#!/bin/bash
MASTER_ADDRESS=$1
cat <<FOF >/opt/kubernetes/cfg/kube-scheduler
KUBE_SCHEDULER_OPTS="--logtostderr=true \\
--v=4 \\
--master=${MASTER_ADDRESS}:8080 \\
--leader-elect"
FOF
cat <<FOF >/usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-scheduler
ExecStart=/opt/kubernetes/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
FOF
systemctl daemon-reload
systemctl enable kube-scheduler
systemctl restart kube-scheduler
~
chmod +x scheduler.sh
./scheduler.sh 127.0.0.1
ps -ef | grep kube-scheduler | grep -v grep
#View the kube-scheduler configuration file:
cat /opt/kubernetes/cfg/kube-scheduler
#Finally, check the health of the master components
#Create symlinks for the Kubernetes commands
ln -s /opt/kubernetes/bin/* /usr/local/bin/
#Run the cluster health check command
kubectl get cs
#On the master node, bind the kubelet-bootstrap user to the system:node-bootstrapper cluster role (used when Nodes authenticate to the apiserver during bootstrap)
which kubectl
kubectl create clusterrolebinding kubelet-bootstrap \
--clusterrole=system:node-bootstrapper \
--user=kubelet-bootstrap
#On master01, review the prepared kubeconfig.sh script
#Note: BOOTSTRAP_TOKEN in the script must be set to your own token
#Note: KUBE_APISERVER in the script must be set to your master node's address
vim kubeconfig.sh
~
#!/bin/bash
#Create the kubelet bootstrapping kubeconfig
BOOTSTRAP_TOKEN=`cat /opt/kubernetes/cfg/token.csv |awk -F',' '{print $1}'`
KUBE_APISERVER="https://192.168.100.101:6443"
#Set cluster parameters
kubectl config set-cluster kubernetes \
--certificate-authority=./ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=bootstrap.kubeconfig
#Set client authentication parameters
kubectl config set-credentials kubelet-bootstrap \
--token=${BOOTSTRAP_TOKEN} \
--kubeconfig=bootstrap.kubeconfig
#Set context parameters
kubectl config set-context default \
--cluster=kubernetes \
--user=kubelet-bootstrap \
--kubeconfig=bootstrap.kubeconfig
#Set the default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
#-----------------------------------------
#Create the kube-proxy kubeconfig file
kubectl config set-cluster kubernetes \
--certificate-authority=./ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=kube-proxy.kubeconfig
kubectl config set-credentials kube-proxy \
--client-certificate=./kube-proxy.pem \
--client-key=./kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=kube-proxy.kubeconfig
kubectl config set-context default \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig=kube-proxy.kubeconfig
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
~
#Copy the script into the /root/k8s-cert/ directory
#The script must be run from the directory holding the previously generated cluster certificates, since it references them by relative path
chmod +x kubeconfig.sh
cp kubeconfig.sh k8s-cert/
cd k8s-cert/
./kubeconfig.sh
#View the generated files
ls
(bootstrap.kubeconfig kube-proxy.kubeconfig)
##Copy the two files just generated on master01 to the /opt/kubernetes/cfg directory on each Node node
scp bootstrap.kubeconfig kube-proxy.kubeconfig 192.168.100.103:/opt/kubernetes/cfg/
scp bootstrap.kubeconfig kube-proxy.kubeconfig 192.168.100.104:/opt/kubernetes/cfg/
##Deploy the kubelet component on the Node nodes
##Perform the following steps on master01
#Copy the kubelet and kube-proxy binaries from the Kubernetes server package to the Node nodes
cd /root/kubernetes/server/bin
scp kubelet kube-proxy 192.168.100.103:/opt/kubernetes/bin/
scp kubelet kube-proxy 192.168.100.104:/opt/kubernetes/bin/
#On the Node nodes, verify the binaries were copied
cd /opt/kubernetes/bin/
ls
#Switch to node01 and write the kubelet.sh script
vim kubelet.sh
~
#!/bin/bash
NODE_ADDRESS=$1
DNS_SERVER_IP=${2:-"10.0.0.2"}
cat <<FOF >/opt/kubernetes/cfg/kubelet
KUBELET_OPTS="--logtostderr=true \\
--v=4 \\
--hostname-override=${NODE_ADDRESS} \\
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \\
--bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \\
--config=/opt/kubernetes/cfg/kubelet.config \\
--cert-dir=/opt/kubernetes/ssl \\
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"
FOF
cat <<FOF >/opt/kubernetes/cfg/kubelet.config
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: ${NODE_ADDRESS}
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS: ["${DNS_SERVER_IP}"]
clusterDomain: cluster.local.
failSwapOn: false
authentication:
  anonymous:
    enabled: true
FOF
cat <<FOF >/usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kubelet
ExecStart=/opt/kubernetes/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
KillMode=process
[Install]
WantedBy=multi-user.target
FOF
systemctl daemon-reload
systemctl enable kubelet
systemctl restart kubelet
~
chmod +x kubelet.sh
./kubelet.sh 192.168.100.103
#Check whether kubelet is running
ps -ef | grep kubelet
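#If kubelet is not running, the journal usually shows why (a sketch):
journalctl -u kubelet --no-pager | tail -n 20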
#Deploy kube-proxy
#Write the kube-proxy.sh script
vim kube-proxy.sh
~
#!/bin/bash
NODE_ADDRESS=192.168.100.103
WORK_DIR=/opt/kubernetes
cat <<EOF >${WORK_DIR}/cfg/kube-proxy
KUBE_PROXY_OPTS="--logtostderr=false \\
--log-dir=${WORK_DIR}/logs \\
--v=4 \\
--hostname-override=${NODE_ADDRESS} \\
--cluster-cidr=10.0.0.0/24 \\
--proxy-mode=ipvs \\
--kubeconfig=${WORK_DIR}/cfg/kube-proxy.kubeconfig"
EOF
cat <<EOF >/usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Proxy
After=network.target
[Service]
EnvironmentFile=-${WORK_DIR}/cfg/kube-proxy
ExecStart=${WORK_DIR}/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kube-proxy
systemctl restart kube-proxy
~
chmod +x kube-proxy.sh
./kube-proxy.sh
#On the master, approve the Node's request to join the cluster
kubectl get csr
~
NAME AGE REQUESTOR CONDITION
node-csr-4sSW4lPHSdlSJSQjXtEMXKSofVzh6Fv-iRWlbe8_P6I 24m kubelet-bootstrap Pending
~
#Approve the request manually: run kubectl certificate approve followed by the CSR name
kubectl certificate approve node-csr-4sSW4lPHSdlSJSQjXtEMXKSofVzh6Fv-iRWlbe8_P6I
#Check the request status again (Approved,Issued means it has been approved and the certificate issued)
kubectl get csr
~
NAME AGE REQUESTOR CONDITION
node-csr-4sSW4lPHSdlSJSQjXtEMXKSofVzh6Fv-iRWlbe8_P6I 26m kubelet-bootstrap Approved,Issued
~
#On the master, view the Node that has been issued a certificate
kubectl get node
##Deploy a second Node
#The contents of the /opt/kubernetes directory are the same on every Node
#So simply copy the /opt/kubernetes directory from node01 to the new node
scp -r /opt/kubernetes 192.168.100.104:/opt
#Copy the systemd unit files from node01 to the same directory on node02
scp -r /usr/lib/systemd/system/{kubelet,kube-proxy}.service 192.168.100.104:/usr/lib/systemd/system/
#Remove all certificates from /opt/kubernetes/ssl/ on node02 (they were issued for node01)
cd /opt/kubernetes/ssl/
rm -rf *
#Go to the /opt/kubernetes/cfg directory on the new Node
#Change the IP address in the kubelet, kubelet.config, and kube-proxy files to the new Node's IP address
cd /opt/kubernetes/cfg/
grep "103" *
kubelet:--hostname-override=192.168.100.103 \
kubelet.config:address: 192.168.100.103
kube-proxy:--hostname-override=192.168.100.103 \
#Change 192.168.100.103 to 192.168.100.104 in the lines above (a sed sketch follows)
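#A sed sketch for the same change (run inside /opt/kubernetes/cfg on the new node):
sed -i 's/192\.168\.100\.103/192.168.100.104/g' kubelet kubelet.config kube-proxy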
#After the changes, start kubelet and kube-proxy
systemctl start kubelet
systemctl start kube-proxy
#Back on master01
#Manually approve the new node's request to join the cluster
kubectl get csr
kubectl certificate approve node-csr-BHffakfU5vthjszKVgn60WviaL0OcBuULoiBMbvG6Qc #use the CSR name from your own cluster
kubectl get node
##Log in to the image registry on both Docker nodes
docker login
Username (wangqian123): wangqian123
Password: 000000
##Run a test example to verify the cluster is working
#Create and run an nginx pod
kubectl run nginx --image=nginx
#Check the pod: READY 0/1 means it is not ready yet, and STATUS shows the container is still being created
kubectl get pod
#Expose port 80 of the nginx deployment so it can be reached from outside the cluster
kubectl expose deployment nginx --port=80 --target-port=80 --type=NodePort
#View the cluster services
kubectl get svc
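#Optional check (a sketch): read back the assigned NodePort and test it from a Node IP (node01 is used here)
NODE_PORT=$(kubectl get svc nginx -o jsonpath='{.spec.ports[0].nodePort}')
curl -I http://192.168.100.103:${NODE_PORT}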
##Access the Pod for testing within the K8S cluster (on both Node nodes)
#Note: if this lab is not completed in one day, the flanneld service on the Node VMs may have died; in that case restart the flanneld and docker services
#On the master, view the pod's access log
kubectl get pods
~
NAME READY STATUS RESTARTS AGE
nginx-7db9fccd9b-2png9 1/1 Running 0 72m
~
kubectl logs nginx-7db9fccd9b-2png9
#Bind the cluster-admin cluster role to the anonymous user system:anonymous (this works around the authorization error otherwise seen when running kubectl logs)
kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=system:anonymous
##Deploy the Web UI (Dashboard)
#Perform the following steps on master01
#Go into the extracted Kubernetes package directory
cd kubernetes
tar xf kubernetes-src.tar.gz
cd cluster/addons/dashboard/
#Create the dashboard components
kubectl create -f dashboard-configmap.yaml
kubectl create -f dashboard-rbac.yaml
kubectl create -f dashboard-secret.yaml
#Change the image address on line 34 of dashboard-controller.yaml, as shown below:
sed -n '34p' dashboard-controller.yaml
~before the change
image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3
~
sed -n '34p' dashboard-controller.yaml
~after the change
image: registry.cn-hangzhou.aliyuncs.com/google_containers/kubernetes-dashboard-amd64:v1.10.0
~
kubectl create -f dashboard-controller.yaml
#Check whether the dashboard pod is running
kubectl get pods
#After specifying the namespace, the dashboard pod is visible
kubectl get pods -n kube-system
~
NAME READY STATUS RESTARTS AGE
kubernetes-dashboard-6d8f4f4df8-8xvrh 0/1 ContainerCreating 0 81s
~
#Check the dashboard pod's logs to see whether it started successfully
kubectl logs kubernetes-dashboard-6d8f4f4df8-8xvrh -n kube-system
#Open the dashboard-service.yaml file and add one line
vim dashboard-service.yaml
~
apiVersion: v1
kind: Service
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  type: NodePort  ##add this line
  selector:
    k8s-app: kubernetes-dashboard
  ports:
  - port: 443
    targetPort: 8443
~
kubectl create -f dashboard-service.yaml
#Check which Node the dashboard pod was scheduled on
kubectl get pods -o wide -n kube-system
~
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
kubernetes-dashboard-6d8f4f4df8-8xvrh 0/1 ContainerCreating 0 5m51s <none> 192.168.100.103 <none> <none>
~
#View the dashboard's internal port and external (NodePort) access port
kubectl get svc -n kube-system
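#Optional (a sketch): print the dashboard URL to open in a browser, using the IP of the Node running the dashboard pod
DASH_PORT=$(kubectl -n kube-system get svc kubernetes-dashboard -o jsonpath='{.spec.ports[0].nodePort}')
echo "https://192.168.100.103:${DASH_PORT}"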
#Use the prepared k8s-admin.yaml file to create a service account and obtain an identity token
vim k8s-admin.yaml
~
apiVersion: v1
kind: ServiceAccount
metadata:
  name: dashboard-admin
  namespace: kube-system
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: dashboard-admin
subjects:
- kind: ServiceAccount
  name: dashboard-admin
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
~
#Create the service account and role binding (this generates the token)
kubectl create -f k8s-admin.yaml
#List the secrets to find the token name
kubectl get secret -n kube-system
#The secret whose name starts with "dashboard-admin-token-" holds the token; the generated suffix differs each time
kubectl describe secret dashboard-admin-token-57bxb -n kube-system
#Copy the token and paste it into the dashboard login page
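#Non-interactive alternative (a sketch): print the token directly instead of copying it from the describe output
kubectl -n kube-system get secret $(kubectl -n kube-system get secret | awk '/dashboard-admin-token/{print $1}') -o jsonpath='{.data.token}' | base64 -d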