#All servers must have selinux and firewalld turned off
systemctl stop firewalld
systemctl disable firewalld
setenforce 0
sed -i '7 s/enforcing/disabled/' /etc/selinux/config
sed -n '7p' /etc/selinux/config
sed -i "17a GATEWAY=192.168.100.2\nDNS1=192.168.100.2" /etc/sysconfig/network-scripts/ifcfg-ens32
systemctl restart network
~
SELINUX=disabled
~
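#Optional sanity check that the settings took effect (getenforce should print Permissive now, Disabled after a reboot):
getenforce
systemctl is-active firewalld
systemctl is-enabled firewalld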
#Perform the following operations on master01
#Use the prepared cfssl.sh script to download the three official self-signed certificate tools
vim cfssl.sh
~
curl -L https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -o /usr/local/bin/cfssl
curl -L https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -o /usr/local/bin/cfssljson
curl -L https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 -o /usr/local/bin/cfssl-certinfo
chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson /usr/local/bin/cfssl-certinfo
~
sh cfssl.sh
===================================================================================
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
mv cfssl_linux-amd64 /usr/local/bin/cfssl
chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson /usr/local/bin/cfssl-certinfo
===================================================================================
#After it runs, three new commands are available; these are the tools we use to issue certificates
which cfssl cfssljson cfssl-certinfo
~
/usr/local/bin/cfssl
/usr/local/bin/cfssljson
/usr/local/bin/cfssl-certinfo
~
#Following the cluster plan, prepare the etcd certificates for all cluster nodes on master01
#Use the prepared etcd-cert.sh script to issue certificates for all etcd nodes
vim etcd-cert.sh
~
#!/bin/bash
#Create the CA certificate configuration file in JSON format
cat > ca-config.json <<FOF
{
"signing": {
"default": {
"expiry":"87600h"
},
"profiles": {
"www": {
"expiry": "87600h",
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
]
}
}
}
}
FOF
#Create the CA certificate signing request (CSR) in JSON format
cat > ca-csr.json <<FOF
{
"CN": "etcd CA",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "Beijing",
"ST": "Beijing"
}
]
}
FOF
#Initialize the CA from the JSON CSR (generates the self-signed CA certificate)
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
#Create the server certificate signing request in JSON format
cat > server-csr.json <<FOF
{
"CN": "etcd",
"hosts": [
"192.168.100.101",
"192.168.100.102",
"192.168.100.103"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing"
}
]
}
FOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server
~
#Run the etcd-cert.sh script to generate the CA and server certificates
sh etcd-cert.sh
#Create a certificate directory named etcd-cert and collect all the generated certificates there
mkdir etcd-cert
mv ca* etcd-cert
mv server* etcd-cert
ls etcd-cert
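#Optional: inspect the issued etcd server certificate (hosts, expiry) with the cfssl-certinfo tool downloaded earlier
cfssl-certinfo -cert etcd-cert/server.pem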
##Deploy the etcd database cluster
#Perform the following on master01
wget https://github.com/etcd-io/etcd/releases/download/v3.3.12/etcd-v3.3.12-linux-amd64.tar.gz
tar xf etcd-v3.3.12-linux-amd64.tar.gz
ls etcd-v3.3.12-linux-amd64
#Create the etcd program directories and copy the etcd and etcdctl binaries into the bin directory
mkdir -p /opt/etcd/{bin,cfg,ssl}
cd etcd-v3.3.12-linux-amd64
mv etcd etcdctl /opt/etcd/bin/
ls /opt/etcd/bin/
cd
#Use the prepared etcd.sh script to generate the etcd configuration file and systemd unit
vim etcd.sh
~
#!/bin/bash
# example: ./etcd.sh etcd01 192.168.100.101 etcd02=https://192.168.100.102:2380,etcd03=https://192.168.100.103:2380
ETCD_NAME=$1
ETCD_IP=$2
ETCD_CLUSTER=$3
WORK_DIR=/opt/etcd
cat <<FOF > $WORK_DIR/cfg/etcd
#[Member]
ETCD_NAME="${ETCD_NAME}"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://${ETCD_IP}:2380"
ETCD_LISTEN_CLIENT_URLS="https://${ETCD_IP}:2379"
#[Clustering] #cluster membership information
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://${ETCD_IP}:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://${ETCD_IP}:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://${ETCD_IP}:2380,${ETCD_CLUSTER}"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
FOF
cat <<FOF >/usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=${WORK_DIR}/cfg/etcd
ExecStart=${WORK_DIR}/bin/etcd \
--name=\${ETCD_NAME} \
--data-dir=\${ETCD_DATA_DIR} \
--listen-peer-urls=\${ETCD_LISTEN_PEER_URLS} \
--listen-client-urls=\${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \
--advertise-client-urls=\${ETCD_ADVERTISE_CLIENT_URLS} \
--initial-advertise-peer-urls=\${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
--initial-cluster=\${ETCD_INITIAL_CLUSTER} \
--initial-cluster-token=\${ETCD_INITIAL_CLUSTER_TOKEN} \
--initial-cluster-state=new \
--cert-file=${WORK_DIR}/ssl/server.pem \
--key-file=${WORK_DIR}/ssl/server-key.pem \
--peer-cert-file=${WORK_DIR}/ssl/server.pem \
--peer-key-file=${WORK_DIR}/ssl/server-key.pem \
--trusted-ca-file=${WORK_DIR}/ssl/ca.pem \
--peer-trusted-ca-file=${WORK_DIR}/ssl/ca.pem
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
FOF
systemctl daemon-reload
systemctl enable etcd
systemctl restart etcd
~
chmod +x etcd.sh
./etcd.sh etcd01 192.168.100.101 etcd02=https://192.168.100.102:2380,etcd03=https://192.168.100.103:2380
#The etcd service fails to start; leave it alone for now
#Check the etcd service status: it failed to start, and we will not deal with that here yet
systemctl status etcd
#Copy the etcd SSL certificates into the /opt/etcd/ssl directory
cp etcd-cert/ca*pem /opt/etcd/ssl/
cp etcd-cert/server*pem /opt/etcd/ssl/
ls /opt/etcd/ssl/
#Start etcd again (this will block for quite a while, because the other two nodes cannot be reached yet)
systemctl start etcd
#Now deploy the other two etcd nodes; perform the following on master01
#Copy the entire /opt/etcd directory from master01 to the other two nodes
scp -r /opt/etcd/ 192.168.100.102:/opt
scp -r /opt/etcd/ 192.168.100.103:/opt
#Copy the /usr/lib/systemd/system/etcd.service file from master01 to the other nodes
scp /usr/lib/systemd/system/etcd.service 192.168.100.102:/usr/lib/systemd/system/
scp /usr/lib/systemd/system/etcd.service 192.168.100.103:/usr/lib/systemd/system/
#Perform the following on master02 (192.168.100.102)
cd /opt/etcd/
vim cfg/etcd
~
#[Member]
ETCD_NAME="etcd02"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.100.102:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.100.102:2379"
#[Clustering] #cluster membership information
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.100.102:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.100.102:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://192.168.100.101:2380,etcd02=https://192.168.100.102:2380,etcd03=https://192.168.100.103:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
~
#Perform the following on node01 (192.168.100.103)
cd /opt/etcd/
vim cfg/etcd
~
#[Member]
ETCD_NAME="etcd03"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.100.103:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.100.103:2379"
#[Clustering] #cluster membership information
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.100.103:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.100.103:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://192.168.100.101:2380,etcd02=https://192.168.100.102:2380,etcd03=https://192.168.100.103:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
~
#Start the etcd service on master02 (192.168.100.102)
systemctl start etcd
systemctl status etcd
#Start the etcd service on node01 (192.168.100.103)
systemctl start etcd
systemctl status etcd
#Test cluster connectivity from master01
systemctl restart etcd
/opt/etcd/bin/etcdctl --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.100.101:2379,https://192.168.100.102:2379,https://192.168.100.103:2379" cluster-health
~
member 601d277a027bd76d is healthy: got healthy result from https://192.168.100.101:2379
member 98ab0e6b3b7cb99b is healthy: got healthy result from https://192.168.100.102:2379
member e31a433e4d78d916 is healthy: got healthy result from https://192.168.100.103:2379
cluster is healthy
~
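#Optional cross-check with the same etcdctl flags: list the cluster members and their peer/client URLs
/opt/etcd/bin/etcdctl --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.100.101:2379,https://192.168.100.102:2379,https://192.168.100.103:2379" member list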
##Install Docker on the Node nodes
#Perform the following on node01 (the other node nodes need the same steps)
cd /etc/yum.repos.d/
mv local.repo a/
mv a/C* .
ls
CentOS-CR.repo CentOS-fasttrack.repo CentOS-Sources.repo
CentOS-Base.repo CentOS-Debuginfo.repo CentOS-Media.repo CentOS-Vault.repo
~
yum -y install yum-utils device-mapper-persistent-data lvm2
curl https://download.docker.com/linux/centos/docker-ce.repo -o /etc/yum.repos.d/docker-ce.repo
yum -y install docker-ce
systemctl start docker
systemctl enable docker
systemctl status docker
#Add a domestic (China) Docker registry mirror
echo -e '{\n"registry-mirrors":[ "https://registry.docker-cn.com" ]\n}' > /etc/docker/daemon.json
systemctl daemon-reload
systemctl restart docker
docker version
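#Optional: confirm the mirror was picked up (output layout may differ slightly between Docker versions)
docker info | grep -A 1 "Registry Mirrors"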
##Deploy the Flannel container cluster network
#Perform the following on node01
#Download the flannel binary package
wget https://github.com/coreos/flannel/releases/download/v0.11.0/flannel-v0.11.0-linux-amd64.tar.gz
#Create the K8S program directories
mkdir -p /opt/kubernetes/{bin,cfg,ssl}
tar xf flannel-v0.11.0-linux-amd64.tar.gz
#Copy the flanneld and mk-docker-opts.sh binaries into /opt/kubernetes/bin
mv flanneld mk-docker-opts.sh /opt/kubernetes/bin/
#On master01 (a master node of the etcd cluster), write the docker subnet range into the etcd database
/opt/etcd/bin/etcdctl --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.100.101:2379,https://192.168.100.102:2379,https://192.168.100.103:2379" set /coreos.com/network/config '{"Network":"172.17.0.0/16","Backend":{"Type":"vxlan"}}'
#On master01, read the subnet information back from the etcd database
/opt/etcd/bin/etcdctl --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.100.101:2379,https://192.168.100.102:2379,https://192.168.100.103:2379" get /coreos.com/network/config
#Copy the certificate files from master01 into /opt/etcd/ssl on node01
cd /root/etcd-cert
scp ca.pem server*pem 192.168.100.103:/opt/etcd/ssl/
#On node01, verify that the certificate files arrived
cd /opt/etcd/ssl/
ls
#Write the flannel configuration script directly on node01
vim flannel.sh
~
#!/bin/bash
ETCD_ENDPOINTS=${1:-"https://192.168.100.101:2379,https://192.168.100.102:2379,https://192.168.100.103:2379"} #replace with your etcd cluster IPs
cat <<FOF >/opt/kubernetes/cfg/flanneld
FLANNEL_OPTIONS="--etcd-endpoints=${ETCD_ENDPOINTS} \
-etcd-cafile=/opt/etcd/ssl/ca.pem \
-etcd-certfile=/opt/etcd/ssl/server.pem \
-etcd-keyfile=/opt/etcd/ssl/server-key.pem"
FOF
cat <<FOF >/usr/lib/systemd/system/flanneld.service
[Unit]
Description=Flanneld overlay address etcd agent
After=network-online.target network.target
Before=docker.service
[Service]
Type=notify
EnvironmentFile=/opt/kubernetes/cfg/flanneld
ExecStart=/opt/kubernetes/bin/flanneld --ip-masq \$FLANNEL_OPTIONS
ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=on-failure
[Install]
WantedBy=multi-user.target
FOF
cat <<FOF >/usr/lib/systemd/system/docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=/run/flannel/subnet.env
ExecStart=/usr/bin/dockerd \$DOCKER_NETWORK_OPTIONS
ExecReload=/bin/kill -s HUP \$MAINPID
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
Delegate=yes
KillMode=process
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
FOF
systemctl daemon-reload
systemctl enable flanneld
systemctl restart flanneld
systemctl restart docker
systemctl enable docker
~
#Run the flannel.sh script on node01 (stop the docker service first, because the script regenerates the dockerd unit file)
systemctl stop docker
systemctl disable docker
chmod +x flannel.sh
./flannel.sh
#Check that the etcd, flanneld and dockerd services are running normally on node01
systemctl status etcd | grep Active
systemctl status flanneld | grep Active
systemctl status docker | grep Active
#View the subnet information file generated on node01
cat /run/flannel/subnet.env
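#Optional check: with the vxlan backend, the flannel.1 and docker0 interfaces should sit in the same /24 taken from the 172.17.0.0/16 range written into etcd
ip -4 addr show flannel.1
ip -4 addr show docker0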
##Deploy flannel on the node02 node
#On node01
#Copy the certificate files to the other node nodes
scp -r /opt/etcd/ 192.168.100.104:/opt/
#Copy node01's flannel.sh to node02
scp flannel.sh 192.168.100.104:~/
#Perform the following on node02
tar xf flannel-v0.11.0-linux-amd64.tar.gz
mkdir -p /opt/kubernetes/{bin,cfg,ssl}
mv flanneld mk-docker-opts.sh /opt/kubernetes/bin/
ls /opt/kubernetes/bin/
systemctl stop docker
systemctl disable docker
./flannel.sh
systemctl status flanneld | grep Active
systemctl status docker | grep Active
cat /run/flannel/subnet.env
#From node01, test connectivity to node02's docker0 (172.17.67.1)
ping 172.17.67.1
#From node02, test connectivity to node01's docker0 (172.17.94.1)
ping 172.17.94.1
#On master01, review the prepared k8s-cert.sh certificate script
vim k8s-cert.sh
~
#!/bin/bash
cat > ca-config.json <<FOF
{
"signing": {
"default": {
"expiry": "87600h"
},
"profiles": {
"kubernetes": {
"expiry": "87600h",
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
]
}
}
}
}
FOF
cat > ca-csr.json <<FOF
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "Beijing",
"ST": "Beijing",
"O": "k8s",
"OU": "System"
}
]
}
FOF
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
#-----------------------------
cat > server-csr.json <<FOF
{
"CN": "kubernetes",
"hosts": [
"10.0.0.1",
"127.0.0.1",
"192.168.100.105",
"192.168.100.106",
"192.168.100.101",
"192.168.100.102",
"192.168.100.100",
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
FOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
#--------------------------------------------
cat > admin-csr.json <<FOF
{
"CN": "admin",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "system:masters",
"OU": "System"
}
]
}
FOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
#------------------------------------------
cat > kube-proxy-csr.json <<FOF
{
"CN": "system:kube-proxy",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
FOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
~
#On master01, create a directory for the k8s certificate files
#Copy the k8s-cert.sh script into the k8s-cert directory and run it to generate the certificate files
mkdir k8s-cert
cp k8s-cert.sh k8s-cert/
cd k8s-cert
chmod +x k8s-cert.sh
./k8s-cert.sh
ls
##Deploy the Master01 components (apiserver, controller-manager, scheduler)
#On master01, download the kubernetes binary package, version v1.12.1
wget https://dl.k8s.io/v1.12.1/kubernetes-server-linux-amd64.tar.gz
#Create the kubernetes program directories
mkdir -p /opt/kubernetes/{bin,cfg,ssl}
tar xf kubernetes-server-linux-amd64.tar.gz
cd kubernetes/server/bin/
cp kube-apiserver kubectl kube-controller-manager kube-scheduler /opt/kubernetes/bin/
ls /opt/kubernetes/bin/
#First make sure the required certificates are present under /opt/etcd/ssl
ls /opt/etcd/ssl/
#Copy the K8S certificates into the target directory
cd /root/k8s-cert
cp ca.pem ca-key.pem server*.pem /opt/kubernetes/ssl/
ls /opt/kubernetes/ssl/
cd
#Review the prepared apiserver.sh script
vim apiserver.sh
~
#!/bin/bash
MASTER_ADDRESS=$1
ETCD_SERVERS=$2
WORK_DIR=/opt/kubernetes
cat <<EOF >${WORK_DIR}/cfg/kube-apiserver
KUBE_APISERVER_OPTS="--logtostderr=false \\
--log-dir=${WORK_DIR}/logs \\
--v=4 \\
--etcd-servers=${ETCD_SERVERS} \\
--bind-address=${MASTER_ADDRESS} \\
--secure-port=6443 \\
--advertise-address=${MASTER_ADDRESS} \\
--allow-privileged=true \\
--service-cluster-ip-range=10.0.0.0/24 \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \\
--authorization-mode=RBAC,Node \\
--kubelet-https=true \\
--enable-bootstrap-token-auth \\
--token-auth-file=${WORK_DIR}/cfg/token.csv \\
--service-node-port-range=30000-50000 \\
--tls-cert-file=${WORK_DIR}/ssl/server.pem \\
--tls-private-key-file=${WORK_DIR}/ssl/server-key.pem \\
--client-ca-file=${WORK_DIR}/ssl/ca.pem \\
--service-account-key-file=${WORK_DIR}/ssl/ca-key.pem \\
--etcd-cafile=/opt/etcd/ssl/ca.pem \\
--etcd-certfile=/opt/etcd/ssl/server.pem \\
--etcd-keyfile=/opt/etcd/ssl/server-key.pem"
EOF
cat <<EOF >/usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-${WORK_DIR}/cfg/kube-apiserver
ExecStart=${WORK_DIR}/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kube-apiserver
systemctl restart kube-apiserver
~
#Use the script to generate the apiserver config file and systemd unit
chmod +x apiserver.sh
./apiserver.sh 192.168.100.101 https://192.168.100.101:2379,https://192.168.100.102:2379,https://192.168.100.103:2379
#View the kube-apiserver config file
cat /opt/kubernetes/cfg/kube-apiserver
#We cannot start the apiserver yet, because the token.csv file referenced in its config has not been generated
#Generate a random token (16 bytes from /dev/urandom rendered as hex) as the authentication content
head -c 16 /dev/urandom | od -An -t x | tr -d ' ' > /opt/kubernetes/cfg/token.csv
vim /opt/kubernetes/cfg/token.csv
~
df3334281501df44c2bea4db952c1ee8,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
~
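#The two steps above can also be done in one go; a small sketch that writes a fresh token plus the bootstrap user line in the same format as shown above:
BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
cat > /opt/kubernetes/cfg/token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF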
#Start kube-apiserver
systemctl daemon-reload
systemctl restart kube-apiserver
ps -ef | grep kube-apiserver
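#Optional: in this setup the apiserver should now be listening on the secure port 6443 and the local insecure port 8080 (used later by controller-manager and scheduler)
ss -lntp | grep -E '6443|8080'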
##Deploy the kube-controller-manager component
#On master01, review the prepared controller-manager.sh script
vim controller-manager.sh
~
#!/bin/bash
MASTER_ADDRESS=$1
cat <<FOF >/opt/kubernetes/cfg/kube-controller-manager
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=true \\
--v=4 \\
--master=${MASTER_ADDRESS}:8080 \\
--leader-elect=true \\
--address=127.0.0.1 \\
--service-cluster-ip-range=10.0.0.0/24 \\
--cluster-name=kubernetes \\
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--root-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--experimental-cluster-signing-duration=87600h0m0s"
FOF
cat <<FOF >/usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-controller-manager
ExecStart=/opt/kubernetes/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
FOF
systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl start kube-controller-manager
~
chmod +x controller-manager.sh
./controller-manager.sh 127.0.0.1
ps -ef | grep kube
##Deploy the kube-scheduler component
#On master01, review the prepared scheduler.sh script
vim scheduler.sh
~
#!/bin/bash
MASTER_ADDRESS=$1
cat <<FOF >/opt/kubernetes/cfg/kube-scheduler
KUBE_SCHEDULER_OPTS="--logtostderr=true \\
--v=4 \\
--master=${MASTER_ADDRESS}:8080 \\
--leader-elect"
FOF
cat <<FOF >/usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-scheduler
ExecStart=/opt/kubernetes/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
FOF
systemctl daemon-reload
systemctl enable kube-scheduler
systemctl restart kube-scheduler
~
chmod +x scheduler.sh
./scheduler.sh 127.0.0.1
ps -ef | grep kube-scheduler | grep -v grep
#Take a look at the scheduler config file:
cat /opt/kubernetes/cfg/kube-scheduler
#Finally, check the health of the master components
#Symlink the K8S commands into the PATH
ln -s /opt/kubernetes/bin/* /usr/local/bin/
#Run the cluster health check command
kubectl get cs
#On the master node, bind the kubelet-bootstrap user (used by Nodes when they first contact the Master apiserver) to the system:node-bootstrapper cluster role
which kubectl
kubectl create clusterrolebinding kubelet-bootstrap \
--clusterrole=system:node-bootstrapper \
--user=kubelet-bootstrap
#On master01, review the prepared kubeconfig.sh script
#Note: BOOTSTRAP_TOKEN in the script must be set to your own token
#Note: KUBE_APISERVER in the script must be set to your master node's IP
vim kubeconfig.sh
~
#!/bin/bash
#Create the kubelet bootstrapping kubeconfig
BOOTSTRAP_TOKEN=`cat /opt/kubernetes/cfg/token.csv |awk -F',' '{print $1}'`
KUBE_APISERVER="https://192.168.100.101:6443"
#Set the cluster parameters
kubectl config set-cluster kubernetes \
--certificate-authority=./ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=bootstrap.kubeconfig
#Set the client authentication parameters
kubectl config set-credentials kubelet-bootstrap \
--token=${BOOTSTRAP_TOKEN} \
--kubeconfig=bootstrap.kubeconfig
#Set the context parameters
kubectl config set-context default \
--cluster=kubernetes \
--user=kubelet-bootstrap \
--kubeconfig=bootstrap.kubeconfig
#Set the default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
#-----------------------------------------
#Create the kube-proxy kubeconfig file
kubectl config set-cluster kubernetes \
--certificate-authority=./ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=kube-proxy.kubeconfig
kubectl config set-credentials kube-proxy \
--client-certificate=./kube-proxy.pem \
--client-key=./kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=kube-proxy.kubeconfig
kubectl config set-context default \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig=kube-proxy.kubeconfig
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
~
#Copy the script into the /root/k8s-cert/ directory
#The script must be executed from the k8s certificate directory generated earlier, since it references the certificates by relative path
chmod +x kubeconfig.sh
cp kubeconfig.sh k8s-cert/
cd k8s-cert/
./kubeconfig.sh
#View the generated files
ls
(bootstrap.kubeconfig kube-proxy.kubeconfig)
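#Optional: inspect the generated kubeconfigs (the embedded certificates are shown as REDACTED/DATA+OMITTED)
kubectl config view --kubeconfig=bootstrap.kubeconfig
kubectl config view --kubeconfig=kube-proxy.kubeconfig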
##Copy the two files just generated on master01 into /opt/kubernetes/cfg on the Node nodes (node01 and node02)
scp bootstrap.kubeconfig kube-proxy.kubeconfig 192.168.100.103:/opt/kubernetes/cfg/
scp bootstrap.kubeconfig kube-proxy.kubeconfig 192.168.100.104:/opt/kubernetes/cfg/
##Deploy the kubelet component on the Node nodes
##Perform the following on master01
#Copy kubelet and kube-proxy from the K8S server binary package to the Node nodes
cd /root/kubernetes/server/bin
scp kubelet kube-proxy 192.168.100.103:/opt/kubernetes/bin/
scp kubelet kube-proxy 192.168.100.104:/opt/kubernetes/bin/
#On the Node nodes, verify that the binaries arrived
cd /opt/kubernetes/bin/
ls
#Switch to node01 and write the kubelet.sh script there
vim kubelet.sh
~
#!/bin/bash
NODE_ADDRESS=$1
DNS_SERVER_IP=${2:-"10.0.0.2"}
cat <<FOF >/opt/kubernetes/cfg/kubelet
KUBELET_OPTS="--logtostderr=true \\
--v=4 \\
--hostname-override=${NODE_ADDRESS} \\
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \\
--bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \\
--config=/opt/kubernetes/cfg/kubelet.config \\
--cert-dir=/opt/kubernetes/ssl \\
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"
FOF
cat <<FOF >/opt/kubernetes/cfg/kubelet.config
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: ${NODE_ADDRESS}
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS: ["${DNS_SERVER_IP}"]
clusterDomain: cluster.local.
failSwapOn: false
authentication:
  anonymous:
    enabled: true
FOF
cat <<FOF >/usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kubelet
ExecStart=/opt/kubernetes/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
KillMode=process
[Install]
WantedBy=multi-user.target
FOF
systemctl daemon-reload
systemctl enable kubelet
systemctl restart kubelet
~
chmod +x kubelet.sh
./kubelet.sh 192.168.100.103
#Check whether kubelet started
ps -ef | grep kubelet
#Deploy kube-proxy
#Write the kube-proxy.sh script
vim kube-proxy.sh
~
#!/bin/bash
NODE_ADDRESS=192.168.100.103
WORK_DIR=/opt/kubernetes
cat <<EOF >${WORK_DIR}/cfg/kube-proxy
KUBE_PROXY_OPTS="--logtostderr=false \\
--log-dir=${WORK_DIR}/logs \\
--v=4 \\
--hostname-override=${NODE_ADDRESS} \\
--cluster-cidr=10.0.0.0/24 \\
--proxy-mode=ipvs \\
--kubeconfig=${WORK_DIR}/cfg/kube-proxy.kubeconfig"
EOF
cat <<EOF >/usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Proxy
After=network.target
[Service]
EnvironmentFile=-${WORK_DIR}/cfg/kube-proxy
ExecStart=${WORK_DIR}/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kube-proxy
systemctl restart kube-proxy
~
chmod +x kube-proxy.sh
./kube-proxy.sh
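#Optional: because --proxy-mode=ipvs is set, the resulting virtual-server rules can be listed with ipvsadm (assumes the ipvsadm package is available in your yum repos; kube-proxy falls back to iptables if the ip_vs kernel modules are missing)
yum -y install ipvsadm
ipvsadm -Ln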
#On the Master, approve the Node's request to join the cluster
kubectl get csr
~
NAME AGE REQUESTOR CONDITION
node-csr-4sSW4lPHSdlSJSQjXtEMXKSofVzh6Fv-iRWlbe8_P6I 24m kubelet-bootstrap Pending
~
#Approve the request manually: run kubectl certificate approve followed by the request name
kubectl certificate approve node-csr-4sSW4lPHSdlSJSQjXtEMXKSofVzh6Fv-iRWlbe8_P6I
#Check the request status again (Approved,Issued means the certificate has been approved and issued)
kubectl get csr
~
NAME AGE REQUESTOR CONDITION
node-csr-4sSW4lPHSdlSJSQjXtEMXKSofVzh6Fv-iRWlbe8_P6I 26m kubelet-bootstrap Approved,Issued
~
#On the master, view the Node(s) whose certificates have been issued
kubectl get node
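#If several requests are pending, a convenience sketch to approve them all at once (not part of the original flow):
kubectl get csr | grep Pending | awk '{print $1}' | xargs -r kubectl certificate approve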
##Deploy a second Node node
#The contents of the /opt/kubernetes directory are identical across Node nodes
#so we simply copy node01's /opt/kubernetes directory over
scp -r /opt/kubernetes 192.168.100.104:/opt
#Copy node01's systemd unit files for kubelet and kube-proxy to the same directory on the node02 node
scp -r /usr/lib/systemd/system/{kubelet,kube-proxy}.service 192.168.100.104:/usr/lib/systemd/system/
#Delete all certificates under /opt/kubernetes/ssl/ on node02 (they were issued for node01)
cd /opt/kubernetes/ssl/
rm -rf *
#Go into /opt/kubernetes/cfg on the new Node
#Change the IP address in the kubelet, kubelet.config and kube-proxy files to the new Node's IP
cd /opt/kubernetes/cfg/
grep "103" *
kubelet:--hostname-override=192.168.100.103 \
kubelet.config:address: 192.168.100.103
kube-proxy:--hostname-override=192.168.100.103 \
#Change 103 to 104 in the lines above (a sed sketch follows)
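#A sketch of the same edit done with sed instead of by hand (run inside /opt/kubernetes/cfg on node02):
sed -i 's/192.168.100.103/192.168.100.104/g' kubelet kubelet.config kube-proxy
grep "104" kubelet kubelet.config kube-proxy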
#After the changes, start kubelet and kube-proxy
systemctl start kubelet
systemctl start kube-proxy
#Back on master01
#Manually approve the node's request to join the cluster
kubectl get csr
kubectl certificate approve node-csr-BHffakfU5vthjszKVgn60WviaL0OcBuULoiBMbvG6Qc #use the CSR name from your own cluster
kubectl get node
##Log in to the image registry on both docker nodes
docker login
Username (wangqian123): wangqian123
Password: 000000
##Run a test example to verify that the cluster works
#Create and run an nginx pod
kubectl run nginx --image=nginx
#Check the pod: READY 0/1 means it is not ready yet, and STATUS shows the container is still being created
kubectl get pod
#Expose port 80 of the created Pod so it can be accessed from outside the cluster
kubectl expose deployment nginx --port=80 --target-port=80 --type=NodePort
#Check which services are exposed by the cluster
kubectl get svc
##Access-test the Pod from inside the K8S cluster (on both Node nodes)
#Note: if this lab is not finished in one day, the flannel service on the Node VMs may have died in the meantime; restart the flanneld and dockerd services before testing
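#The access test itself is just an HTTP request against any Node IP on the service's NodePort; a sketch run from master01, reading the port from the nginx service created above:
NODE_PORT=$(kubectl get svc nginx -o jsonpath='{.spec.ports[0].nodePort}')
curl -I http://192.168.100.103:${NODE_PORT}
curl -I http://192.168.100.104:${NODE_PORT}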
#On the master, view the pod's access log
kubectl get pods
~
NAME READY STATUS RESTARTS AGE
nginx-7db9fccd9b-2png9 1/1 Running 0 72m
~
kubectl logs nginx-7db9fccd9b-2png9
#Bind the anonymous user system:anonymous to the cluster-admin cluster role
kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=system:anonymous
##Deploy the Web UI (Dashboard)
#Perform the following on master01
#Enter the extracted kubernetes package directory
cd kubernetes
tar xf kubernetes-src.tar.gz
cd cluster/addons/dashboard/
#Create the dashboard components one by one
kubectl create -f dashboard-configmap.yaml
kubectl create -f dashboard-rbac.yaml
kubectl create -f dashboard-secret.yaml
#Change the image address on line 34 of dashboard-controller.yaml, as shown below:
sed -n '34p' dashboard-controller.yaml
~before the change
image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3
~
sed -n '34p' dashboard-controller.yaml
~after the change
image: registry.cn-hangzhou.aliyuncs.com/google_containers/kubernetes-dashboard-amd64:v1.10.0
~
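#A sketch of making that image change with sed rather than editing the file by hand (the addresses are the before/after values shown above):
sed -i 's#image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3#image: registry.cn-hangzhou.aliyuncs.com/google_containers/kubernetes-dashboard-amd64:v1.10.0#' dashboard-controller.yaml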
kubectl create -f dashboard-controller.yaml
#Check whether the dashboard pod is running
kubectl get pods
#After specifying the kube-system namespace, the dashboard pod is visible
kubectl get pods -n kube-system
~
NAME READY STATUS RESTARTS AGE
kubernetes-dashboard-6d8f4f4df8-8xvrh 0/1 ContainerCreating 0 81s
~
#Check the dashboard pod's log to see whether it started successfully
kubectl logs kubernetes-dashboard-6d8f4f4df8-8xvrh -n kube-system
#Open dashboard-service.yaml and add one line
vim dashboard-service.yaml
~
apiVersion: v1
kind: Service
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  type: NodePort    ##add this line
  selector:
    k8s-app: kubernetes-dashboard
  ports:
  - port: 443
    targetPort: 8443
~
kubectl create -f dashboard-service.yaml
#Check which Node the dashboard pod was scheduled on
kubectl get pods -o wide -n kube-system
~
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
kubernetes-dashboard-6d8f4f4df8-8xvrh 0/1 ContainerCreating 0 5m51s <none> 192.168.100.103 <none> <none>
~
#Check the internal/external ports opened for the dashboard
kubectl get svc -n kube-system
#Use the prepared k8s-admin.yaml manifest to create a user identity token
vim k8s-admin.yaml
~
apiVersion: v1
kind: ServiceAccount
metadata:
  name: dashboard-admin
  namespace: kube-system
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: dashboard-admin
subjects:
- kind: ServiceAccount
  name: dashboard-admin
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
~
#Generate the user token
kubectl create -f k8s-admin.yaml
#List the token secret names
kubectl get secret -n kube-system
#The secret whose name starts with "dashboard-admin-token-" holds the token; the suffix differs each time it is generated
kubectl describe secret dashboard-admin-token-57bxb -n kube-system
#Paste the token into the dashboard's token login field
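#A sketch for pulling out just the token string (replace the secret name with the one from your own cluster; dashboard-admin-token-57bxb is only the example above):
kubectl -n kube-system get secret dashboard-admin-token-57bxb -o jsonpath='{.data.token}' | base64 -d; echo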