系统要求
| 软硬件 | 最低配置 | 推荐配置 |
|---|---|---|
| cpu和内存 | Master: 至少2核和4GB内存 ;node: 4核和16GB内存 | master:至少4核和16GB内存 ;Node: 应根据需要运行的容器数量进行配置 |
| Docker | 1.9版本以上 | 1.12版本 |
| etcd | 2.0版本以上 | 3.0版本 |
使用kubeadm安装
服务器配置和准备
sed -ri 's/.*BOOTPROTO.*/#&/' /etc/sysconfig/network-scripts/ifcfg-enp0s3
cat >>/etc/sysconfig/network-scripts/ifcfg-enp0s3<<EOF
BOOTPROTO=static
IPADDR=192.168.20.31
NETMASK=255.255.255.0
GATEWAY=192.168.20.1
DNS=192.168.20.1
DNS1=8.8.8.8
DNS2=114.114.114.114
EOF
sed -ri 's/.*BOOTPROTO.*/#&/' /etc/sysconfig/network-scripts/ifcfg-enp0s3
cat >>/etc/sysconfig/network-scripts/ifcfg-enp0s3<<EOF
BOOTPROTO=static
IPADDR=192.168.20.32
NETMASK=255.255.255.0
GATEWAY=192.168.20.1
DNS=192.168.20.1
DNS1=8.8.8.8
DNS2=114.114.114.114
EOF
# 设计hostname
hostnamectl set-hostname k8s-master
hostnamectl set-hostname k8s-node1
# 方便互相访问
cat >>/etc/hosts<<EOF
192.168.20.31 k8s-master
192.168.20.32 k8s-node1
EOF
# 安装常用软件
yum install vim wget curl ntpdate -y
# 时间同步 ,设置定时任务
echo "*/5 * * * * /usr/sbin/ntpdate ntp1.aliyun.com ntp.sjtu.edu.cn > /dev/null 2>&1 " >> /var/spool/cron/root
# 防止下载失败,提前下载
wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.2.5/cri-dockerd-0.2.5-3.el7.x86_64.rpm
#scp root@k8s-master:/root/*.rpm ~/
相关软件安装
# 关闭防火墙
systemctl disable firewalld
systemctl stop firewalld
firewall-cmd --state
# 修改selinux设置
setenforce 0
sed -ri 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
# 添加网桥过滤及内核转发配置文件
cat >/etc/sysctl.d/k8s.conf<<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness = 0
EOF
# 加载br_netfilter模块
modprobe br_netfilter
lsmod | grep br_netfilter
# 配置ipvsadm模块加载方式
yum install ipset ipvsadm -y
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF
# 授权、运行、检查是否加载
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack
# 关闭swap
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab
# docker安装启动
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
yum install docker-ce -y
systemctl enable docker
systemctl start docker
# 更新systemd
yum update systemd -y
# 修改cgroup方式
cat >/etc/docker/daemon.json<<EOF
{
"exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
systemctl daemon-reload
systemctl restart docker
# cri-dockerd 安装,前面已下载
#wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.2.5/cri-dockerd-0.2.5-3.el7.x86_64.rpm
rpm -ivh cri-dockerd-0.2.5-3.el7.x86_64.rpm
systemctl enable cri-docker
systemctl start cri-docker
# 配置yum源
cat >/etc/yum.repos.d/kubernetes.repo<<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# 安装kubeadm 版本1.25.4
#yum --showduplicates list kubelet
#yum install -y <package_name>-<version>
yum install -y --nogpgcheck kubelet-1.25.4-0 kubeadm-1.25.4-0 kubectl-1.25.4-0
systemctl enable kubelet && systemctl start kubelet
systemctl status kubelet
# 配置Kubelet启动参数
#vim /etc/sysconfig/kubelet
#KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"
sed -i "s/\"$/ --cgroup-driver=systemd\"/" /etc/sysconfig/kubelet
sed -i "s/=$/=\"--cgroup-driver=systemd\"/" /etc/sysconfig/kubelet
systemctl restart kubelet
systemctl status kubelet
Master初始化
# 查看镜像要求和提前准备镜像
kubeadm config images list
#pause经常出问题,直接提前下载
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.6
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.6 k8s.gcr.io/pause:3.6
# kubeadm初始化
#kubeadm reset #如果初始化失败,需要重置后再初始化
kubeadm init --apiserver-advertise-address=192.168.20.31 --image-repository registry.aliyuncs.com/google_containers --kubernetes-version=v1.25.4 --service-cidr=10.96.0.0/12 --pod-network-cidr=10.244.0.0/16 --ignore-preflight-errors=all --cri-socket unix:///var/run/cri-dockerd.sock
journalctl -xeu kubelet | grep Failed
journalctl -f -u kubelet
# 初始化成功
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# 查看POD
kubectl get pods --all-namespaces
# 添加网络组件calico(可选网络组件Flannel)
wget https://docs.projectcalico.org/v3.18/manifests/calico.yaml --no-check-certificate
# kubectl delete -f calico.yaml
sed -i 's/192.168.0.0/10.244.0.0/g' calico.yaml
kubectl apply -f calico.yaml
# 查看日志
kubectl logs -f calico-node-6jtp2 -n kube-system
# 强行删除pod
kubectl delete pod <your-pod-name> -n <name-space> --force --grace-period=0
# grace-period表示过渡存活期,默认30s,在删除POD之前允许POD慢慢终止其上的容器进程,从而优雅退出,0表示立即终止POD
Master设置可调度
可否部署新的POD
# 将master节点设为可以调度
kubectl taint nodes --all node-role.kubernetes.io/master-
# 如果需要设为不允许调度
kubectl taint nodes master node-role.kubernetes.io/master=:NoSchedule
# 污点可选参数
NoSchedule: 一定不能被调度
PreferNoSchedule: 尽量不要调度
NoExecute: 不仅不会调度, 还会驱逐Node上已有的Pod
Node节点注册
# Node节点注册
# 查看token
kubeadm token list
# 生成新的token
kubeadm token create
# 获取ca证书sha256编码的hash值
openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
# 加入集群
kubeadm join 192.168.x.x:6443 --token xxx --discovery-token-ca-cert-hash sha256:xxx --cri-socket unix:///var/run/cri-dockerd.sock
节点暂停和排水
# 节点暂停
#kubectl cordon node节点/IP
kubectl cordon 192.168.1.208
# 节点排水
#kubectl drain node节点/IP
kubectl drain 192.168.1.208
# –ignore-daemonsets:无视 DaemonSet 管理下的 Pod
# –delete-local-data:有 mount local volume 的 pod,会强制杀掉该 pod
# –force:强制释放不是控制器管理的 Pod,例如 kube-proxy
kubectl drain node2 --ignore-daemonsets --delete-local-data --force
# 节点恢复
#kubectl uncordon node节点/IP
kubectl uncordon node2
Dashboard控制台
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml
kubectl get svc,pods -n kubernetes-dashboard
# 配置nginx访问内部地址
Token用户登录
# 创建用户
# 方法1
wget https://raw.githubusercontent.com/cby-chen/Kubernetes/main/yaml/dashboard-user.yaml
kubectl apply -f dashboard-user.yaml
kubectl -n kubernetes-dashboard create token admin-user
# 方法2
kubectl create serviceaccount dashboard-admin -n kube-system
kubectl create clusterrolebinding dashboard-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
kubectl -n kube-system create token dashboard-admin
端口和访问配置
ClusterIP
只对集群内部可见, 外部无法访问。
apiVersion: v1
kind: Service
metadata:
name: my-service
spec:
selector:
app: MyApp
ports:
- protocol: TCP
port: 80
targetPort: 9376
# 创建 clusterIP
$ kubectl apply -f my-service.yaml
service/my-service-clusterip created
# 查看 clusterIP
$ kubectl get service |grep my
my-service ClusterIP 10.96.0.108 <none> 80/TCP 9s
$ kubectl describe service my-service
NodePort
对外部可见
nodePort是附加端口,指定在节点上打开哪个端口。如果不指定端口,会选择一个随机端口。
nodePort:是提供给外部流量访问k8s集群中service的入口。只能使用端口30000-32767
port:k8s集群内部服务之间访问service的入口
targetPort: 是pod上的端口
apiVersion: v1
kind: Service
metadata:
name: nodeport-my-service
spec:
type: NodePort
selector:
app: MyApp
ports:
# 默认情况下,为了方便起见,`targetPort` 被设置为与 `port` 字段相同的值。
- port: 80
targetPort: 80
# 可选字段
# 默认情况下,为了方便起见,Kubernetes 控制平面会从某个范围内分配一个端口号(默认:30000-32767)
nodePort: 30007
# 创建 nodeport
$ kubectl apply -f nodeport-my-service.yaml
service/my-service created
# 查看 nodeport
$ kubectl get service |grep my
nodeport-my-service NodePort 10.96.0.19 <none> 80:30007/TCP 17s
$ kubectl describe service nodeport-my-service
LoadBalancer
apiVersion: v1
kind: Service
metadata:
name: loadbalancer-my-service
spec:
selector:
app: MyApp
ports:
- protocol: TCP
port: 80
targetPort: 9376
type: LoadBalancer
status:
loadBalancer:
ingress:
- ip: 192.0.2.127
# 创建 loadbalancer
$ kubectl apply -f loadbalancer-my-service.yaml
service/loadbalancer-my-service created
# 查看 loadbalancer
$ kubectl describe service loadbalancer-my-service
Nginx/MicroService
kubectl create -f nginxweb.yaml
kubectl apply -f nginxweb.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginxweb
namespace: kservice
spec:
replicas: 2
selector:
matchLabels:
project: kservice
app: nginxweb
template:
metadata:
labels:
project: kservice
app: nginxweb
spec:
imagePullSecrets:
- name: registry-pull-secert
containers:
- name: nginx
image: nginx
imagePullPolicy: IfNotPresent
env:
- name: TZ
value: Asia/Shanghai
ports:
- protocol: TCP
containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
name: nginxweb
namespace: kservice
spec:
type: ClusterIP
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: nginxweb
project: kservice
Ingress初始化
# 获取yaml文件
wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.5.1/deploy/static/provider/cloud/deploy.yaml
# 修改其中镜像为阿里云
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/nginx-ingress-controller:v1.5.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-webhook-certgen:v1.2.2
# 这里可以使用host网络,与dnsPolicy: ClusterFirst平级
#hostNetwork: true # 使用node节点网络
# 部署ingress controller
kubectl apply -f deploy.yaml
# 创建ingress
kubectl create ingress kservice-ingress -n kservice --class=nginx --rule="n.k8s.halaz.cn/*=nginxweb:80"
# 删除ingress
kubectl delete ingress kservice-ingress -n kservice
# 使用yaml创建
kubectl create -f kservice-ingress.yaml
kservice-ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: kservice-ingress
namespace: kservice
annotations:
kubernetes.io/ingress.class: "nginx"
spec:
rules:
- host: a.k8s.halaz.cn
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: nginxweb
port:
number: 80
- host: b.k8s.halaz.cn
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: nginxweb
port:
number: 80
创建和管理资源
# 创建pod
kubectl create deployment ngweb --image=nginx
# 查看pod列表
kubectl get pods -o wide
# 如果Pod不处于Running状态,查看问题原因
kubectl -n kube-system describe pod <Pod名称>
# 暴露资源为service
kubectl expose deployment ngweb --port=80 --protocol=TCP --target-port=80 --name=nginx
# 查看service列表
kubectl get svc
# 查看服务描述信息,service映射到pod上了
kubectl describe svc nginx
# 通过service访问pod里的nginx
curl service_ip
# 删除pod
kubectl delete pod pod_name
# 查看pod列表
kubectl get pods -o wide
# 查看指定service绑定的pod
kubectl -n kube-system get pod -o wide -l k8s-app=kube-dns
# 查看yaml
kubectl get svc nginx -o yaml
# 修改
kubectl edit svc nginx
kubectl edit deployment/nginxweb -o yaml --save-config -n kservice
# 设置副本数
kubectl scale --replicas=3 deployment/nginxweb -n kservice
# 切换命名空间
kubectl config set-context $(kubectl config current-context) --namespace=kservice
kubectl config set-context $(kubectl config current-context) --namespace=default