Environment
Hostnames and IP addresses of the three hosts:
ccka-master 192.168.30.135
ccka-worker1 192.168.30.136
ccka-worker2 192.168.30.137
Ubuntu 20.04 (focal) is used as the operating system; the apt mirror configured below assumes focal.
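The script addresses every node by hostname, so all three names must resolve on each machine. A minimal sketch, assuming resolution is handled via /etc/hosts (the script itself does not set this up), is the same three entries on every node:

192.168.30.135 ccka-master
192.168.30.136 ccka-worker1
192.168.30.137 ccka-worker2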
Source code
#!/bin/bash
set -ueo pipefail
echo
echo
echo -n "Have you done the above? yes or no: "
read input
case $input in
yes)
echo
echo now starting deploy
;;
no)
echo please correct it && exit 1
;;
*)
echo please input yes or no
exit 1
;;
esac
echo
cd /root
# Install Ansible
# Refresh the package lists
apt update
# Install software-properties-common (needed by add-apt-repository)
apt install -y software-properties-common
# Add the official Ansible PPA
add-apt-repository --yes --update ppa:ansible/ansible
# Install Ansible
apt install -y ansible
# Point apt at a domestic mirror (NJU)
cat > /etc/apt/sources.list <<EOF
deb https://mirror.nju.edu.cn/ubuntu focal main restricted
deb https://mirror.nju.edu.cn/ubuntu focal-updates main restricted
deb https://mirror.nju.edu.cn/ubuntu focal universe
deb https://mirror.nju.edu.cn/ubuntu focal-updates universe
deb https://mirror.nju.edu.cn/ubuntu focal multiverse
deb https://mirror.nju.edu.cn/ubuntu focal-updates multiverse
deb https://mirror.nju.edu.cn/ubuntu focal-backports main restricted universe multiverse
deb https://mirror.nju.edu.cn/ubuntu focal-security main restricted
deb https://mirror.nju.edu.cn/ubuntu focal-security universe
deb https://mirror.nju.edu.cn/ubuntu focal-security multiverse
EOF
apt update &> /dev/null
apt install sshpass wget bash-completion -y &> /dev/null
sed -i 's/^#host_key_checking = False/host_key_checking = False/' /etc/ansible/ansible.cfg
# Set up passwordless SSH trust (for a larger fleet, use the script embedded further below)
echo 'Create and copy ssh key to workers'
ssh-keygen -t rsa -f /root/.ssh/id_rsa -N '' &> /dev/null
sshpass -p 1 ssh-copy-id -o StrictHostKeyChecking=no root@ccka-master &> /dev/null
sshpass -p 1 ssh-copy-id -o StrictHostKeyChecking=no root@ccka-worker1 &> /dev/null
sshpass -p 1 ssh-copy-id -o StrictHostKeyChecking=no root@ccka-worker2 &> /dev/null
# Write the Ansible inventory
cd /root
cat > /etc/ansible/hosts <<EOF
[master]
ccka-master ansible_user=root ansible_password=1
[worker]
ccka-worker1 ansible_user=root ansible_password=1
ccka-worker2 ansible_user=root ansible_password=1
EOF
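# Optional sanity check: confirm Ansible can reach every host in the new
# inventory before handing it the playbook (the ping module ships with Ansible).
ansible all -m ping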
: <<'COMMENT'
SSH trust setup for multiple hosts in a production environment:
#!/bin/bash
# Configure SSH trust for every host in the Ansible inventory
set -euo pipefail  # strict error handling

# 1. Check that the Ansible hosts file exists
ANSIBLE_HOSTS="/etc/ansible/hosts"
if [ ! -f "$ANSIBLE_HOSTS" ]; then
    echo "Error: Ansible hosts file $ANSIBLE_HOSTS does not exist"
    exit 1
fi

# 2. Generate an SSH key pair (if one does not exist yet)
SSH_KEY="$HOME/.ssh/id_rsa"
if [ ! -f "$SSH_KEY" ]; then
    echo "Generating a new SSH key pair..."
    ssh-keygen -t rsa -f "$SSH_KEY" -N '' -q
fi

# 3. Extract all unique hosts from the Ansible hosts file
echo "Extracting the host list from $ANSIBLE_HOSTS..."
ALL_HOSTS=$(grep -Eo '^[a-zA-Z0-9_.-]+' "$ANSIBLE_HOSTS" | sort -u)

# 4. Validate the host list
if [ -z "$ALL_HOSTS" ]; then
    echo "Error: no valid hosts found"
    exit 1
fi
echo "Found the following hosts:"
echo "$ALL_HOSTS"
echo ""

# 5. Set up SSH trust
for host in $ALL_HOSTS; do
    echo "Configuring host: $host"
    # Skip the local host
    if [ "$host" == "localhost" ] || [ "$host" == "127.0.0.1" ]; then
        echo "Skipping local host"
        continue
    fi
    # Check that the host is reachable
    if ! ping -c 1 -W 1 "$host" &> /dev/null; then
        echo "Warning: cannot reach host $host"
        continue
    fi
    # Copy the public key
    if ssh-copy-id -o "StrictHostKeyChecking=accept-new" "$host"; then
        echo "Configured $host successfully"
    else
        echo "Error: failed to configure $host; run this manually:"
        echo "  ssh-copy-id $host"
    fi
done
echo ""
echo "SSH trust setup complete"
COMMENT
cat > create-k8s.yaml <<'EOF'
---
- name: Configure Kubernetes with Containerd
  hosts: all
  become: yes
  remote_user: root
  tasks:
    - name: clean apt lock
      shell: |
        killall apt apt-get
        rm -rf /var/lib/apt/lists/lock
        rm -rf /var/cache/apt/archives/lock
        rm -rf /var/lib/dpkg/lock*
        dpkg --configure -a

    - name: Install required packages for Docker
      apt:
        name:
          - ca-certificates
          - curl
          - gnupg
          - lsb-release
        state: present
        update_cache: no  # skip the cache refresh here to save time

    - name: Create keyrings directory
      file:
        path: /etc/apt/keyrings
        state: directory
        mode: '0755'

    - name: Download Docker GPG key
      get_url:
        url: https://mirrors.nju.edu.cn/docker-ce/linux/ubuntu/gpg
        dest: /tmp/docker-key.gpg
        mode: '0644'

    - name: Process GPG key
      command: gpg --dearmor -o /etc/apt/keyrings/docker.gpg /tmp/docker-key.gpg
      args:
        creates: /etc/apt/keyrings/docker.gpg

    # apt expects "amd64" here; ansible_architecture would report "x86_64",
    # which apt does not recognize as a repository architecture
    - name: Add Docker repository
      apt_repository:
        repo: "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://mirrors.nju.edu.cn/docker-ce/linux/ubuntu {{ ansible_distribution_release }} stable"
        state: present
        filename: docker

    - name: Update apt cache (only for Docker)
      apt:
        update_cache: yes
        cache_valid_time: 3600  # no re-update needed within one hour

    - name: clean apt lock
      shell: |
        killall apt apt-get
        rm -rf /var/lib/apt/lists/lock
        rm -rf /var/cache/apt/archives/lock
        rm -rf /var/lib/dpkg/lock*
        dpkg --configure -a
        apt update

    - name: Deploy chrony to keep time in sync on all nodes
      apt:
        pkg:
          - chrony

    - name: restart chronyd service for timesync
      systemd:
        state: restarted
        daemon_reload: yes
        name: chronyd
        enabled: yes

    - name: set timezone to Asia/Shanghai
      shell: |
        timedatectl set-timezone Asia/Shanghai

    - name: Install containerd
      apt:
        pkg:
          - containerd
        state: present

    - name: Configure containerd for Kubernetes
      shell: |
        mkdir -p /etc/containerd
        containerd config default | tee /etc/containerd/config.toml
        sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
        sed -i 's|sandbox_image = ".*"|sandbox_image = "registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.8"|' /etc/containerd/config.toml
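    # After this task /etc/containerd/config.toml should contain, roughly
    # (exact layout varies by containerd version):
    #   [plugins."io.containerd.grpc.v1.cri"]
    #     sandbox_image = "registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.8"
    #   [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
    #     SystemdCgroup = true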
    - name: restart containerd service
      systemd:
        state: restarted
        daemon_reload: yes
        name: containerd
        enabled: yes

    - name: disable swap in /etc/fstab
      lineinfile:
        path: /etc/fstab
        regexp: '.*swap.*'
        state: absent

    - name: disable swap at runtime
      shell: swapoff -a

    - name: load the br_netfilter module on boot
      lineinfile:
        path: /etc/modules-load.d/k8s.conf
        line: br_netfilter
        state: present
        create: true

    # lineinfile handles single lines only, so write the whole sysctl file with
    # copy; the file must live in /etc/sysctl.d/ for sysctl --system to read it
    - name: configure bridge and forwarding sysctls
      copy:
        dest: /etc/sysctl.d/k8s.conf
        content: |
          net.bridge.bridge-nf-call-ip6tables = 1
          net.bridge.bridge-nf-call-iptables = 1
          net.ipv4.ip_forward = 1

    - name: apply sysctl
      shell: |
        modprobe br_netfilter
        sysctl --system
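    # Quick check if networking misbehaves later: after this task,
    # `sysctl net.bridge.bridge-nf-call-iptables` and `sysctl net.ipv4.ip_forward`
    # should both print "= 1" on every node.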
    - name: Add Kubernetes GPG key
      apt_key:
        url: https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.32/deb/Release.key
        state: present

    - name: Add Kubernetes repository
      apt_repository:
        repo: "deb https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.32/deb/ /"
        state: present
        filename: kubernetes

    - name: Update apt cache
      apt:
        update_cache: yes

    - name: clean apt lock
      shell: |
        killall apt apt-get
        rm -rf /var/lib/apt/lists/lock
        rm -rf /var/cache/apt/archives/lock
        rm -rf /var/lib/dpkg/lock*
        dpkg --configure -a
        apt update

    # use the apt module directly: "name=version" pins and allow_downgrades
    # are apt-specific options
    - name: install kubeadm kubectl kubelet
      apt:
        name:
          - kubeadm=1.32.0-1.1
          - kubelet=1.32.0-1.1
          - kubectl=1.32.0-1.1
          - sshpass
        state: present
        allow_downgrades: yes

    - name: clean apt lock
      shell: |
        killall apt apt-get
        rm -rf /var/lib/apt/lists/lock
        rm -rf /var/cache/apt/archives/lock
        rm -rf /var/lib/dpkg/lock*
        dpkg --configure -a
        apt update
    - name: configure crictl to use containerd
      shell: crictl config runtime-endpoint unix:///run/containerd/containerd.sock

    - name: creating kubeadm.yaml
      shell: kubeadm config print init-defaults > kubeadm.yaml
      when: "'master' in group_names"

    - name: drop the placeholder API server address
      shell: sed -i '/.*advertiseAddress.*/d' kubeadm.yaml
      when: "'master' in group_names"

    # two-space indent keeps the line aligned with the other nodeRegistration keys
    - name: set the node name to ccka-master
      lineinfile:
        path: kubeadm.yaml
        regexp: '.*name.*'
        line: '  name: ccka-master'
        state: present
      when: "'master' in group_names"

    - name: modify image repository
      lineinfile:
        path: kubeadm.yaml
        regexp: 'imageRepo.*'
        line: 'imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers'
        state: present
      when: "'master' in group_names"

    - name: point criSocket at containerd
      lineinfile:
        path: kubeadm.yaml
        regexp: ' criSocket.*'
        line: '  criSocket: unix:///run/containerd/containerd.sock'
        state: present
      when: "'master' in group_names"
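    # After these edits the relevant parts of kubeadm.yaml look roughly like:
    #   nodeRegistration:
    #     criSocket: unix:///run/containerd/containerd.sock
    #     name: ccka-master
    #   imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
    # (advertiseAddress is gone, so kubeadm advertises the default-route IP)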
    - name: restart containerd and kubelet service
      systemd:
        state: restarted
        daemon_reload: yes
        name: "{{ item }}"
        enabled: yes
      loop:
        - containerd
        - kubelet

    - name: Deploy kubernetes on Master node
      shell: kubeadm init --config kubeadm.yaml | tee /root/installdetails.log
      when: "'master' in group_names"

    - name: pause 30s after cluster init
      shell: sleep 30s
      when: "'master' in group_names"

    - name: Create local kubeconfig directory
      file:
        path: /root/.kube
        state: directory
        mode: '0700'
      when: "'master' in group_names"

    - name: Copy admin config to local
      copy:
        src: /etc/kubernetes/admin.conf
        dest: /root/.kube/config
        remote_src: yes
        owner: root
        group: root
        mode: '0600'
      when: "'master' in group_names"

    - name: Create .kube directory on workers
      file:
        path: /root/.kube
        state: directory
        mode: '0700'
      delegate_to: "{{ item }}"
      loop:
        - ccka-worker1
        - ccka-worker2
      when: "'master' in group_names"

    - name: Copy admin config to workers
      copy:
        src: /etc/kubernetes/admin.conf
        dest: /root/.kube/config
        owner: root
        group: root
        mode: '0600'
      delegate_to: "{{ item }}"
      loop:
        - ccka-worker1
        - ccka-worker2
      when: "'master' in group_names"

    - name: Download calico.yaml
      get_url:
        url: https://raw.githubusercontent.com/projectcalico/calico/v3.26.0/manifests/calico.yaml
        dest: /root/calico.yaml
        mode: '0644'
      when: "'master' in group_names"

    - name: Modify calico.yaml to use domestic image registry
      replace:
        path: /root/calico.yaml
        regexp: 'docker.io/calico/'
        replace: 'registry.cn-hangzhou.aliyuncs.com/calico/'
      when: "'master' in group_names"

    - name: Deploy Calico
      shell: |
        kubectl apply -f /root/calico.yaml
        sleep 30
      when: "'master' in group_names"

    - name: join workers with containerd
      shell: |
        sleep 30
        join_command=$(sshpass -p 1 ssh -o StrictHostKeyChecking=no root@ccka-master "kubeadm token create --print-join-command")
        echo "$join_command --cri-socket=unix:///run/containerd/containerd.sock" | bash
      when: "'worker' in group_names"

    - name: assign worker role label to workers
      shell: |
        sleep 30
        kubectl label nodes ccka-worker2 ccka-worker1 node-role.kubernetes.io/worker=
      when: "'master' in group_names"
EOF
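# Optional: catch YAML mistakes early with a dry syntax pass before deploying.
ansible-playbook create-k8s.yaml --syntax-check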
cp /etc/ansible/ansible.cfg /root/ansible.cfg
if [ $? -ne 0 ]; then
    echo "please review the on-screen output and fix the error before re-running" && exit 1
fi
sed -i '/^# command_warnings.*/a\command_warnings = False' /root/ansible.cfg
if [ $? -ne 0 ]; then
    exit 1
fi
echo
echo 'Deploy K8S Cluster now'
ansible-playbook create-k8s.yaml
if [ $? -ne 0 ]; then
    exit 1
fi
#rm -rf create-k8s.yaml /root/ansible.cfg /root/kubeadm.yaml /root/create-k8s-cluster.sh /root/installdetails.log
kubectl completion bash > /etc/bash_completion.d/kubectl
kubeadm completion bash > /etc/bash_completion.d/kubeadm
source /etc/bash_completion.d/kubectl
source /etc/bash_completion.d/kubeadm
echo
echo "Please wait one minute for nodes ready"
echo
sleep 1m
kubectl get pod -A
echo
kubectl get nodes
echo
echo
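# Optional: rather than trusting the fixed sleep above, block until every node
# reports Ready (kubectl wait is part of standard kubectl; 5-minute timeout).
kubectl wait --for=condition=Ready nodes --all --timeout=300s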