many changes

commit dccf8ca00e
parent d4cb32b9c3
@@ -10,31 +10,32 @@
 vars:
   # TODO: import hosts from a specific host group of the inventory instead of listing them here again
   kubernetes:
-    # - hostname: podman-1
-    #   ipv6: 2001:470:6d:22c:23::42
-    #   mac_address: 52:54:00:b2:52:86
-    #   memory: 8192
     - hostname: k8s-master
+      hostname_full: k8s-master.e2m
       ipv6: 2001:470:6d:22c:42::1
       mac_address: 52:54:00:b2:52:86
       memory: 5072
       vcpu: 2
     - hostname: k8s-node-1
+      hostname_full: k8s-node-1.e2m
       ipv6: 2001:470:6d:22c:42::10
       mac_address: 52:54:00:52:b2:b2
       memory: 5072
       vcpu: 2
     - hostname: k8s-node-2
+      hostname_full: k8s-node-2.e2m
       ipv6: 2001:470:6d:22c:42::20
       mac_address: 52:54:00:52:2f:86
       memory: 5072
       vcpu: 2
     - hostname: k8s-node-3
+      hostname_full: k8s-node-3.e2m
       ipv6: 2001:470:6d:22c:42::30
       mac_address: 52:54:00:52:2f:b2
       memory: 5072
       vcpu: 2
     - hostname: k8s-node-4
+      hostname_full: k8s-node-4.e2m
       ipv6: 2001:470:6d:22c:42::40
       mac_address: 52:54:00:b2:2f:86
       memory: 5072
@@ -1,11 +1,10 @@
 ---
 - hosts:
-    - k8s-masters
-    - k8s-nodes
+    - k8s
   remote_user: hybris
   become: yes

   roles:
     - dns
     - proxy
-    - kubernetes
+    - kubernetes-base
@@ -1,37 +1,6 @@
 ---
-- hosts: k8s-masters
+- hosts: k8s_masters
   remote_user: hybris
   become: yes
-
-  tasks:
-    - name: initialize kubernetes master
-      shell: kubeadm init --apiserver-advertise-address=2001:470:6d:22c:42::1 --pod-network-cidr=2001:470:6d:22c:42::/80
-      delegate_to: k8s-master.e2m
-      run_once: yes
-
-    - shell: kubeadm token create --print-join-command
-      register: results
-
-    - debug:
-        var: results.stdout
-
-    - set_fact:
-        k8s_token: "{{ results.stdout | regex_search(regexp, '\\2') | first }}"
-      vars:
-        regexp: '([^\s]+\s){4}([^\s]+)'
-
-    - debug:
-        var: k8s_token
-
-    - set_fact:
-        k8s_ca_cert_hash: "{{ results.stdout | regex_search(regexp, '\\2') | first }}"
-      vars:
-        regexp: '([^\s]+\s){6}([^\s]+)'
-
-    - debug:
-        var: k8s_ca_cert_hash
-
-    - name: join nodes to cluster
-      shell: kubeadm join [2001:470:6d:22c:42::1]:6443 --token {{ k8s_token }} --discovery-token-ca-cert-hash sha256:{{ k8s_ca_cert_hash }}
-      delegate_to: "{{ item }}"
-      with_items: "{{ k8s-nodes }}"
+  roles:
+    - kubernetes-master
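The removed tasks above pulled the bootstrap token and the CA cert hash out of the `kubeadm token create --print-join-command` output with `regex_search`. A minimal standalone sketch of that filter chain, using an invented join command for illustration (in the playbook the string came from the registered `results.stdout`):

```yaml
# sketch only: demonstrates the regex_search calls from the removed tasks
- hosts: localhost
  gather_facts: no
  vars:
    # made-up example of what --print-join-command returns
    join_cmd: "kubeadm join [2001:470:6d:22c:42::1]:6443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:deadbeef"
  tasks:
    - set_fact:
        # 5th whitespace-separated word: the bootstrap token
        k8s_token: "{{ join_cmd | regex_search(token_re, '\\2') | first }}"
        # 7th whitespace-separated word: sha256:<hash>
        k8s_ca_cert_hash: "{{ join_cmd | regex_search(hash_re, '\\2') | first }}"
      vars:
        token_re: '([^\s]+\s){4}([^\s]+)'
        hash_re: '([^\s]+\s){6}([^\s]+)'

    - debug:
        msg: "token={{ k8s_token }}, ca cert hash={{ k8s_ca_cert_hash }}"
```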
4-kubeadm-nodes.yml (new file)
@@ -0,0 +1,6 @@
---
- hosts: k8s_nodes
  remote_user: hybris
  become: yes
  roles:
    - kubernetes-node
@@ -8,6 +8,7 @@ all:
     network_domain: e2m
     network_default_gw: 2001:470:6d:22c::1
     network_dns: 2001:470:6d:22c::1
+    network_cidr_pods: 2001:470:6d:22c:42::/80

     # ansible user vars
     remote_user_ssh_pubkey: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDXh0iMMtbTEhRSgIbgCunAuE+Q1iKycRVRBYWDCQ45xE8nHsg7K98fdv6LqQwkTqlojEHKp2/TPawk8V6w3MTgOHXi/WO9FyvhMjUcfdxTvny1z3Pj1j0NglKjyQ3t30snwOBYJiC2qDDtvRYNJs+Re/aSdVbygOHMf0UqCyP8DL7Tcj5sNPgnleEC7jE+YNbyDJo2KtNFXy3+AMbwa742D5Ygi5FqaBO090gVQfb3hbnZLw9CNSK7vjo7k5hJo8d1kcPDth1Vo12IDejL8Al3ArfGSy5IqYXs8Stj4YVRW+78AOpQZiInCaFybqjsGG+5tezrXHrxhjJPHuSCuXk9vxkcUk7aC5eSDqoCP7RTB+iQCPJ1ZvkVbaebExZGgHpD9t2Xo2bd/3BRagIxPTzKo9q4+qCCClhWR4iUSOr/YKAZBFDdoVWg2IMqbUHzvHX7TE7I8hbAdg6CpgwBoJm8OWmfjkDU6aYnF8z9l36VV7xjvvuYbOrhpCJ9LGFSfmG3wswo8OH4XyRSHd3y7qrSYIGujtymrzozXo2MrMU3Rj2bW5MIEZSxleIVje4PHWOfZwwn3hwvqM/nyknzfNwPCidTsP3Q2FwfBtZYo1cIXH41bzjs4Bxb8+KJqwTcerzB1pT4qsoPnoYW0fnZE7NHVNhfNx53LdctclFerHTLMw== hybris@sparks
@@ -15,16 +16,15 @@ all:
     ## does this make sense??
     source_cloud_image_name: CentOS-7-x86_64-GenericCloud.qcow2
     source_cloud_image_url: "https://cloud.centos.org/centos/7/images/{{ source_cloud_image_name }}"

   children:
     k8s:
       children:
-        k8s-masters:
+        k8s_masters:
           hosts:
             k8s-master.e2m:
               host_ipv6: 2001:470:6d:22c:42::1
               hostname: k8s-master.e2m
-        k8s-nodes:
+        k8s_nodes:
           hosts:
             k8s-node-1.e2m:
               host_hostname: k8s-node-1.e2m
notes (new file)
@@ -0,0 +1,98 @@
# kubeadm config images pull

# kubeadm init phase preflight

# kubeadm init phase certs all --apiserver-advertise-address 2001:470:6d:22c:42::1 --apiserver-cert-extra-sans k8s-master.e2m

# kubeadm init phase kubeconfig all --apiserver-advertise-address 2001:470:6d:22c:42::1 --node-name k8s-master.e2m

# kubeadm init phase kubelet-start

# echo "KUBELET_KUBEADM_ARGS="--cgroup-driver=systemd --network-plugin=kubenet --pod-infra-container-image=k8s.gcr.io/pause:3.1"" > /var/lib/kubelet/kubeadm-flags.env

# kubeadm init phase control-plane all

# kubeadm init phase upload-certs --config=kubeadm-master-config.cfg --upload-certs

# kubeadm init phase mark-control-plane --config kubeadm-master-config.cfg

# kubeadm init phase bootstrap-token --config kubeadm-master-config.cfg

# kubeadm init phase upload-config all --config kubeadm-master-config.cfg

# kubeadm init phase addon all --config kubeadm-master-config.cfg --apiserver-advertise-address 2001:470:6d:22c:42::1 --pod-network-cidr 2001:470:6d:22c:42::/80 --service-cidr 2001:470:6d:22c:49::/80

--------------------------------------------------------------------------------------------------------------

08:14 <hybris> because if we want to use --network-plugin=kubenet we have to run the individual kubeadm phases one by one
08:14 <hybris> https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init-phase/#cmd-phase-kubelet-start
08:15 <hybris> the 'phase kubelet-start' step then creates the file /var/lib/kubelet/kubeadm-flags.env, which by default contains '--network-plugin=cni'
20:34 <mue> if you also put that into a wiki page, that would be great, the day after tomorrow I'll have forgotten it again

--------------------------------------------------------------------------------------------------------------

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

--------------------------------------------------------------------------------------------------------------

/etc/cni/net.d/10-bridge-v6.conf
```json
{
  "cniVersion": "0.3.0",
  "name": "mynet",
  "type": "bridge",
  "bridge": "cbr0",
  "isDefaultGateway": true,
  "ipMasq": true,
  "hairpinMode": true,
  "ipam": {
    "type": "host-local",
    "ranges": [
      [
        {
          "subnet": "2001:470:6d:22c:42::/80",
          "gateway": "2001:470:6d:22c::1"
        }
      ]
    ]
  }
}
```
ansible snippet
```yaml
- name: create cni config directory
  file:
    path: /etc/cni/net.d
    state: directory

- name:
  template:
    src: ../files/kubenet-config.json.conf
    dest: /etc/cni/net.d/10-bridge-v6.conf
```

--------------------------------------------------------------------------------------------------------------

~/kubeadm_v6.cfg
```
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: v1.15.1
apiServer:
  extraArgs:
    advertise-address: 2001:470:6d:22c:42::1
    anonymous-auth: "false"
    enable-admission-plugins: AlwaysPullImages,DefaultStorageClass
controllerManager:
  extraArgs:
    bind-address: ::1
```

--------------------------------------------------------------------------------------------------------------

/var/lib/kubelet/kubeadm-flags.env
```
KUBELET_KUBEADM_ARGS="--cgroup-driver=systemd --network-plugin=kubenet --pod-infra-container-image=k8s.gcr.io/pause:3.1"
```
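The phase list at the top of these notes exists because kubenet cannot be selected through a plain `kubeadm init`: the flags file written by the kubelet-start phase has to be replaced before the control plane comes up. A rough sketch of how that manual sequence could be driven from Ansible — the command strings are copied from the notes above, while the task structure itself is an untested assumption and not part of this commit:

```yaml
# sketch only: kubeadm init run phase by phase so that
# /var/lib/kubelet/kubeadm-flags.env can be overwritten with the kubenet settings in between
- name: initialize master phase by phase (kubenet workaround from the notes)
  shell: "{{ item }}"
  with_items:
    - kubeadm config images pull
    - kubeadm init phase preflight
    - kubeadm init phase certs all --apiserver-advertise-address 2001:470:6d:22c:42::1 --apiserver-cert-extra-sans k8s-master.e2m
    - kubeadm init phase kubeconfig all --apiserver-advertise-address 2001:470:6d:22c:42::1 --node-name k8s-master.e2m
    - kubeadm init phase kubelet-start
    # kubelet-start writes --network-plugin=cni by default, so replace the file before the control plane starts
    - echo 'KUBELET_KUBEADM_ARGS="--cgroup-driver=systemd --network-plugin=kubenet --pod-infra-container-image=k8s.gcr.io/pause:3.1"' > /var/lib/kubelet/kubeadm-flags.env
    - kubeadm init phase control-plane all
    - kubeadm init phase upload-certs --config=kubeadm-master-config.cfg --upload-certs
    - kubeadm init phase mark-control-plane --config kubeadm-master-config.cfg
    - kubeadm init phase bootstrap-token --config kubeadm-master-config.cfg
    - kubeadm init phase upload-config all --config kubeadm-master-config.cfg
    - kubeadm init phase addon all --config kubeadm-master-config.cfg --apiserver-advertise-address 2001:470:6d:22c:42::1 --pod-network-cidr 2001:470:6d:22c:42::/80 --service-cidr 2001:470:6d:22c:49::/80
```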
roles/kubernetes-base/files/kubenet-config.json.conf (new file)
@@ -0,0 +1,20 @@
{
  "cniVersion": "0.3.0",
  "name": "mynet",
  "type": "bridge",
  "bridge": "cbr0",
  "isDefaultGateway": true,
  "ipMasq": true,
  "hairpinMode": true,
  "ipam": {
    "type": "host-local",
    "ranges": [
      [
        {
          "subnet": "2001:470:6d:22c:42::/80",
          "gateway": "2001:470:6d:22c::1"
        }
      ]
    ]
  }
}
@@ -14,29 +14,19 @@
     enabled: yes
     state: started

-- name: enable port 6443/tcp
+- name: open ports
   firewalld:
-    port: 6443/tcp
-    permanent: yes
-    state: enabled
-
-- name: enable port 10250/tcp
-  firewalld:
-    port: 10250/tcp
-    permanent: yes
-    state: enabled
-
-- name: enable port 6443/udp
-  firewalld:
-    port: 6443/udp
-    permanent: yes
-    state: enabled
-
-- name: enable port 10250/udp
-  firewalld:
-    port: 10250/udp
+    port: "{{ item }}"
     permanent: yes
     state: enabled
+  with_items:
+    - 6443/tcp
+    - 2379/tcp
+    - 2380/tcp
+    - 10250/tcp
+    - 10251/tcp
+    - 10252/tcp
+    - 10255/tcp

 ## reload firewalld after setting rules

@@ -1,18 +1,25 @@
 ---
-- name: clear yum package cache
-  shell: "{{ item }}"
-  args:
-    warn: false
-  with_items:
-    - yum clean all
-    - yum makecache
+- name: reboot
+  reboot:
+
+# - name: clear yum package cache
+#   shell: "{{ item }}"
+#   args:
+#     warn: false
+#   with_items:
+#     # - yum clean all
+#     # - yum-config-manager --enable updates

 - name: upgrade all packages
   yum:
     name: '*'
     state: latest

-- name: permanently disable selinux
+- name: temporarily disable SELinux
+  shell: setenforce 0
+
+- name: permanently disable SELinux
   lineinfile:
     dest: /etc/sysconfig/selinux
     regexp: "^SELINUX="
@@ -63,39 +70,29 @@
     src: ../files/kubernetes.repo
     dest: /etc/yum.repos.d/kubernetes.repo

-- name: create cni config directory
-  file:
-    path: /etc/cni/net.d
-    state: directory
-
-- name: copy cni config
-  template:
-    src: ../files/kube-router-cni.conf.j2
-    dest: /etc/cni/net.d/10-kuberouter.config
-    owner: root
-    group: root
-
 - name: clear yum package cache
   shell: yum clean all
   args:
     warn: false

-- name: install packages
+- name: install dependencies
   package:
     name: "{{ item }}"
     state: installed
   with_items:
-    - yum-utils
     - device-mapper-persistent-data
-    - lvm2
-    - kubernetes-cni

-- name: installing kubeadm, kubelet and kubectl
+- name: install lvm2
+  shell: yum install -y lvm2
+  args:
+    warn: false
+
+- name: installing kubernetes binaries (kubeadm, kubelet and kubectl)
   shell: "{{ item }}"
   args:
     warn: false
   with_items:
-    - yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
+    - yum install -y kubelet kubeadm kubectl
     - systemctl enable --now kubelet

 - name: add docker-ce repo
@@ -116,21 +113,18 @@
     src: ../files/docker-daemon.json
     dest: /etc/docker/daemon.json

-- name: create systemd unit directory for kubelet
+- name: touch /etc/default/kubelet
   file:
-    path: /etc/systemd/system/kubelet.service.d/
-    state: directory
+    path: /etc/default/kubelet
+    state: touch
+    owner: root
+    group: root

-- name: deploy kubelet service file
-  copy:
-    src: ../files/systemd-unit-kubelet.conf
-    dest: /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
-
-- name: set cgroup
+- name: set kubelet extra args
   lineinfile:
-    dest: /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
-    regexp: "^cgroup-driver="
-    line: "cgroup-driver=cgroupfs"
+    dest: /etc/default/kubelet
+    regexp: "^KUBELET_EXTRA_ARGS="
+    line: "KUBELET_EXTRA_ARGS=--fail-swap-on=false --node-ip={{ host_ipv6 }}"

 - name: restart services
   systemd:
@@ -145,5 +139,8 @@
 - name: open firewall ports
   import_tasks: firewalld.yml

-- name: update linux kernel
-  import_tasks: upgrade_kernel.yml
+# - name: update linux kernel
+#   import_tasks: upgrade_kernel.yml
+
+- name: reboot
+  reboot:
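For reference, the "set kubelet extra args" task above renders to a single line per host: on k8s-master.e2m, whose host_ipv6 is 2001:470:6d:22c:42::1 in the hosts file, it would leave `KUBELET_EXTRA_ARGS=--fail-swap-on=false --node-ip=2001:470:6d:22c:42::1` in /etc/default/kubelet (the file is created empty by the touch task, so lineinfile simply appends the line when no existing KUBELET_EXTRA_ARGS= line matches).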
roles/kubernetes-base/templates/kubenet-config.json.j2 (new file)
@@ -0,0 +1,20 @@
{
  "cniVersion": "0.3.0",
  "name": "mynet",
  "type": "bridge",
  "bridge": "cbr0",
  "isDefaultGateway": true,
  "ipMasq": true,
  "hairpinMode": true,
  "ipam": {
    "type": "host-local",
    "ranges": [
      [
        {
          "subnet": "2001:470:6d:22c:42::/80",
          "gateway": "2001:470:6d:22c::1"
        }
      ]
    ]
  }
}
roles/kubernetes-master/files/kubeadm-master-config.cfg (new file)
@@ -0,0 +1,11 @@
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: v1.15.1
apiServer:
  extraArgs:
    advertise-address: 2001:470:6d:22c:42::1
    anonymous-auth: "false"
    enable-admission-plugins: AlwaysPullImages,DefaultStorageClass
controllerManager:
  extraArgs:
    bind-address: ::1
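For now this config is only copied to /root by the role below; judging from the notes and the commented-out init task there, the intended consumer is presumably something like `kubeadm init --config=/root/kubeadm-master-config.cfg --pod-network-cidr=2001:470:6d:22c:42::/80` (path and flags assumed from those comments, not yet wired up in any active task).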
roles/kubernetes-master/tasks/main.yml (new file)
@@ -0,0 +1,39 @@
---

- name: copy master config
  template:
    src: ../files/kubeadm-master-config.cfg
    dest: /root/kubeadm-master-config.cfg


# - name: initialize kubernetes master
#   # shell: kubeadm init --apiserver-advertise-address=2001:470:6d:22c:42::1 --pod-network-cidr=2001:470:6d:22c:42::/80
#   # shell: kubeadm init --config=kubeadm-master-config.cfg --pod-network-cidr=2001:470:6d:22c:42::/80
#   shell: echo test
#   run_once: yes

# - name: Generate join command
#   command: kubeadm token create --print-join-command
#   register: join_command

# - name: Copy join command to local file
#   local_action: copy content="{{ join_command.stdout_lines[0] }}" dest="./join-command"

###
### kubectl init -> https://github.com/kubernetes/kubernetes/issues/44665#issuecomment-295216655
####

# - name: Setup kubeconfig
#   command: "{{ item }}"
#   with_items:
#     - sudo cp /etc/kubernetes/admin.conf $HOME/
#     - sudo chown $(id -u):$(id -g) $HOME/admin.conf
#     - export KUBECONFIG=$HOME/admin.conf
#   become: no

# - name: Setup kubeconfig
#   command: "{{ item }}"
#   with_items:
#     - mkdir -p ~/.kube
#     - cp -i /etc/kubernetes/admin.conf ~/.kube/config
#     - chown root:root ~/.kube/config
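The join flow is still entirely commented out, both here and in roles/kubernetes-node/tasks/main.yml below. Pieced together from those comments, it would look roughly like this once the master init works — a sketch, not part of the commit:

```yaml
# master side (roles/kubernetes-master): capture the join command on the controller
- name: generate join command
  command: kubeadm token create --print-join-command
  register: join_command

- name: copy join command to a local file on the controller
  local_action: copy content="{{ join_command.stdout_lines[0] }}" dest="./join-command"

# node side (roles/kubernetes-node): push the file to each node and run it
- name: copy the join command to the node
  copy:
    src: join-command
    dest: /tmp/join-command.sh
    mode: '0777'

- name: join the node to the cluster
  command: sh /tmp/join-command.sh
```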
roles/kubernetes-node/tasks/main.yml (new file)
@@ -0,0 +1,19 @@
---

- name: restart services
  systemd:
    name: kubelet
    state: restarted
    daemon_reload: yes

# - name: Copy the join command to server location
#   copy: src=join-command dest=/tmp/join-command.sh mode=0777
# - name: meh
#   wait_for_connection:
#   reboot:

# - name: Join the node to cluster
#   command: sh /tmp/join-command.sh
#   # command: kubeadm join [2001:470:6d:22c:42::1]:6443 --token 7d39kx.nlwuwxi2gx37yjod --discovery-token-ca-cert-hash sha256:bd0e8d215d8b7738f7c5a928484c9589cd4455fc8e292a8308cdd164e40766fd
#   async: '{{ async_timeout|default(1000) }}'
#   poll: '{{ async_poll|default(10) }}'
(deleted file)
@@ -1,9 +0,0 @@
-{
-  "name":"e2m",
-  "type":"bridge",
-  "bridge":"kube-bridge",
-  "isDefaultGateway":true,
-  "ipam": {
-    "type":"host-local"
-  }
-}
@@ -3,9 +3,9 @@
   lineinfile:
     dest: /etc/yum.conf
     regexp: "^proxy="
+    insertafter: "^[main]"
     line: "proxy=http://[2001:470:6d:22c::1]:3128"

-
 - name: ensure wgetrc exists
   file:
     path: /etc/wgetrc
@@ -49,7 +49,6 @@
     - { regexp: '^http_proxy', line: 'http_proxy=http://[2001:470:6d:22c::1]:3128' }
     - { regexp: '^https_proxy', line: 'https_proxy=http://[2001:470:6d:22c::1]:3128' }

-
 - name: ensure systemd conf folder exists
   file:
     path: /etc/systemd/system.conf.d
@@ -1,24 +1,27 @@
 ---
+
+- name: set terraform state directory
+  set_fact:
+    terraform_state_dir: "/home/{{ ansible_ssh_user }}/terraform/"
+
 - name: remove files and directories
   file:
-    path: "/home/{{ ansible_ssh_user }}/terraform/"
+    path: "{{ terraform_state_dir }}"
     state: "{{ item }}"
   with_items:
-    - absent
     - directory

 # TODO: make it actually download the image, lol (module:uri)
 - name: "download qcow2 cloud image"
   copy:
     src: "/home/{{ ansible_ssh_user }}/images/{{ source_cloud_image_name }}"
-    dest: "/home/{{ ansible_ssh_user }}/terraform/{{ source_cloud_image_name }}"
+    dest: "{{ terraform_state_dir }}{{ source_cloud_image_name }}"
     remote_src: yes

 - name: create vm definitions
   template:
-    src: "cloud-init.tf.j2"
-    dest: "/home/{{ ansible_ssh_user }}/terraform/{{ item.hostname }}.tf"
+    src: "terraform-libvirt-domain.tf.j2"
+    dest: "{{ terraform_state_dir }}{{ item.hostname }}.tf"
     owner: "{{ ansible_ssh_user }}"
     group: "{{ ansible_ssh_user }}"
   with_items: "{{ kubernetes }}"
@@ -26,7 +29,7 @@
 - name: create cloud-init config
   template:
     src: "cloud-init.cfg.j2"
-    dest: "/home/{{ ansible_ssh_user }}/terraform/{{ item.hostname }}.cloud_init.cfg"
+    dest: "{{ terraform_state_dir }}{{ item.hostname }}.cloud_init.cfg"
     owner: "{{ ansible_ssh_user }}"
     group: "{{ ansible_ssh_user }}"
   with_items: "{{ kubernetes }}"
@@ -34,7 +37,7 @@
 - name: create cloud-init network config
   template:
     src: "cloud-init-network.cfg.j2"
-    dest: "/home/{{ ansible_ssh_user }}/terraform/{{ item.hostname }}.cloud_init_network.cfg"
+    dest: "{{ terraform_state_dir }}{{ item.hostname }}.cloud_init_network.cfg"
     owner: "{{ ansible_ssh_user }}"
     group: "{{ ansible_ssh_user }}"
   with_items: "{{ kubernetes }}"
@@ -42,16 +45,18 @@
 - name: create libvirt_provider config
   template:
     src: "libvirt_provider.tf.j2"
-    dest: "/home/{{ ansible_ssh_user }}/terraform/libvirt_provider.tf"
+    dest: "{{ terraform_state_dir }}libvirt_provider.tf"
     owner: "{{ ansible_ssh_user }}"
     group: "{{ ansible_ssh_user }}"
+  with_items:
+    - "{{ terraform_state_dir }}"

 - name: initialize terraform
   shell: terraform init
   args:
-    chdir: /home/{{ ansible_ssh_user }}/terraform/
+    chdir: "{{ terraform_state_dir }}"

-- name: terraform deploy
-  terraform:
-    project_path: /home/{{ ansible_ssh_user }}/terraform
-    state: present
+# - name: terraform deploy
+#   terraform:
+#     project_path: /home/{{ ansible_ssh_user }}/terraform
+#     state: present
@@ -8,10 +8,11 @@ users:
 write_files:
   - content: |
       #!/bin/bash
-      # inet6_addr=$(ip a s | grep "inet6 2001:470:6d:22c:" | awk '{print substr($2,0)}')
-      sudo hostnamectl set-hostname {{ item.hostname }}
-      #sudo ip -6 addr add {{ item.ipv6 }}/64 dev eth0
+      sudo hostnamectl set-hostname {{ item.hostname_full }}
       sudo chown -R {{ ansible_ssh_user }}:{{ ansible_ssh_user }} /home/{{ ansible_ssh_user }}
+      #yum install qemu-guest-agent
+      #systemctl start qemu-guest-agent
+      #systemctl enable qemu-guest-agent
     path: /home/{{ ansible_ssh_user }}/cloud-init.sh
     owner: {{ ansible_ssh_user }}:{{ ansible_ssh_user }}
     permissions: '0744'
@@ -46,4 +46,3 @@ data "template_file" "user-data-{{ item.hostname }}" {
 data "template_file" "network-config-{{ item.hostname }}" {
   template = "${file("${path.module}/{{ item.hostname }}.cloud_init_network.cfg")}"
 }
-