many changes

This commit is contained in:
hybris 2019-08-08 21:20:54 +02:00
parent d4cb32b9c3
commit dccf8ca00e
22 changed files with 297 additions and 133 deletions

View File

@@ -10,31 +10,32 @@
vars:
# TODO: import hosts from a specific host group of the inventory instead of listing them here again
kubernetes:
# - hostname: podman-1
# ipv6: 2001:470:6d:22c:23::42
# mac_address: 52:54:00:b2:52:86
# memory: 8192
- hostname: k8s-master
hostname_full: k8s-master.e2m
ipv6: 2001:470:6d:22c:42::1
mac_address: 52:54:00:b2:52:86
memory: 5072
vcpu: 2
- hostname: k8s-node-1
hostname_full: k8s-node-1.e2m
ipv6: 2001:470:6d:22c:42::10
mac_address: 52:54:00:52:b2:b2
memory: 5072
vcpu: 2
- hostname: k8s-node-2
hostname_full: k8s-node-2.e2m
ipv6: 2001:470:6d:22c:42::20
mac_address: 52:54:00:52:2f:86
memory: 5072
vcpu: 2
- hostname: k8s-node-3
hostname_full: k8s-node-3.e2m
ipv6: 2001:470:6d:22c:42::30
mac_address: 52:54:00:52:2f:b2
memory: 5072
vcpu: 2
- hostname: k8s-node-4
hostname_full: k8s-node-4.e2m
ipv6: 2001:470:6d:22c:42::40
mac_address: 52:54:00:b2:2f:86
memory: 5072

View File

@@ -1,11 +1,10 @@
---
- hosts:
- k8s-masters
- k8s-nodes
- k8s
remote_user: hybris
become: yes
roles:
- dns
- proxy
- kubernetes
- kubernetes-base

View File

@@ -1,37 +1,6 @@
---
- hosts: k8s-masters
- hosts: k8s_masters
remote_user: hybris
become: yes
tasks:
- name: initialize kubernetes master
shell: kubeadm init --apiserver-advertise-address=2001:470:6d:22c:42::1 --pod-network-cidr=2001:470:6d:22c:42::/80
delegate_to: k8s-master.e2m
run_once: yes
- shell: kubeadm token create --print-join-command
register: results
- debug:
var: results.stdout
- set_fact:
k8s_token: "{{ results.stdout | regex_search(regexp, '\\2') | first }}"
vars:
regexp: '([^\s]+\s){4}([^\s]+)'
- debug:
var: k8s_token
- set_fact:
k8s_ca_cert_hash: "{{ results.stdout | regex_search(regexp, '\\2') | first }}"
vars:
regexp: '([^\s]+\s){6}([^\s]+)'
- debug:
var: k8s_ca_cert_hash
- name: join nodes to cluster
shell: kubeadm join [2001:470:6d:22c:42::1]:6443 --token {{ k8s_token }} --discovery-token-ca-cert-hash sha256:{{ k8s_ca_cert_hash }}
delegate_to: "{{ item }}"
with_items: "{{ k8s-nodes }}"
roles:
- kubernetes-master

6
4-kubeadm-nodes.yml Normal file
View File

@@ -0,0 +1,6 @@
---
- hosts: k8s_nodes
remote_user: hybris
become: yes
roles:
- kubernetes-node

View File

@@ -8,6 +8,7 @@ all:
network_domain: e2m
network_default_gw: 2001:470:6d:22c::1
network_dns: 2001:470:6d:22c::1
network_cidr_pods: 2001:470:6d:22c:42::/80
# ansible user vars
remote_user_ssh_pubkey: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDXh0iMMtbTEhRSgIbgCunAuE+Q1iKycRVRBYWDCQ45xE8nHsg7K98fdv6LqQwkTqlojEHKp2/TPawk8V6w3MTgOHXi/WO9FyvhMjUcfdxTvny1z3Pj1j0NglKjyQ3t30snwOBYJiC2qDDtvRYNJs+Re/aSdVbygOHMf0UqCyP8DL7Tcj5sNPgnleEC7jE+YNbyDJo2KtNFXy3+AMbwa742D5Ygi5FqaBO090gVQfb3hbnZLw9CNSK7vjo7k5hJo8d1kcPDth1Vo12IDejL8Al3ArfGSy5IqYXs8Stj4YVRW+78AOpQZiInCaFybqjsGG+5tezrXHrxhjJPHuSCuXk9vxkcUk7aC5eSDqoCP7RTB+iQCPJ1ZvkVbaebExZGgHpD9t2Xo2bd/3BRagIxPTzKo9q4+qCCClhWR4iUSOr/YKAZBFDdoVWg2IMqbUHzvHX7TE7I8hbAdg6CpgwBoJm8OWmfjkDU6aYnF8z9l36VV7xjvvuYbOrhpCJ9LGFSfmG3wswo8OH4XyRSHd3y7qrSYIGujtymrzozXo2MrMU3Rj2bW5MIEZSxleIVje4PHWOfZwwn3hwvqM/nyknzfNwPCidTsP3Q2FwfBtZYo1cIXH41bzjs4Bxb8+KJqwTcerzB1pT4qsoPnoYW0fnZE7NHVNhfNx53LdctclFerHTLMw== hybris@sparks
@@ -15,16 +16,15 @@ all:
## does this make sense??
source_cloud_image_name: CentOS-7-x86_64-GenericCloud.qcow2
source_cloud_image_url: "https://cloud.centos.org/centos/7/images/{{ source_cloud_image_name }}"
children:
k8s:
children:
k8s-masters:
k8s_masters:
hosts:
k8s-master.e2m:
host_ipv6: 2001:470:6d:22c:42::1
hostname: k8s-master.e2m
k8s-nodes:
k8s_nodes:
hosts:
k8s-node-1.e2m:
host_hostname: k8s-node-1.e2m

98
notes Normal file
View File

@@ -0,0 +1,98 @@
# kubeadm config images pull
# kubeadm init phase preflight
# kubeadm init phase certs all --apiserver-advertise-address 2001:470:6d:22c:42::1 --apiserver-cert-extra-sans k8s-master.e2m
# kubeadm init phase kubeconfig all --apiserver-advertise-address 2001:470:6d:22c:42::1 --node-name k8s-master.e2m
# kubeadm init phase kubelet-start
# echo "KUBELET_KUBEADM_ARGS="--cgroup-driver=systemd --network-plugin=kubenet --pod-infra-container-image=k8s.gcr.io/pause:3.1"" > /var/lib/kubelet/kubeadm-flags.env
# kubeadm init phase control-plane all
# kubeadm init phase upload-certs --config=kubeadm-master-config.cfg --upload-certs
# kubeadm init phase mark-control-plane --config kubeadm-master-config.cfg
# kubeadm init phase bootstrap-token --config kubeadm-master-config.cfg
# kubeadm init phase upload-config all --config kubeadm-master-config.cfg
# kubeadm init phase addon all --config kubeadm-master-config.cfg --apiserver-advertise-address 2001:470:6d:22c:42::1 --pod-network-cidr 2001:470:6d:22c:42::/80 --service-cidr 2001:470:6d:22c:49::/80
--------------------------------------------------------------------------------------------------------------
08:14 <hybris> weil wenn wir --network-plugin=kubenet nutzen moechten muessen wir mit kubeadm die einzelnen phasen einzeln ausfuehren
08:14 <hybris> https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init-phase/#cmd-phase-kubelet-start
08:15 <hybris> die 'phase kubelet-start' erstellt dir dann die file /var/lib/kubelet/kubeadm-flags.env wo per default '--network-plugin=cni' drinsteht
20:34 <mue> wennde das mal auch noch in ne wikiseite packst, waer super, uebermorgen hab ich das iweder vergessen
--------------------------------------------------------------------------------------------------------------
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
--------------------------------------------------------------------------------------------------------------
/etc/cni/net.d/10-bridge-v6.conf
```json
{
"cniVersion": "0.3.0",
"name": "mynet",
"type": "bridge",
"bridge": "cbr0",
"isDefaultGateway": true,
"ipMasq": true,
"hairpinMode": true,
"ipam": {
"type": "host-local",
"ranges": [
[
{
"subnet": "2001:470:6d:22c:42::/80",
"gateway": "2001:470:6d:22c::1"
}
]
]
}
}
```
ansible snippet
```yaml
- name: create cni config directory
file:
path: /etc/cni/net.d
state: directory
- name:
template:
src: ../files/kubenet-config.json.conf
dest: /etc/cni/net.d/10-bridge-v6.conf
```
--------------------------------------------------------------------------------------------------------------
~/kubeadm_v6.cfg
```
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: v1.15.1
apiServer:
extraArgs:
advertise-address: 2001:470:6d:22c:42::1
anonymous-auth: "false"
enable-admission-plugins: AlwaysPullImages,DefaultStorageClass
controllerManager:
extraArgs:
bind-address: ::1
```
--------------------------------------------------------------------------------------------------------------
/var/lib/kubelet/kubeadm-flags.env
```
KUBELET_KUBEADM_ARGS="--cgroup-driver=systemd --network-plugin=kubenet --pod-infra-container-image=k8s.gcr.io/pause:3.1"
```

View File

@@ -0,0 +1,20 @@
{
"cniVersion": "0.3.0",
"name": "mynet",
"type": "bridge",
"bridge": "cbr0",
"isDefaultGateway": true,
"ipMasq": true,
"hairpinMode": true,
"ipam": {
"type": "host-local",
"ranges": [
[
{
"subnet": "2001:470:6d:22c:42::/80",
"gateway": "2001:470:6d:22c::1"
}
]
]
}
}

View File

@@ -14,29 +14,19 @@
enabled: yes
state: started
- name: enable port 6443/tcp
- name: open ports
firewalld:
port: 6443/tcp
permanent: yes
state: enabled
- name: enable port 10250/tcp
firewalld:
port: 10250/tcp
permanent: yes
state: enabled
- name: enable port 6443/udp
firewalld:
port: 6443/udp
permanent: yes
state: enabled
- name: enable port 10250/udp
firewalld:
port: 10250/udp
port: "{{ item }}"
permanent: yes
state: enabled
with_items:
- 6443/tcp
- 2379/tcp
- 2380/tcp
- 10250/tcp
- 10251/tcp
- 10252/tcp
- 10255/tcp
## reload firewalld after setting rules

View File

@@ -1,18 +1,25 @@
---
- name: clear yum package cache
shell: "{{ item }}"
args:
warn: false
with_items:
- yum clean all
- yum makecache
- name: reboot
reboot:
# - name: clear yum package cache
# shell: "{{ item }}"
# args:
# warn: false
# with_items:
# # - yum clean all
# # - yum-config-manager --enable updates
- name: upgrade all packages
yum:
name: '*'
state: latest
- name: permanently disable selinux
- name: temporarily disable SELinux
shell: setenforce 0
- name: permanently disable SELinux
lineinfile:
dest: /etc/sysconfig/selinux
regexp: "^SELINUX="
@@ -63,39 +70,29 @@
src: ../files/kubernetes.repo
dest: /etc/yum.repos.d/kubernetes.repo
- name: create cni config directory
file:
path: /etc/cni/net.d
state: directory
- name: copy cni config
template:
src: ../files/kube-router-cni.conf.j2
dest: /etc/cni/net.d/10-kuberouter.config
owner: root
group: root
- name: clear yum package cache
shell: yum clean all
args:
warn: false
- name: install packages
- name: install dependencies
package:
name: "{{ item }}"
state: installed
with_items:
- yum-utils
- device-mapper-persistent-data
- lvm2
- kubernetes-cni
- name: installing kubeadm, kubelet and kubectl
- name: install lvm2
shell: yum install -y lvm2
args:
warn: false
- name: installing kubernetes binaries (kubeadm, kubelet and kubectl)
shell: "{{ item }}"
args:
warn: false
with_items:
- yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
- yum install -y kubelet kubeadm kubectl
- systemctl enable --now kubelet
- name: add docker-ce repo
@@ -116,21 +113,18 @@
src: ../files/docker-daemon.json
dest: /etc/docker/daemon.json
- name: create systemd unit directory for kubelet
- name: touch /etc/default/kubelet
file:
path: /etc/systemd/system/kubelet.service.d/
state: directory
path: /etc/default/kubelet
state: touch
owner: root
group: root
- name: deploy kubelet service file
copy:
src: ../files/systemd-unit-kubelet.conf
dest: /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
- name: set cgroup
- name: set kubelet extra args
lineinfile:
dest: /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
regexp: "^cgroup-driver="
line: "cgroup-driver=cgroupfs"
dest: /etc/default/kubelet
regexp: "^KUBELET_EXTRA_ARGS="
line: "KUBELET_EXTRA_ARGS=--fail-swap-on=false --node-ip={{ host_ipv6 }}"
- name: restart services
systemd:
@@ -145,5 +139,8 @@
- name: open firewall ports
import_tasks: firewalld.yml
- name: update linux kernel
import_tasks: upgrade_kernel.yml
# - name: update linux kernel
# import_tasks: upgrade_kernel.yml
- name: reboot
reboot:

View File

@@ -0,0 +1,20 @@
{
"cniVersion": "0.3.0",
"name": "mynet",
"type": "bridge",
"bridge": "cbr0",
"isDefaultGateway": true,
"ipMasq": true,
"hairpinMode": true,
"ipam": {
"type": "host-local",
"ranges": [
[
{
"subnet": "2001:470:6d:22c:42::/80",
"gateway": "2001:470:6d:22c::1"
}
]
]
}
}

View File

@@ -0,0 +1,11 @@
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: v1.15.1
apiServer:
extraArgs:
advertise-address: 2001:470:6d:22c:42::1
anonymous-auth: "false"
enable-admission-plugins: AlwaysPullImages,DefaultStorageClass
controllerManager:
extraArgs:
bind-address: ::1

View File

@@ -0,0 +1,39 @@
---
- name: copy master config
template:
src: ../files/kubeadm-master-config.cfg
dest: /root/kubeadm-master-config.cfg
# - name: initialize kubernetes master
# # shell: kubeadm init --apiserver-advertise-address=2001:470:6d:22c:42::1 --pod-network-cidr=2001:470:6d:22c:42::/80
# # shell: kubeadm init --config=kubeadm-master-config.cfg --pod-network-cidr=2001:470:6d:22c:42::/80
# shell: echo test
# run_once: yes
# - name: Generate join command
# command: kubeadm token create --print-join-command
# register: join_command
# - name: Copy join command to local file
# local_action: copy content="{{ join_command.stdout_lines[0] }}" dest="./join-command"
###
### kubectl init -> https://github.com/kubernetes/kubernetes/issues/44665#issuecomment-295216655
####
# - name: Setup kubeconfig
# command: "{{ item }}"
# with_items:
# - sudo cp /etc/kubernetes/admin.conf $HOME/
# - sudo chown $(id -u):$(id -g) $HOME/admin.conf
# - export KUBECONFIG=$HOME/admin.conf
# become: no
# - name: Setup kubeconfig
# command: "{{ item }}"
# with_items:
# - mkdir -p ~/.kube
# - cp -i /etc/kubernetes/admin.conf ~/.kube/config
# - chown root:root ~/.kube/config

View File

@@ -0,0 +1,19 @@
---
- name: restart services
systemd:
name: kubelet
state: restarted
daemon_reload: yes
# - name: Copy the join command to server location
# copy: src=join-command dest=/tmp/join-command.sh mode=0777
# - name: meh
# wait_for_connection:
# reboot:
# - name: Join the node to cluster
# command: sh /tmp/join-command.sh
# # command: kubeadm join [2001:470:6d:22c:42::1]:6443 --token 7d39kx.nlwuwxi2gx37yjod --discovery-token-ca-cert-hash sha256:bd0e8d215d8b7738f7c5a928484c9589cd4455fc8e292a8308cdd164e40766fd
# async: '{{ async_timeout|default(1000) }}'
# poll: '{{ async_poll|default(10) }}'

View File

@@ -1,9 +0,0 @@
{
"name":"e2m",
"type":"bridge",
"bridge":"kube-bridge",
"isDefaultGateway":true,
"ipam": {
"type":"host-local"
}
}

View File

@@ -3,9 +3,9 @@
lineinfile:
dest: /etc/yum.conf
regexp: "^proxy="
insertafter: "^[main]"
line: "proxy=http://[2001:470:6d:22c::1]:3128"
- name: ensure wgetrc exists
file:
path: /etc/wgetrc
@@ -49,7 +49,6 @@
- { regexp: '^http_proxy', line: 'http_proxy=http://[2001:470:6d:22c::1]:3128' }
- { regexp: '^https_proxy', line: 'https_proxy=http://[2001:470:6d:22c::1]:3128' }
- name: ensure systemd conf folder exists
file:
path: /etc/systemd/system.conf.d
@@ -64,4 +63,4 @@
lineinfile:
dest: /etc/systemd/system.conf.d/10-default-env.conf
regexp: "^DefaultEnvironment=HTTP_PROXY"
line: "DefaultEnvironment=HTTP_PROXY=http://[2001:470:6d:22c::1]:3128"
line: "DefaultEnvironment=HTTP_PROXY=http://[2001:470:6d:22c::1]:3128"

View File

@@ -1,24 +1,27 @@
---
- name: set terraform state directory
set_fact:
terraform_state_dir: "/home/{{ ansible_ssh_user }}/terraform/"
- name: remove files and directories
file:
path: "/home/{{ ansible_ssh_user }}/terraform/"
path: "{{ terraform_state_dir }}"
state: "{{ item }}"
with_items:
- absent
- directory
# TODO: make it actually download the image, lol (module:uri)
- name: "download qcow2 cloud image"
copy:
src: "/home/{{ ansible_ssh_user }}/images/{{ source_cloud_image_name }}"
dest: "/home/{{ ansible_ssh_user }}/terraform/{{ source_cloud_image_name }}"
dest: "{{ terraform_state_dir }}{{ source_cloud_image_name }}"
remote_src: yes
- name: create vm definitions
template:
src: "cloud-init.tf.j2"
dest: "/home/{{ ansible_ssh_user }}/terraform/{{ item.hostname }}.tf"
src: "terraform-libvirt-domain.tf.j2"
dest: "{{ terraform_state_dir }}{{ item.hostname }}.tf"
owner: "{{ ansible_ssh_user }}"
group: "{{ ansible_ssh_user }}"
with_items: "{{ kubernetes }}"
@@ -26,7 +29,7 @@
- name: create cloud-init config
template:
src: "cloud-init.cfg.j2"
dest: "/home/{{ ansible_ssh_user }}/terraform/{{ item.hostname }}.cloud_init.cfg"
dest: "{{ terraform_state_dir }}{{ item.hostname }}.cloud_init.cfg"
owner: "{{ ansible_ssh_user }}"
group: "{{ ansible_ssh_user }}"
with_items: "{{ kubernetes }}"
@@ -34,7 +37,7 @@
- name: create cloud-init network config
template:
src: "cloud-init-network.cfg.j2"
dest: "/home/{{ ansible_ssh_user }}/terraform/{{ item.hostname }}.cloud_init_network.cfg"
dest: "{{ terraform_state_dir }}{{ item.hostname }}.cloud_init_network.cfg"
owner: "{{ ansible_ssh_user }}"
group: "{{ ansible_ssh_user }}"
with_items: "{{ kubernetes }}"
@@ -42,16 +45,18 @@
- name: create libvirt_provider config
template:
src: "libvirt_provider.tf.j2"
dest: "/home/{{ ansible_ssh_user }}/terraform/libvirt_provider.tf"
dest: "{{ terraform_state_dir }}libvirt_provider.tf"
owner: "{{ ansible_ssh_user }}"
group: "{{ ansible_ssh_user }}"
with_items:
- "{{ terraform_state_dir }}"
- name: initialize terraform
shell: terraform init
args:
chdir: /home/{{ ansible_ssh_user }}/terraform/
chdir: "{{ terraform_state_dir }}"
- name: terraform deploy
terraform:
project_path: /home/{{ ansible_ssh_user }}/terraform
state: present
# - name: terraform deploy
# terraform:
# project_path: /home/{{ ansible_ssh_user }}/terraform
# state: present

View File

@@ -8,10 +8,11 @@ users:
write_files:
- content: |
#!/bin/bash
# inet6_addr=$(ip a s | grep "inet6 2001:470:6d:22c:" | awk '{print substr($2,0)}')
sudo hostnamectl set-hostname {{ item.hostname }}
#sudo ip -6 addr add {{ item.ipv6 }}/64 dev eth0
sudo hostnamectl set-hostname {{ item.hostname_full }}
sudo chown -R {{ ansible_ssh_user }}:{{ ansible_ssh_user }} /home/{{ ansible_ssh_user }}
#yum install qemu-guest-agent
#systemctl start qemu-guest-agent
#systemctl enable qemu-guest-agent
path: /home/{{ ansible_ssh_user }}/cloud-init.sh
owner: {{ ansible_ssh_user }}:{{ ansible_ssh_user }}
permissions: '0744'

View File

@@ -46,4 +46,3 @@ data "template_file" "user-data-{{ item.hostname }}" {
data "template_file" "network-config-{{ item.hostname }}" {
template = "${file("${path.module}/{{ item.hostname }}.cloud_init_network.cfg")}"
}