Init commit

Nikolai Rodionov 2024-03-28 18:29:07 +01:00
parent 8d7f97bf16
commit 542d3af8da
Signed by: allanger
GPG Key ID: 0AA46A90E25592AD
59 changed files with 1205 additions and 1 deletion

2
.containerignore Normal file

@ -0,0 +1,2 @@
venv
.ansible

3
.gitignore vendored

@ -1,3 +1,4 @@
# ---> Ansible
*.retry
.ansible
venv

5
.sops.yaml Normal file

@ -0,0 +1,5 @@
creation_rules:
- path_regex: .*
key_groups:
- age:
- age1lzythn62c4yug8w2wskckpgyjyja6rreyvgmwl9hj4mjvm0tvq6sl68d4z
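For reference, a hedged example of how a new secrets file might be encrypted against this rule (assuming the `sops` CLI and the matching age private key are available; the file name mirrors the one used later in this commit):
```bash
# The creation_rules above select the age recipient for any path
sops --encrypt --in-place group_vars/hetzner.sops.yaml
```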

27
Containerfile Normal file

@ -0,0 +1,27 @@
FROM python:3.12.2-slim-bullseye AS compile-image
RUN apt-get update
RUN apt-get install -y --no-install-recommends build-essential gcc libffi-dev git vim ssh
RUN mkdir -p /root/.ssh
# COPY does not expand ~, so use the absolute path to root's home
COPY ./resources/known_hosts /root/.ssh/known_hosts
RUN python -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"
COPY requirements.yml .
RUN pip install hcloud ansible
RUN ansible-galaxy install -r ./requirements.yml --force
FROM ghcr.io/allanger/dumb-downloader as dudo
ARG SOPS_VERSION=v3.8.1
ENV RUST_LOG=info
RUN mkdir -p /out
RUN dudo -l "https://github.com/getsops/sops/releases/download/{{ version }}/sops-{{ version }}.{{ os }}.{{ arch }}" -d /out/sops -p $SOPS_VERSION
RUN chmod +x /out/sops
FROM python:3.12.2-slim-bullseye AS build-image
COPY --from=compile-image /opt/venv /opt/venv
RUN apt-get update && \
apt-get install age -y
COPY . /src
COPY --from=dudo /out/sops /usr/bin/sops
ENV PATH="/opt/venv/bin:$PATH"
CMD ["ansible"]

37
Makefile Normal file

@ -0,0 +1,37 @@
CUSTOMER ?= softplayer
ENV ?= default
venv-create:
python3 -m venv venv
source venv/bin/activate && \
export ANSIBLE_COLLECTIONS_PATH=./.ansible/collections && \
export ANSIBLE_ROLES_PATH=./.ansible/roles && \
python3 -m pip install -r ./requirements.txt && \
python3 -m ansible galaxy install -r ./requirements.yml --force
bootstrap:
source venv/bin/activate && \
export ANSIBLE_HOST_KEY_CHECKING=false && \
export ANSIBLE_COLLECTIONS_PATH=./.ansible/collections && \
export ANSIBLE_ROLES_PATH=./.ansible/roles && \
export SP_CUSTOMER=softplayer && \
export SP_ENV=dev && \
export SP_STATE=present && \
python3 -m ansible playbook ./playbooks/other/ssh-key-gen/playbook.yml && \
python3 -m ansible playbook ./playbooks/providers/hetzner/playbook.yml && \
export ANSIBLE_INVENTORY=/tmp/outputs/inventory.yaml && \
export ANSIBLE_PRIVATE_KEY_FILE=/tmp/outputs/ssh_key && \
sleep 8 && \
python3 -m ansible playbook ./playbooks/systems/system-bootstrap/playbook.yml && \
python3 -m ansible playbook ./playbooks/systems/k3s-bootstrap/playbook.yml && \
python3 -m ansible playbook ./playbooks/other/k8s-create-user/playbook.yml
cleanup:
source venv/bin/activate && \
export ANSIBLE_HOST_KEY_CHECKING=false && \
export ANSIBLE_COLLECTIONS_PATH=./.ansible/collections && \
export ANSIBLE_ROLES_PATH=./.ansible/roles && \
export SP_CUSTOMER=softplayer && \
export SP_ENV=dev && \
export SP_STATE=absent && \
python3 -m ansible playbook ./playbooks/providers/hetzner/playbook.yml


@ -1,2 +1,17 @@
# softplayer-ansible
## Hetzner
There must be 3 kinds of playbooks:
- Bootstrap
- Maintenance
- Cleanup
Bootstrap playbooks should only be executed when a new environment is created. For example, they should:
1. Prepare the cloud infrastructure
2. Set up the nodes
3. Bootstrap a k3s cluster
4. Create an admin k8s config
Cleanup playbooks should simply remove the whole cloud infrastructure (see the sketch below).
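A minimal sketch of what the bootstrap and cleanup stages might look like when run by hand, assuming the playbook paths and `SP_*` variables used by the Makefile in this commit (the `make bootstrap` and `make cleanup` targets wrap the same steps):
```bash
# Bootstrap: generate an SSH key, create the Hetzner infra, then configure the nodes
source venv/bin/activate
export ANSIBLE_HOST_KEY_CHECKING=false
export SP_CUSTOMER=softplayer SP_ENV=dev SP_STATE=present
ansible-playbook ./playbooks/other/ssh-key-gen/playbook.yml
ansible-playbook ./playbooks/providers/hetzner/playbook.yml
export ANSIBLE_INVENTORY=/tmp/outputs/inventory.yaml
export ANSIBLE_PRIVATE_KEY_FILE=/tmp/outputs/ssh_key
ansible-playbook ./playbooks/systems/system-bootstrap/playbook.yml
ansible-playbook ./playbooks/systems/k3s-bootstrap/playbook.yml
ansible-playbook ./playbooks/other/k8s-create-user/playbook.yml

# Cleanup: the same Hetzner playbook with the state flipped to absent
export SP_STATE=absent
ansible-playbook ./playbooks/providers/hetzner/playbook.yml
```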

3
bin/ssh-to-every-host Executable file

@ -0,0 +1,3 @@
#!/usr/bin/env bash
for i in $(yq '.kubernetes.hosts | keys' inventory.yaml -o tsv); do ssh $i; done


@ -0,0 +1,21 @@
api_token: ENC[AES256_GCM,data:espaqpkBv6yn/Jsz6Eiv/xp0NJIiv871AioqeXbZ4epBk9FUsBLSDEIbbak7hN8EL3wLkLDVNZmPF1G8b1UQBg==,iv:CWL6uX1lRz1hisYYWu5K/btWJyIn4OfNpBuylHaY5UI=,tag:GWcqBqNjrwLDL7RfChvM+g==,type:str]
sops:
kms: []
gcp_kms: []
azure_kv: []
hc_vault: []
age:
- recipient: age1lzythn62c4yug8w2wskckpgyjyja6rreyvgmwl9hj4mjvm0tvq6sl68d4z
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBpcDNMeGtWeC9PdUFzMmlj
MUxFekZXVkVtREVSVFVnTFJoQzBJN05Bb2c0CnVaUmEwWlk0Z21KbHp5QmxBQnlB
aUxGUjJXNXVXRXMrVFArMDRXOVJFRmsKLS0tICt2SlFtT1o2QTFVYkZoeHpFU3d4
U0FzT1h2V1ZUSVoxcndVL2tlYnNpZDQKrLmIImEyY/2DNsX8xzthSNRL9/BGxB82
eksaJS+98LD7byPoxtBlbqqbxvnj5qr8D7G3wWe0eIfHzj/a1MtKLQ==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2024-03-12T13:34:49Z"
mac: ENC[AES256_GCM,data:t3OtDE8hH4hiqfE0CemCm0rQ4psLeMnMcqFtz1qki5gybmAxxFIiF+Y/9NuRZk2h8YPLyLAafWVfUBQ2vrcL4BkfexrSS3ouomibbfK94C5FZ7XIhl0K0grHKQa8oTvL3ErACPbnsKCa59WhGxmyJPi3MbZ9cWnMNRiv/BdYb6Y=,iv:o0cGK/WSwaA7EfkxofJEez3fwt0kkGhJrZR5Ls2sJAs=,tag:I/LFakV5N6SQqdXY7Cuauw==,type:str]
pgp: []
unencrypted_suffix: _unencrypted
version: 3.8.1

13
group_vars/packages.yaml Normal file

@ -0,0 +1,13 @@
common_packages:
- name: python3-cryptography
state: present
- name: iscsi-initiator-utils
state: present
- name: nfs-utils
state: present
- name: cifs-utils
state: present
common_services:
- name: iscsid
state: started
enabled: true

13
group_vars/sysctl.yaml Normal file

@ -0,0 +1,13 @@
common_sysctl:
- name: fs.inotify.max_user_watches
value: 2099999999
set: true
state: present
- name: fs.inotify.max_user_instances
value: 2099999999
set: true
state: present
- name: fs.inotify.max_queued_events
value: 2099999999
set: true
state: present

11
group_vars/users.yaml Normal file

@ -0,0 +1,11 @@
user_groups:
- name: wheel
state: present
sudo: true
users:
- name: admin
groups:
- wheel
ssh_keys:
- key: "{{ lookup('file', '/tmp/outputs/ssh_key.pub') }}"
state: present


@ -0,0 +1,6 @@
# ---> Ansible
*.retry
venv
.ansible
config


@ -0,0 +1,34 @@
venv: venv-create venv-install-deps venv-install-reqs
run: venv venv-run
check: venv venv-check
venv-create:
python3 -m venv venv
venv-install-deps:
source venv/bin/activate &&\
python3 -m pip install hcloud ansible ansible-lint
venv-install-reqs:
source venv/bin/activate &&\
python3 -m ansible galaxy role install -r ./requirements.yml -p ./.ansible/roles
venv-install-reqs-force:
source venv/bin/activate &&\
python3 -m ansible galaxy role install -r ./requirements.yml --force -p ./.ansible/roles
venv-check:
source venv/bin/activate &&\
export ANSIBLE_COLLECTIONS_PATH=./.ansible/collections &&\
export ANSIBLE_ROLES_PATH=./.ansible/roles &&\
python3 -m ansible playbook playbook.yml --check -i ./inventory.yaml
venv-run:
source venv/bin/activate &&\
export ANSIBLE_COLLECTIONS_PATH=./.ansible/collections &&\
export ANSIBLE_ROLES_PATH=$(PWD)/.ansible/roles &&\
python3 -m ansible playbook playbook.yml -i /tmp/outputs/inventory.yaml
venv-lint:
venv/bin/ansible-lint playbook.yml


@ -0,0 +1,15 @@
# k8s-ansible-playbooks
## Get a config
To get a k8s config, just run the script:
```bash
./bin/get_configs $USERNAME
```
It will create a config file in the repo root dir:
```bash
cat config
```
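A usage example (assuming the generated `config` file is a standard kubeconfig):
```bash
export KUBECONFIG="$PWD/config"
kubectl get nodes
```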


@ -0,0 +1,2 @@
[defaults]
vars_plugins_enabled = host_group_vars,community.sops.sops


@ -0,0 +1,11 @@
- hosts: k8s
name: Create k8s users
remote_user: admin
become: true
roles:
- ansible-create-k8s-user
vars:
kubectl:
version: v1.27.4
arch: amd64
working_dir: /home/admin/.k8s


@ -0,0 +1,5 @@
# ---> Ansible
*.retry
# ---> Python
venv


@ -0,0 +1,5 @@
creation_rules:
- path_regex: .*
key_groups:
- age:
- age1lzythn62c4yug8w2wskckpgyjyja6rreyvgmwl9hj4mjvm0tvq6sl68d4z


@ -0,0 +1,17 @@
Permission is hereby granted, without written agreement and without
license or royalty fees, to use, copy, modify, and distribute this
software and its documentation for any purpose, provided that the
above copyright notice and the following two paragraphs appear in
all copies of this software.
IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.


@ -0,0 +1,38 @@
venv: venv-create venv-install-deps
run: venv venv-run
check: venv venv-check
venv-create:
python3 -m venv venv
venv-install-deps:
source venv/bin/activate &&\
python3 -m pip install ansible ansible-lint
venv-check:
source venv/bin/activate &&\
export ANSIBLE_COLLECTIONS_PATH=./.ansible/collections &&\
export ANSIBLE_ROLES_PATH=./.ansible/roles &&\
python3 -m ansible playbook playbook.yml --check -i ./inventory.yaml
venv-run:
source venv/bin/activate &&\
export SP_CUSTOMER=allanger && \
export SP_ENV=default && \
export SP_STATE=present && \
export ANSIBLE_COLLECTIONS_PATH=./.ansible/collections &&\
export ANSIBLE_ROLES_PATH=$(PWD)/.ansible/roles &&\
python3 -m ansible playbook playbook.yml -i ./inventory.yaml
venv-run-cleanup:
source venv/bin/activate &&\
export SP_CUSTOMER=allanger && \
export SP_ENV=default && \
export SP_STATE=absent && \
export ANSIBLE_COLLECTIONS_PATH=./.ansible/collections &&\
export ANSIBLE_ROLES_PATH=$(PWD)/.ansible/roles &&\
python3 -m ansible playbook playbook.yml -i ./inventory.yaml
venv-lint:
venv/bin/ansible-lint playbook.yml


@ -0,0 +1,22 @@
# hetzner-ansible-playbook
Repo for managing the Hetzner infrastructure
## Removing stuff
Since the state of the config is the Ansible code itself, you can't just remove something from the code and expect it to be removed from Hetzner.
Each entity has a `state` variable: to remove anything, set its state to `absent` and run the playbook; only after that can you remove it from the code.
Also, please create a git commit in which the object with the `absent` state is still tracked.
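One way this might look (an illustrative snippet based on the floating IP task from the Hetzner playbook, kept in the code for one more commit):
```yaml
- name: Create a floating IP
  hetzner.hcloud.hcloud_floating_ip:
    name: "{{ customer }}-{{ env }}"
    server: "{{ customer }}-{{ env }}"
    type: ipv4
    state: absent  # was "{{ state }}"; run the playbook once, commit, then delete the task
    api_token: "{{ api_token }}"
```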
## Outputs
After running the role you'll see three variables being logged in the last step:
- Server public IP -> It should be used for `ssh` connection to the server
- Load balancer public IP -> It should be used by k8s as the load balancer IP
- Volume device name -> It's the name of the device that should be mounted for Longhorn
## Notes
### Resize the volume
Don't forget to resize the filesystem; it has to be done manually:
- https://docs.hetzner.com/cloud/volumes/faq/
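A minimal sketch of the manual step, assuming the volume is formatted as ext4 and shows up under the device name printed by the playbook (here `/dev/sdb`, an illustrative value):
```bash
# Grow the ext4 filesystem to fill the resized Hetzner volume
sudo resize2fs /dev/sdb
df -h /mnt/volume  # illustrative mount point; verify the new size
```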


@ -0,0 +1,20 @@
- name: Generate an SSH key
hosts: localhost
connection: local
vars:
output_dir: /tmp/outputs
tasks:
- name: Create the output directory
ansible.builtin.file:
path: "{{ output_dir }}"
state: directory
mode: '0755'
register: workdir
- name: Generate an ed25519 OpenSSH keypair
community.crypto.openssh_keypair:
path: "{{ output_dir}}/ssh_key"
type: ed25519
- name: Register the public key as a fact
ansible.builtin.set_fact:
pub_key: "{{ lookup('file', '/tmp/outputs/ssh_key.pub') }}"


@ -0,0 +1,17 @@
Permission is hereby granted, without written agreement and without
license or royalty fees, to use, copy, modify, and distribute this
software and its documentation for any purpose, provided that the
above copyright notice and the following two paragraphs appear in
all copies of this software.
IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.


@ -0,0 +1,41 @@
venv: venv-create venv-install-deps venv-install-reqs
run: venv venv-run
check: venv venv-check
venv-create:
python3 -m venv venv
venv-install-deps:
source venv/bin/activate &&\
python3 -m pip install hcloud ansible ansible-lint
venv-install-reqs:
source venv/bin/activate &&\
python3 -m ansible galaxy role install -r ./requirements.yml -p ./.ansible/roles
venv-install-reqs-force:
source venv/bin/activate &&\
python3 -m ansible galaxy role install -r ./requirements.yml --force -p ./.ansible/roles
venv-check:
source venv/bin/activate &&\
export ANSIBLE_COLLECTIONS_PATH=./.ansible/collections &&\
export ANSIBLE_ROLES_PATH=./.ansible/roles &&\
python3 -m ansible playbook playbook.yml --check -i ./inventory.yaml
venv-run:
source venv/bin/activate &&\
export ANSIBLE_COLLECTIONS_PATH=./.ansible/collections &&\
export ANSIBLE_ROLES_PATH=$(PWD)/.ansible/roles &&\
python3 -m ansible playbook playbook.yml -i ./inventory.yaml
venv-run-cleanup:
source venv/bin/activate &&\
export SP_STATE=absent && \
export ANSIBLE_COLLECTIONS_PATH=./.ansible/collections &&\
export ANSIBLE_ROLES_PATH=$(PWD)/.ansible/roles &&\
python3 -m ansible playbook playbook.yml -i ./inventory.yaml
venv-lint:
venv/bin/ansible-lint playbook.yml


@ -0,0 +1,22 @@
# hetzner-ansible-playbook
Repo for managing the Hetzner infrastructure
## Removing stuff
Since the state of the config is the Ansible code itself, you can't just remove something from the code and expect it to be removed from Hetzner.
Each entity has a `state` variable: to remove anything, set its state to `absent` and run the playbook; only after that can you remove it from the code.
Also, please create a git commit in which the object with the `absent` state is still tracked.
## Outputs
After running the role you'll see three variables being logged in the last step:
- Server public IP -> It should be used for `ssh` connection to the server
- Load balancer public IP -> It should be used by k8s as the load balancer IP
- Volume device name -> It's the name of the device that should be mounted for Longhorn
## Notes
### Resize the volume
Don't forget to resize the filesystem; it has to be done manually:
- https://docs.hetzner.com/cloud/volumes/faq/


@ -0,0 +1,88 @@
- name: Hetzner playbook
hosts: localhost
connection: local
vars:
output_dir: /tmp/outputs
customer: "{{ lookup('ansible.builtin.env', 'SP_CUSTOMER') }}"
env: "{{ lookup('ansible.builtin.env', 'SP_ENV') }}"
state: "{{ lookup('ansible.builtin.env', 'SP_STATE') }}"
tags:
- cloud
tasks:
- name: Load encrypted credentials
community.sops.load_vars:
file: ../../../group_vars/hetzner.sops.yaml
# --------------------------------------------------------
# -- Prepare SSH keys
# --------------------------------------------------------
- name: Add SSH keys to the hetzner subscription
hetzner.hcloud.hcloud_ssh_key:
api_token: "{{ api_token }}"
name: "{{ customer }}-{{ env }}"
public_key: "{{ lookup('file', '/tmp/outputs/ssh_key.pub') }}"
state: "{{ state }}"
# --------------------------------------------------------
# -- Prepare private networks
# --------------------------------------------------------
- name: Prepare private networks
hetzner.hcloud.hcloud_network:
name: "{{ customer }}-{{ env }}"
ip_range: 10.0.0.0/16
state: "{{ state }}"
api_token: "{{ api_token }}"
# --------------------------------------------------------
# -- Prepare subnets
# --------------------------------------------------------
- name: Prepare subnets
when: state == "present"
hetzner.hcloud.hcloud_subnetwork:
network: "{{ customer }}-{{ env }}"
ip_range: 10.0.0.0/16
state: "{{ state }}"
network_zone: eu-central
type: cloud
api_token: "{{ api_token }}"
# --------------------------------------------------------
# -- Create servers
# --------------------------------------------------------
- name: Create server
hetzner.hcloud.hcloud_server:
name: "{{ customer }}-{{ env }}"
server_type: cx21
image: rocky-9
state: "{{ state }}"
api_token: "{{ api_token }}"
ssh_keys:
- "{{ customer }}-{{ env }}"
firewalls:
- admin
private_networks:
- "{{ customer }}-{{ env }}"
location: hel1
register: server_data
# --------------------------------------------------------
# -- Create floating IPs
# --------------------------------------------------------
- name: Create a floating IP
hetzner.hcloud.hcloud_floating_ip:
name: "{{ customer }}-{{ env }}"
server: "{{ customer }}-{{ env }}"
type: ipv4
state: "{{ state }}"
api_token: "{{ api_token }}"
register: floating_ips_data
- name: Display important output data
block:
- name: Display info about the servers' public IPs
ansible.builtin.debug:
msg:
- "{{ server_data }}"
when: 'server_data'
- name: Generate new inventory
ansible.builtin.template:
src: templates/inventory.yaml.j2
dest: "{{ output_dir }}/inventory.yaml"
vars:
ipv4_address: "{{ server_data.hcloud_server.ipv4_address }}"
cluster_name: "{{ customer }}-{{ env }}"


@ -0,0 +1,46 @@
servers:
hosts:
{{ ipv4_address }}:
vars:
ansible_ssh_private_key_file: /tmp/outputs/ssh_key
k3s_cluster:
children:
server:
hosts:
{{ ipv4_address }}:
vars:
ansible_port: 22
k3s_version: v1.26.9+k3s1
token: "mytoken" # Use ansible vault if you want to keep it secret
api_endpoint: "{{ ipv4_address }}"
extra_server_args: |-
--write-kubeconfig-mode=644 \
--node-name="k3s-1" \
--tls-san="{{ ipv4_address }}" \
--disable-cloud-controller \
--disable-helm-controller \
--disable metrics-server \
--disable local-storage \
--disable traefik \
--cluster-cidr="10.244.0.0/16" \
--flannel-backend none \
--disable coredns
extra_agent_args: ""
ansible_ssh_private_key_file: /tmp/outputs/ssh_key
k8s:
hosts:
{{ ipv4_address }}:
name: {{ cluster_name }}
kubernetes_server: https://{{ ipv4_address }}:6443
k8s_config_path: /etc/rancher/k3s/k3s.yaml
k8s_cert_path: /var/lib/rancher/k3s/server/tls
k8s_cert_crt_file: client-ca.crt
k8s_cert_key_file: client-ca.key
users:
- cluster: default
host_user: admin
username: admin
download_config: true
vars:
ansible_ssh_private_key_file: /tmp/outputs/ssh_key


@ -0,0 +1 @@
disk_device_name: {{ disk_device_name }}


@ -0,0 +1,5 @@
# ---> Ansible
*.retry
# ---> Python
venv


@ -0,0 +1,17 @@
Permission is hereby granted, without written agreement and without
license or royalty fees, to use, copy, modify, and distribute this
software and its documentation for any purpose, provided that the
above copyright notice and the following two paragraphs appear in
all copies of this software.
IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.


@ -0,0 +1,33 @@
venv: venv-create venv-install-deps venv-install-reqs
run: venv venv-run
check: venv venv-check
venv-create:
python3 -m venv venv
venv-install-deps:
source venv/bin/activate &&\
python3 -m pip install hcloud ansible ansible-lint
venv-install-reqs:
source venv/bin/activate &&\
python3 -m ansible galaxy role install -r ./requirements.yml -p ./.ansible/roles
venv-install-reqs-force:
source venv/bin/activate &&\
python3 -m ansible galaxy role install -r ./requirements.yml --force -p ./.ansible/roles
venv-check:
source venv/bin/activate &&\
export ANSIBLE_COLLECTIONS_PATH=./.ansible/collections &&\
export ANSIBLE_ROLES_PATH=./.ansible/roles &&\
python3 -m ansible playbook playbook.yml --check -i /tmp/outputs/inventory.yaml
venv-run:
source venv/bin/activate &&\
export ANSIBLE_COLLECTIONS_PATH=./.ansible/collections &&\
export ANSIBLE_ROLES_PATH=$(PWD)/.ansible/roles &&\
python3 -m ansible playbook playbook.yml -i /tmp/outputs/inventory.yaml
venv-lint:
venv/bin/ansible-lint playbook.yml


@ -0,0 +1,22 @@
# systems-ansible-playbooks
Manage servers with ansible
## K3s
I'm not configuring k3s with ansible; it's configured with the following flags:
curl -sfL https://get.k3s.io | sh -s - server \
--write-kubeconfig-mode=644 \
--node-name="$(hostname -f)" \
--kubelet-arg "allowed-unsafe-sysctls=net.ipv4.ip_forward" \
--tls-san="$(hostname -I | awk '{print $1}')" \
--disable-cloud-controller \
--disable-helm-controller \
--disable metrics-server \
--disable local-storage \
--disable traefik \
--cluster-cidr="10.244.0.0/16" \
--flannel-backend none \
--disable coredns \
--cluster-init


@ -0,0 +1,2 @@
[defaults]
vars_plugins_enabled = host_group_vars,community.sops.sops


@ -0,0 +1,22 @@
---
- name: Cluster prep
hosts: k3s_cluster
remote_user: admin
gather_facts: true
become: true
roles:
- role: ./.ansible/roles/k3s-ansible/roles/prereq
- name: Setup K3S server
hosts: server
remote_user: admin
become: true
roles:
- role: ./.ansible/roles/k3s-ansible/roles/k3s_server
- name: Setup K3S agent
hosts: agent
remote_user: admin
become: true
roles:
- role: ./.ansible/roles/k3s-ansible/roles/k3s_agent


@ -0,0 +1,32 @@
---
- name: Netplan config
when: ansible_distribution == 'Ubuntu'
block:
- name: Update the netplan config
become: true
ansible.builtin.template:
src: templates/60-floating-ip.j2
dest: /etc/netplan/60-floating-ip.yaml
owner: root
group: root
mode: '0600'
vars:
publicIP: 195.201.250.50
- name: Apply a new config
become: true
ansible.builtin.shell: netplan apply
- name: Ifcfg config
when: ansible_distribution == 'Rocky'
block:
- name: Update the ifcfg config
notify: Restart the network service
become: true
ansible.builtin.template:
src: templates/ifcfg-eth0:1.j2
dest: /etc/sysconfig/network-scripts/ifcfg-eth0:1
owner: root
group: root
mode: '0600'
vars:
publicIP: 195.201.249.91


@ -0,0 +1,79 @@
- name: Update ubuntu repos
when: ansible_distribution == 'Ubuntu'
become: true
ansible.builtin.apt:
update_cache: true
- name: Update yum repos
when: ansible_distribution == 'Fedora' or ansible_distribution == 'Rocky'
become: true
ansible.builtin.yum:
update_cache: true
# ---------------------------------------------------------------------
# Upgrade system if it's the first ansible run
# ---------------------------------------------------------------------
- name: Check that the ansible_initialized file exists
become: true
ansible.builtin.stat:
path: /etc/ansible_initialized
register: stat_result
- name: Upgrade system
when: not stat_result.stat.exists
block:
- name: Upgrade Ubuntu packages
when: ansible_distribution == 'Ubuntu'
become: true
ansible.builtin.apt:
upgrade: full
- name: Upgrade all packages
when: ansible_distribution == 'Fedora' or ansible_distribution == 'Rocky'
become: true
ansible.builtin.yum:
name: '*'
state: latest
- name: Reboot to apply upgrades
when: not stat_result.stat.exists
become: true
ansible.builtin.reboot:
- name: Create the ansible_initialized file
become: true
ansible.builtin.file:
path: /etc/ansible_initialized
state: touch
when: not stat_result.stat.exists
- name: Install host-specific packages
when: packages is defined
become: true
ansible.builtin.package:
name: "{{ item.name }}"
state: "{{ item.state }}"
loop: "{{ packages }}"
- name: Install common packages
become: true
ansible.builtin.package:
name: "{{ item.name }}"
state: "{{ item.state }}"
loop: "{{ common_packages }}"
- name: Configure host-specific services
when: services is defined
become: true
ansible.builtin.systemd:
state: "{{ item.state }}"
name: "{{ item.name }}"
enabled: "{{ item.enabled }}"
loop: "{{ services }}"
- name: Configure common services
become: true
ansible.builtin.systemd:
state: "{{ item.state }}"
name: "{{ item.name }}"
enabled: "{{ item.enabled }}"
loop: "{{ common_services }}"


@ -0,0 +1,20 @@
- name: Install host-specific sysctl
when: sysctl is defined
become: true
ansible.posix.sysctl:
name: "{{ item.name }}"
value: "{{ item.value }}"
sysctl_set: "{{ item.set }}"
state: "{{ item.state }}"
reload: true
loop: "{{ sysctl }}"
- name: Install common sysctl
become: true
ansible.posix.sysctl:
name: "{{ item.name }}"
value: "{{ item.value }}"
sysctl_set: "{{ item.set }}"
state: "{{ item.state }}"
reload: true
loop: "{{ common_sysctl }}"


@ -0,0 +1,33 @@
- name: Prepare global users
block:
- name: Ensure required groups exist
ansible.builtin.group:
name: "{{ item.name }}"
state: "{{ item.state }}"
loop: "{{ user_groups }}"
- name: Allow passwordless sudo for certain groups
ansible.builtin.lineinfile:
dest: /etc/sudoers
state: present
regexp: '^%wheel'
line: '%wheel ALL=(ALL) NOPASSWD: ALL'
validate: 'visudo -cf %s'
when: 'item.sudo'
loop: "{{ user_groups }}"
- name: Create the users
ansible.builtin.user:
name: "{{ item.name }}"
shell: /bin/bash
groups: "{% for grp in item.groups %}{{ grp }}{% if not loop.last %},{% endif %}{% endfor %}"
append: false
password:
loop: "{{ users }}"
- name: Set authorized keys for user
ansible.posix.authorized_key:
user: "{{ item.0.name }}"
state: "{{ item.1.state }}"
key: "{{ item.1.key }}"
loop: "{{ users | subelements('ssh_keys') }}"


@ -0,0 +1,33 @@
# -----------------------------------
# -- Configure volumes
# -----------------------------------
- name: Configure volumes
when: volumes is defined
block:
- name: Ensure volumes exist
become: true
community.general.parted:
device: "{{ item.device }}"
unit: KiB
loop: "{{ volumes }}"
- name: Create a directory if it does not exist
become: true
ansible.builtin.file:
path: "{{ item.dir }}"
state: directory
mode: '0755'
loop: "{{ volumes }}"
- name: Create an ext4 filesystem
become: true
community.general.filesystem:
fstype: "{{ item.fstype }}"
dev: "{{ item.device }}"
loop: "{{ volumes }}"
- name: Remount volume to another location
become: true
ansible.posix.mount:
path: "{{ item.dir }}"
src: "{{ item.device }}"
state: mounted
fstype: "{{ item.fstype }}"
loop: "{{ volumes }}"


@ -0,0 +1,8 @@
---
network:
version: 2
renderer: networkd
ethernets:
eth0:
addresses:
- {{ publicIP }}/32


@ -0,0 +1,7 @@
BOOTPROTO=static
DEVICE=eth0:1
IPADDR={{ publicIP }}
PREFIX=32
TYPE=Ethernet
USERCTL=no
ONBOOT=yes


@ -0,0 +1,5 @@
# ---> Ansible
*.retry
# ---> Python
venv


@ -0,0 +1,17 @@
Permission is hereby granted, without written agreement and without
license or royalty fees, to use, copy, modify, and distribute this
software and its documentation for any purpose, provided that the
above copyright notice and the following two paragraphs appear in
all copies of this software.
IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.


@ -0,0 +1,23 @@
check: venv-install-deps venv-install-reqs venv-check
run: venv venv-install-deps venv-install-reqs venv-run
venv:
python3 -m venv venv
venv-install-deps:
source venv/bin/activate &&\
python3 -m pip install hcloud ansible
venv-install-reqs:
source venv/bin/activate &&\
python3 -m ansible galaxy install -r ./requirements.yml
venv-check:
source venv/bin/activate &&\
python3 -m ansible playbook playbook.yml --check -i ./inventory.yaml
venv-run:
source venv/bin/activate &&\
python3 -m ansible playbook playbook.yml -i /tmp/outputs/inventory.yaml


@ -0,0 +1,22 @@
# systems-ansible-playbooks
Manage servers with ansible
## K3s
I'm not configuring k3s with ansible; it's configured with the following flags:
curl -sfL https://get.k3s.io | sh -s - server \
--write-kubeconfig-mode=644 \
--node-name="$(hostname -f)" \
--kubelet-arg "allowed-unsafe-sysctls=net.ipv4.ip_forward" \
--tls-san="$(hostname -I | awk '{print $1}')" \
--disable-cloud-controller \
--disable-helm-controller \
--disable metrics-server \
--disable local-storage \
--disable traefik \
--cluster-cidr="10.244.0.0/16" \
--flannel-backend none \
--disable coredns \
--cluster-init


@ -0,0 +1,2 @@
[defaults]
vars_plugins_enabled = host_group_vars,community.sops.sops


@ -0,0 +1,25 @@
---
- name: Prepare users
hosts: servers
remote_user: root
vars_files:
- ../../../group_vars/users.yaml
tags:
- system
tasks:
- name: Prepare Users
ansible.builtin.import_tasks: ./tasks/users.yaml
- name: Prepare systems
hosts: servers
remote_user: admin
vars_files:
- ../../../group_vars/packages.yaml
- ../../../group_vars/sysctl.yaml
tags:
- system
tasks:
- name: Install packages
ansible.builtin.import_tasks: ./tasks/packages.yaml
- name: Setup sysctl
ansible.builtin.import_tasks: ./tasks/sysctl.yaml


@ -0,0 +1,32 @@
---
- name: Netplan config
when: ansible_distribution == 'Ubuntu'
block:
- name: Update the netplan config
become: true
ansible.builtin.template:
src: templates/60-floating-ip.j2
dest: /etc/netplan/60-floating-ip.yaml
owner: root
group: root
mode: '0600'
vars:
publicIP: 195.201.250.50
- name: Apply a new config
become: true
ansible.builtin.shell: netplan apply
- name: Ifcfg config
when: ansible_distribution == 'Rocky'
block:
- name: Update the ifcfg config
notify: Restart the network service
become: true
ansible.builtin.template:
src: templates/ifcfg-eth0:1.j2
dest: /etc/sysconfig/network-scripts/ifcfg-eth0:1
owner: root
group: root
mode: '0600'
vars:
publicIP: 195.201.249.91


@ -0,0 +1,95 @@
- name: Update ubuntu repos
when: ansible_distribution == 'Ubuntu'
become: true
ansible.builtin.apt:
update_cache: true
- name: Add Kubernetes repo
when: ansible_distribution == 'Fedora' or ansible_distribution == 'Rocky'
become: true
ansible.builtin.template:
src: kubernetes.repo.j2
dest: /etc/yum.repos.d/kubernetes.repo
force: true
- name: Add Docker repo
when: ansible_distribution == 'Fedora' or ansible_distribution == 'Rocky'
become: true
ansible.builtin.template:
src: docker.repo.j2
dest: /etc/yum.repos.d/docker.repo
force: true
- name: Update yum repos
when: ansible_distribution == 'Fedora' or ansible_distribution == 'Rocky'
become: true
ansible.builtin.yum:
update_cache: true
# ---------------------------------------------------------------------
# Upgrade system if it's the first ansible run
# ---------------------------------------------------------------------
- name: Check that the ansible_initialized file exists
become: true
ansible.builtin.stat:
path: /etc/ansible_initialized
register: stat_result
- name: Upgrade system
when: not stat_result.stat.exists
block:
- name: Upgrade Ubuntu packages
when: ansible_distribution == 'Ubuntu'
become: true
ansible.builtin.apt:
upgrade: full
- name: Upgrade all packages
when: ansible_distribution == 'Fedora' or ansible_distribution == 'Rocky'
become: true
ansible.builtin.yum:
name: '*'
state: latest
- name: Reboot to apply upgrades
when: not stat_result.stat.exists
become: true
ansible.builtin.reboot:
- name: Create the ansible_initialized file
become: true
ansible.builtin.file:
path: /etc/ansible_initialized
state: touch
when: not stat_result.stat.exists
- name: Install host-specific packages
when: packages is defined
become: true
ansible.builtin.package:
name: "{{ item.name }}"
state: "{{ item.state }}"
loop: "{{ packages }}"
- name: Install common packages
become: true
ansible.builtin.package:
name: "{{ item.name }}"
state: "{{ item.state }}"
loop: "{{ common_packages }}"
- name: Configure host-specific services
when: services is defined
become: true
ansible.builtin.systemd:
state: "{{ item.state }}"
name: "{{ item.name }}"
enabled: "{{ item.enabled }}"
loop: "{{ services }}"
- name: Configure common services
become: true
ansible.builtin.systemd:
state: "{{ item.state }}"
name: "{{ item.name }}"
enabled: "{{ item.enabled }}"
loop: "{{ common_services }}"


@ -0,0 +1,20 @@
- name: Install host-specific sysctl
when: sysctl is defined
become: true
ansible.posix.sysctl:
name: "{{ item.name }}"
value: "{{ item.value }}"
sysctl_set: "{{ item.set }}"
state: "{{ item.state }}"
reload: true
loop: "{{ sysctl }}"
- name: Install common sysctl
become: true
ansible.posix.sysctl:
name: "{{ item.name }}"
value: "{{ item.value }}"
sysctl_set: "{{ item.set }}"
state: "{{ item.state }}"
reload: true
loop: "{{ common_sysctl }}"


@ -0,0 +1,33 @@
- name: Prepare global users
block:
- name: Ensure required groups exist
ansible.builtin.group:
name: "{{ item.name }}"
state: "{{ item.state }}"
loop: "{{ user_groups }}"
- name: Allow passwordless sudo for certain groups
ansible.builtin.lineinfile:
dest: /etc/sudoers
state: present
regexp: '^%wheel'
line: '%wheel ALL=(ALL) NOPASSWD: ALL'
validate: 'visudo -cf %s'
when: 'item.sudo'
loop: "{{ user_groups }}"
- name: Create the users
ansible.builtin.user:
name: "{{ item.name }}"
shell: /bin/bash
groups: "{% for grp in item.groups %}{{ grp }}{% if not loop.last %},{% endif %}{% endfor %}"
append: false
password:
loop: "{{ users }}"
- name: Set authorized keys for user
ansible.posix.authorized_key:
user: "{{ item.0.name }}"
state: "{{ item.1.state }}"
key: "{{ item.1.key }}"
loop: "{{ users | subelements('ssh_keys') }}"


@ -0,0 +1,33 @@
# -----------------------------------
# -- Configure volumes
# -----------------------------------
- name: Configure volumes
when: volumes is defined
block:
- name: Ensure volumes exist
become: true
community.general.parted:
device: "{{ item.device }}"
unit: KiB
loop: "{{ volumes }}"
- name: Create a directory if it does not exist
become: true
ansible.builtin.file:
path: "{{ item.dir }}"
state: directory
mode: '0755'
loop: "{{ volumes }}"
- name: Create an ext4 filesystem
become: true
community.general.filesystem:
fstype: "{{ item.fstype }}"
dev: "{{ item.device }}"
loop: "{{ volumes }}"
- name: Remount volume to another location
become: true
ansible.posix.mount:
path: "{{ item.dir }}"
src: "{{ item.device }}"
state: mounted
fstype: "{{ item.fstype }}"
loop: "{{ volumes }}"


@ -0,0 +1,8 @@
---
network:
version: 2
renderer: networkd
ethernets:
eth0:
addresses:
- {{ publicIP }}/32


@ -0,0 +1,6 @@
[docker-ce-stable]
name=Docker CE Stable - $basearch
baseurl=https://download.docker.com/linux/centos/$releasever/$basearch/stable
enabled=1
gpgcheck=1
gpgkey=https://download.docker.com/linux/centos/gpg


@ -0,0 +1,7 @@
BOOTPROTO=static
DEVICE=eth0:1
IPADDR={{ publicIP }}
PREFIX=32
TYPE=Ethernet
USERCTL=no
ONBOOT=yes


@ -0,0 +1,6 @@
[kubernetes]
name=Kubernetes
baseurl=https://pkgs.k8s.io/core:/stable:/v1.29/rpm/
enabled=1
gpgcheck=1
gpgkey=https://pkgs.k8s.io/core:/stable:/v1.29/rpm/repodata/repomd.xml.key

2
requirements.txt Normal file

@ -0,0 +1,2 @@
hcloud
ansible

15
requirements.yml Normal file

@ -0,0 +1,15 @@
---
collections:
- hetzner.hcloud
- community.sops
- name: ansible.utils
- name: community.general
version: 8.5.0
- name: ansible.posix
- name: kubernetes.core
roles:
- src: git+https://git.badhouseplants.net/allanger/k3s-ansible.git
version: master
- src: git+https://git.badhouseplants.net/allanger/ansible-create-k8s-user.git
version: download-config
- xanmanning.k3s

2
resources/known_hosts Normal file

@ -0,0 +1,2 @@
git.badhouseplants.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDTsGvo2KESuvWLf4QEwg+1DdetksEgowGAW04OezLhpA6bqT/6WIx0O6NaQCSN+ZxBRV2a/+ICSGFtU+RFTtbcu30Lcsl/yD9qwbzBqNTUweaWPNze5LrHbIV0/J8nTSt4vzCw3eF0wdvnIQWLhdjQ4qjtV1OWlYJDwo1rHPDJ5JAuYKeLz50iEhs7R6oj2ccws2olGUssG4voi/EHWVxQa3khmSNWdHTNR6H6sdkehKWflGokS5oRf5dIUFj+BbFbq6JxJQXzoiVzOoLdfGz1S6m5i1O0UbDgnoL/my+0ZiswbIa5ApbP+oOjvW5NWF1lzehowNj7Kd1Ow9+FN+2eHSPN+g5rXapBNclQ5/JT5pcSP3vjVuvNoUE/GHhLLFLG3nsPd0GuSB9PDWqxBdnAybOjjldL2cTdDajcdkYbyUGT5HCvdjgpCeJe22JQElvTgKWnICDsnCaIXPwrp1ek5soAjY6ylsOv2KYUnoo+gy7P0+Bzf9JzngfgcN/4x5s8keRQ6YNC9I3NiN1XRdJA52/RGMQfuWjqcqG04yk6AHZZLD3OdBJRjLhXSRn4/DixQ6BxT25mZfZVuyS9pOuDXaI9ZmYtX+xSXAp1XTfIH6t/ud1A+jTKbrCeV5AS6CmlGVfge+9ewDw7RRGd3MW5i5N3N7RwJVMfpcTXVPY2sQ==