Implement dynamic envs (#19)
continuous-integration/drone/push Build is failing Details

Reviewed-on: allanger/badhouseplants-net#19
This commit is contained in:
Nikolai Rodionov 2023-02-25 20:25:16 +00:00
parent c85c686e80
commit 80058b11b1
16 changed files with 725 additions and 89 deletions

View File

@ -1 +1,3 @@
node_modules
static

View File

@ -7,8 +7,6 @@ type: kubernetes
name: Build badhouseplants.net
trigger:
branch:
- main
event:
- push
@ -29,15 +27,10 @@ steps:
commands:
- git submodule update --init --recursive
- name: Get static content
image: rclone/rclone:latest
environment:
RCLONE_CONFIG_CONTENT:
from_secret: RCLONE_CONFIG_CONTENT
RCLONE_CONFIG: /tmp/rclone.conf
- name: Test a build
image: git.badhouseplants.net/badhouseplants/hugo-builder
commands:
- echo "$RCLONE_CONFIG_CONTENT" > $RCLONE_CONFIG
- rclone copy -P badhouseplants-public:/badhouseplants-static static
- hugo
- name: Build and push the docker image
image: plugins/docker
@ -47,10 +40,63 @@ steps:
password:
from_secret: GITEA_TOKEN
repo: git.badhouseplants.net/allanger/badhouseplants-net
tags: latest
depends_on:
- Init git submodules with themes
- Get static content
tags: ${DRONE_COMMIT_SHA}
- name: Deploy a preview ApplicationSet
image: alpine/k8s:1.24.10
when:
branch:
exclude:
- main
environment:
KUBECONFIG_CONTENT:
from_secret: KUBECONFIG_CONTENT
commands:
- mkdir $HOME/.kube
- echo $KUBECONFIG_CONTENT | base64 -d > $HOME/.kube/config
- apk update --no-cache && apk add yq gettext
- export ARGO_APP_CHART_VERSION=`cat chart/Chart.yaml | yq '.version'`
- export ARGO_APP_BRANCH=$DRONE_BRANCH
- export ARGO_APP_HOSTNAME="${DRONE_BRANCH}-dev.badhouseplants.net"
- export ARGO_APP_IMAGE_TAG=$DRONE_COMMIT_SHA
- kubectl get -f ./kube/applicationset.yaml -o yaml > /tmp/old_appset.yaml
- yq "del(.spec.generators[].list.elements[] | select(.name == \"$ARGO_APP_BRANCH\"))" /tmp/old_appset.yaml > /tmp/clean_appset.yaml
- envsubst < ./kube/template.yaml > /tmp/elements.yaml
- yq '.spec.generators[].list.elements += load("/tmp/elements.yaml")' /tmp/clean_appset.yaml > /tmp/new_appset.yaml
- kubectl apply -f /tmp/new_appset.yaml
- name: Deploy a main ApplicationSet
image: alpine/k8s:1.24.10
when:
branch:
- main
environment:
KUBECONFIG_CONTENT:
from_secret: KUBECONFIG_CONTENT
commands:
- mkdir $HOME/.kube
- echo $KUBECONFIG_CONTENT | base64 -d > $HOME/.kube/config
- apk update --no-cache && apk add yq gettext
- export ARGO_APP_CHART_VERSION=`cat chart/Chart.yaml | yq '.version'`
- export ARGO_APP_BRANCH=$DRONE_BRANCH
- export ARGO_APP_IMAGE_TAG=$DRONE_COMMIT_SHA
- kubectl get -f ./kube/applicationset.yaml -o yaml > /tmp/old_appset.yaml
- yq "del(.spec.generators[].list.elements[] | select(.name == \"$ARGO_APP_BRANCH\"))" /tmp/old_appset.yaml > /tmp/clean_appset1.yaml
- yq "del(.spec.generators[].list.elements[] | select(.commit_sha == \"$ARGO_APP_IMAGE_TAG\"))" /tmp/clean_appset1.yaml > /tmp/clean_appset.yaml
- envsubst < ./kube/template.yaml > /tmp/elements.yaml
- yq '.spec.generators[].list.elements += load("/tmp/elements.yaml")' /tmp/clean_appset.yaml > /tmp/new_appset.yaml
- kubectl apply -f /tmp/new_appset.yaml
- name: Sync application
image: argoproj/argocd
environment:
ARGOCD_SERVER:
from_secret: ARGOCD_SERVER
ARGOCD_AUTH_TOKEN:
from_secret: ARGOCD_AUTH_TOKEN
commands:
- argocd app sync -l app=badhouseplants -l branch=$DRONE_BRANCH
- argocd app wait -l app=badhouseplants -l branch=$DRONE_BRANCH
---
# ----------------------------------------------

View File

@ -1,16 +1,4 @@
FROM alpine:latest AS builder
FROM git.badhouseplants.net/badhouseplants/hugo-builder
WORKDIR /src
COPY . /src
ARG GOHUGO_LINK=https://github.com/gohugoio/hugo/releases/download/v0.110.0/hugo_0.110.0_linux-amd64.tar.gz
RUN apk update && apk add curl tar
RUN curl -LJO ${GOHUGO_LINK} && tar -xf hugo_0.110.0_linux-amd64.tar.gz
RUN mv /src/hugo /usr/local/bin/hugo
RUN chmod +x /usr/local/bin/hugo
RUN hugo
FROM nginx:stable-alpine
COPY --from=builder /src/public /var/www
COPY configs/nginx.conf /etc/nginx/conf.d/default.conf
EXPOSE 80
CMD [ "nginx", "-g", "daemon off;" ]
ENTRYPOINT ["hugo"]

View File

@ -2,5 +2,5 @@ apiVersion: v2
name: badhouseplants-net
description: A Helm chart for Kubernetes
type: application
version: 0.2.0
version: 0.3.7
appVersion: "1.16.0"

View File

@ -24,35 +24,73 @@ spec:
labels:
{{- include "badhouseplants-net.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
initContainers:
- name: {{ .Values.rclone.container.name }}
image: "{{ .Values.rclone.image.repository}}:{{ .Values.rclone.image.tag}}"
imagePullPolicy: {{ .Values.rclone.image.pullPolicy }}
env:
- name: RCLONE_CONFIG
value: /tmp/rclone.conf
command:
- sh
args:
- "-c"
- "rclone copy -P badhouseplants-public:/badhouseplants-static /static"
volumeMounts:
- name: rclone-config
mountPath: "/tmp"
readOnly: true
- name: {{ .Values.volumes.rclone.name }}
mountPath: /static
readOnly: false
resources:
{{- toYaml .Values.rclone.container.resources | nindent 12 }}
- name: {{ .Values.hugo.container.name }}
args:
- --baseURL
- {{ .Values.hugo.baseURL }}
{{- if .Values.hugo.buildDrafts }}
- --buildDrafts
{{- end }}
volumeMounts:
- name: {{ .Values.volumes.rclone.name }}
mountPath: /src/static
readOnly: true
- name: {{ .Values.volumes.public.name }}
mountPath: /src/public
readOnly: false
image: "{{ .Values.hugo.image.repository}}:{{ .Values.hugo.image.tag}}"
imagePullPolicy: {{ .Values.hugo.image.pullPolicy }}
resources:
{{- toYaml .Values.hugo.container.resources | nindent 12 }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
- name: {{ .Values.nginx.container.name }}
image: "{{ .Values.nginx.image.repository}}:{{ .Values.nginx.image.tag}}"
imagePullPolicy: {{ .Values.nginx.image.pullPolicy }}
ports:
- name: http
containerPort: {{ .Values.service.port }}
protocol: TCP
resources:
{{- toYaml .Values.resources | nindent 12 }}
command:
{{ toYaml .Values.command | indent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- toYaml .Values.nginx.container.resources | nindent 12 }}
volumeMounts:
- name: {{ .Values.volumes.public.name }}
mountPath: /var/www
readOnly: true
- name: nginx-config
mountPath: /etc/nginx/conf.d
readOnly: true
volumes:
- name: rclone-config
secret:
secretName: rclone-config
- name: {{ .Values.volumes.rclone.name }}
emptyDir:
sizeLimit: {{ .Values.volumes.rclone.sizeLimit }}
- name: {{ .Values.volumes.public.name }}
emptyDir:
sizeLimit: {{ .Values.volumes.public.sizeLimit }}
- name: nginx-config
configMap:
name: nginx-config

View File

@ -0,0 +1,6 @@
{{- if .Values.istio.enabled -}}
apiVersion: v1
kind: Namespace
metadata:
name: {{ .Values.namespace.name }}
{{- end }}

View File

@ -0,0 +1,17 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: nginx-config
data:
default.conf: |
server {
listen 80;
listen [::]:80;
server_name ~.;
root /var/www;
index index.html;
location / {
try_files $uri $uri/ =404;
}
}

View File

@ -0,0 +1,8 @@
apiVersion: v1
kind: Secret
metadata:
name: rclone-config
type: generic
data:
# the data is abbreviated in this example
rclone.conf: {{ .Values.rclone.config | b64enc }}

View File

@ -0,0 +1,21 @@
{{- if .Values.istio.enabled -}}
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
annotations:
labels:
name: blog-virtual-service
spec:
gateways:
- istio-system/badhouseplants-net
hosts: {{ .Values.istio.hosts }}
http:
- match:
- uri:
prefix: {{ .Values.istio.prefix }}
route:
- destination:
host: {{ include "badhouseplants-net.fullname" . }}
port:
number: {{ .Values.service.port }}
{{- end }}

View File

@ -1,10 +1,72 @@
replicaCount: 1
image:
repository: git.badhouseplants.net/allanger/badhouseplants-net
pullPolicy: Always
tag: latest
namespace:
enabled: true
name: badhouseplants-application
nginx:
container:
name: nginx
resources: {}
image:
repository: nginx
pullPolicy: Always
tag: latest
rclone:
container:
name: rclone
resources: {}
image:
repository: rclone/rclone
pullPolicy: Always
tag: latest
config: |-
[badhouseplants-public]
type = s3
provider = Minio
region = us-west-1
endpoint = s3.badhouseplants.net
location_constraint = us-west-1
hugo:
container:
name: badhouseplants-net
resources: {}
image:
repository: git.badhouseplants.net/allanger/badhouseplants-net
pullPolicy: Always
tag: latest
baseURL: badhouseplants.net
buildDrafts: false
istio:
enabled: true
hosts:
- badhouseplants.net
- www.badhouseplants.net
prefix: /
volumes:
# ----------------------------------------------
# -- An emptydir volume where hugo should
# -- put the static content
# ----------------------------------------------
public:
name: public-content
sizeLimit: 1Gi
# ----------------------------------------------
# -- An emptydir volume where rclone should
# -- download pictures
# ----------------------------------------------
rclone:
name: s3-data
sizeLimit: 1Gi
# -------------------------------------
# -- Default values that I haven't
# -- touched
# -------------------------------------
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
@ -30,29 +92,6 @@ service:
ingress:
enabled: false
annotations:
kubernetes.io/ingress.class: istio
hosts:
- host: badhouseplants.net
paths:
- path: /
pathType: Prefix
tls:
- secretName: badhouseplants-wildcard-tls
hosts:
- badhouseplants.net
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
autoscaling:
enabled: false
@ -60,9 +99,3 @@ autoscaling:
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
nodeSelector: {}
tolerations: []
affinity: {}

View File

@ -1,4 +1,4 @@
baseURL: 'https://badhouseplants.net/'
#baseURL: 'https://badhouseplants.net/'
languageCode: 'en-us'
title: 'Bad Houseplants'
theme: 'papermod'

Binary file not shown.

After

Width:  |  Height:  |  Size: 7.6 MiB

View File

@ -0,0 +1,424 @@
---
title: "Argocd Dynamic Environment Per Branch: Part 1"
date: 2023-02-25T14:00:00+01:00
draft: true
ShowToc: true
cover:
image: "cover.png"
caption: "Argocd Dynamic Environment Per Branch Part 1"
relative: false
responsiveImages: false
---
[Do you remember?]({{< ref "dont-use-argocd-for-infrastructure" >}})
> And using `helmfile`, I will install `ArgoCD` to my clusters, of course, because it's an awesome tool, without any doubts. But don't manage your infrastructure with it, because it's a part of your infrastructure, and it's a service that you provide to other teams. And I'll talk about in one of the next posts.
Yes, I have written 4 posts where I was almost absolutely negative about `ArgoCD`. But I was talking about infrastructure then. I've got some ideas about how to describe it in a better way, but I think I will write another post about it.
Here, I want to talk about dynamic *(preview)* environments, and I'm going to describe how to create them using my blog as an example. My blog is a pretty easy application. From `Kubernetes` perspective, it's just a container with some static content. And here, you already can notice that static is an opposite of dynamic, so it's the first problem that I'll have to tackle. Turning static content into dynamic. So my blog consists of `markdown` files that are used by `hugo` for a web page generation.
>Initially I was using `hugo` server to serve the static, but it needs way more resources than `nginx`, so I've decided in favor of `nginx`.
I think that I'll write 2 of 3 posts about it, because it's too much to cover in only one. So here, I'd share how I was preparing my blog to be ready for dynamic environments.
So this is how my workflow looked like before I decided to use dynamic environments.
- I'm editing `hugo` content while using `hugo server` locally
- Pushing changes to a `non-main` branch
- When everything is ready, I'm uploading pictures to the `minio` storage
- And merging a non-main branch to the main
- Drone-CI is downloading images from `minio` and builds a docker image with the `latest` tag
- First step is to generate a static content by `hugo`
- Second step is to put that static content in `nginx` container
- Drone-CI is pushing a new image to my registry
- `Keel` spots that images was updated and pulls it.
- Pod with a static is being recreated, and I have my blog with a new content
What don't I like about it? I can't test something unless it's in `production`. And when I started to work on adding comments (which is still WIP), I understood that I'd like to have a real environment where I can test everything before firing the main pipeline. Even though having a static development environment would be fine for me, because I'm the only one who does the development here, I don't like the concept of static envs, and I want to be able to work on different posts at the same time. Also, adding a new static environment for development purposes is roughly the same amount of work as implementing a solution for deploying them dynamically.
Before I can start deploying them, I have to prepare the application for that. At first glance, the changes look like this:
1. Container must not contain any static content
2. I can't use only latest tags anymore
3. Helm chart has a lot of stuff that's hardcoded
4. CI pipelines must be adjusted
5. Deployment process should be rethought
### Static Container
Static content doesn't play well with dynamic environments. I'd even say, doesn't play at all. So at least I must stop defining the hostname for my blog at the build stage. One container should be able to run anywhere with the same result. So I've decided that instead of putting the generated static content in the container with `nginx` at the build stage, I need to ship a container with the source code to `Kubernetes`, generate the static content there, and put it into a container with `nginx`. So before, my deployment looked like this:
```YAML
spec:
containers:
- image: git.badhouseplants.net/allanger/badhouseplants-net:latest
imagePullPolicy: Always
name: badhouseplants-net
```
And it was enough. Now it looks like that:
```YAML
containers:
- image: nginx:latest
imagePullPolicy: Always
name: nginx
ports:
- containerPort: 80
name: http
protocol: TCP
resources: {}
volumeMounts:
- mountPath: /var/www
name: public-content
readOnly: true
- mountPath: /etc/nginx/conf.d
name: nginx-config
readOnly: true
initContainers:
- args:
- --baseURL
- https://dynamic-charts-dev.badhouseplants.net/
image: git.badhouseplants.net/allanger/badhouseplants-net:d727a51c0443eb4194bdaebf8ab0e94c0f228b06
imagePullPolicy: Always
name: badhouseplants-net
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /src/static
name: s3-data
readOnly: true
- mountPath: /src/public
name: public-content
restartPolicy: Always
- emptyDir:
sizeLimit: 1Gi
name: public-content
- configMap:
defaultMode: 420
name: nginx-config
name: nginx-config
```
So in the `init` container I'm generating the static content (the `--baseURL` flag is templated with `Helm`), putting the result in a directory that is mounted as an `emptyDir` volume. And then later I'm mounting this folder into a container with `nginx`. Now I can use my docker image wherever I'd like with the same result. It doesn't depend on the hostname that was fixed during the build.
### No more `latest`
Since I want to have my envs updated on each commit, I can't push only `latest` anymore. So I've decided to use `commit sha` as tags for my images. But it means that I'll have a lot of them now and having `300Mb` of images and other media is becoming very painful. That means that I need to stop putting images directly to container during the build. So instead of using `rclone` to get data from `minio` in a `drone` pipeline, I'm adding another `init` container to my deployment.
```YAML
initContainers:
- args:
- -c
- rclone copy -P badhouseplants-public:/badhouseplants-static /static
command:
- sh
env:
- name: RCLONE_CONFIG
value: /tmp/rclone.conf
image: rclone/rclone:latest
imagePullPolicy: Always
name: rclone
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /tmp
name: rclone-config
readOnly: true
- mountPath: /static
name: s3-data
volumes:
- name: rclone-config
secret:
defaultMode: 420
secretName: rclone-config
- emptyDir:
sizeLimit: 1Gi
name: s3-data
```
And also, I'm mounting the `s3-data` volume to the `hugo` container, so it can generate my blog with all images.
### Helm chart should be more flexible
I had to find all the values that should differ between environments. And it turned out, it's not a lot.
1. Istio `VirtualServices` hostnames (Or Ingress hostname, if you don't use Istio)
2. Image tag for the container with the source code
3. And a hostname that should be passed to hugo as a base URL
4. Preview environments should display pages that are still `drafts`
So all of that I've put to `values.yaml`
```YAML
istio:
hosts:
- badhouseplants.net
hugo:
image:
tag: $COMMIT_SHA
baseURL: https://badhouseplants.net/
buildDrafts: false
```
### CI pipelines
Now I need to push a new image on each commit instead of pushing only once the code has made it to the main branch. But I also don't want to have something completely broken in my registry, because I'm self-hosting and therefore I care about storage. So before building and pushing an image, I need to test it.
```YAML
# ---------------------------------------------------------------
# -- My Dockerfile is very small and easy, so it's not a problem
# -- to duplicate its logic in a job. But I think that
# -- a better way to implement this, would be to build an image
# -- with Dockerfile, run it, and push, if everything is fine
# ---------------------------------------------------------------
- name: Test a build
image: klakegg/hugo
commands:
- hugo
- name: Build and push the docker image
image: plugins/docker
settings:
registry: git.badhouseplants.net
username: allanger
password:
from_secret: GITEA_TOKEN
repo: git.badhouseplants.net/allanger/badhouseplants-net
tags: ${DRONE_COMMIT_SHA}
```
Now if my code is not really broken, I'll have an image for each commit. And when I merge my branch to `main`, I can use the tag from the latest preview build for the production instance. So I'm almost sure that what I've tested before is what a visitor will see.
> But with this kind of setup I've reached docker pull limit pretty fast, so I've decided that I need to have a builder image in my registry too. Of course, it must be an automated action, but right off the bat, I've just pushed the `hugo` image to my registry with the `latest` tag and created an issue to fix it later
```BASH
docker pull klakegg/hugo
docker tag klakegg/hugo git.badhouseplants.net/badhouseplants/hugo-builder
docker push
```
And update my Dockerfile to look like this:
```DOCKERFILE
FROM git.badhouseplants.net/badhouseplants/hugo-builder
WORKDIR /src
COPY . /src
ENTRYPOINT ["hugo"]
```
### How to deploy
Previously I was using the same helmfile that I use for everything else in my k8s cluster. It was fine for static envs, but when I need to deploy them dynamically, it's not an option anymore. And here `ArgoCD` enters the room. I'm creating an `ApplicationSet` that looks like that:
```YAML
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: badhouseplants-net
namespace: argo-system
spec:
generators:
- list:
elements:
- name: application # just not to lose backward compatibility with the previous setup
app: badhouseplants
branch: main
chart_version: 0.3.6
# Image that is latest now, we'll get there later
value: |
hugo:
image:
tag: latest
# And this is an example of an environment that I want to be created.
- name: dynamic-charts
app: badhouseplants
branch: dynamic-charts
chart_version: 0.3.6
value: |
istio:
hosts:
- dynamic-charts-dev.badhouseplants.net
hugo:
image:
tag: 5d742a71731320883db698432303c92aee4d68a1
baseURL: https://dynamic-charts-dev.badhouseplants.net/
buildDrafts: true
template:
metadata:
name: "{{ app }}-{{ name }}"
namespace: argo-system
spec:
project: "default"
source:
helm:
valueFiles:
- values.yaml
values: "{{ value }}"
repoURL: https://git.badhouseplants.net/api/packages/allanger/helm
targetRevision: "{{ chart_version }}"
chart: badhouseplants-net
destination:
server: "https://kubernetes.default.svc"
namespace: "{{ app }}-{{ name }}"
syncPolicy:
syncOptions:
- CreateNamespace=true
```
But I don't like the idea of storing something like that in the repository. So in git I'm putting something like this.
```YAML
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: badhouseplants-net
namespace: argo-system
spec:
generators:
- list:
elements:
- name: application
app: badhouseplants
branch: main
chart_version: 0.3.6
value: |
hugo:
image:
tag: $ARGO_IMAGE_TAG
...
```
Since I'm not using latest anymore, I need to use a new tag every time a new image is pushed. But let's test with the preview env first:
```YAML
# ./kube/template.yaml
...
- name: $ARGO_APP_BRANCH
app: badhouseplants
branch: $ARGO_APP_BRANCH
chart_version: $ARGO_APP_CHART_VERSION
value: |
istio:
hosts:
- $ARGO_APP_HOSTNAME
hugo:
image:
tag: $ARGO_APP_IMAGE_TAG
baseURL: https://$ARGO_APP_HOSTNAME/
buildDrafts: true
...
```
And the logic that I would like to have in my setup would be
- In the git repo there is only application set with the main instance only (production)
- After a new image is pushed to the registry, I'm getting this application set as `yaml` and appending a new generator to it.
- Applying a new `ApplicationSet` and syncing application using the `argo` cli tool
First, let's set environment variables:
```
- $ARGO_APP_BRANCH = $DRONE_BRANCH | I don't want to use it directly, in case if I want to stop using Drone
- $ARGO_APP_CHART_VERSION should be taken from the `./chart/Chart.yaml` file. `cat chart/Chart.yaml | yq '.version'`
- $ARGO_APP_HOSTNAME, I want it to look like that: "$DRONE_BRANCH-dev.badhouseplants.net"
- $ARGO_APP_IMAGE_TAG = $DRONE_COMMIT_SHA
```
So after setting all these variables, I can use `envsubst < ./kube/template.yaml` to create a correct generator. After that I only need to append it to the one that is already in k8s. *And not append it if it's already there*.
So my pipeline for a non-main branch looks like that:
```YAML
- name: Deploy a preview ApplicationSet
image: alpine/k8s:1.24.10
when:
branch:
exclude:
- main
environment:
KUBECONFIG_CONTENT:
from_secret: KUBECONFIG_CONTENT
commands:
- mkdir $HOME/.kube
- echo $KUBECONFIG_CONTENT | base64 -d > $HOME/.kube/config
- apk update --no-cache && apk add yq gettext
- export ARGO_APP_CHART_VERSION=`cat chart/Chart.yaml | yq '.version'`
- export ARGO_APP_BRANCH=$DRONE_BRANCH
- export ARGO_APP_HOSTNAME="${DRONE_BRANCH}-dev.badhouseplants.net"
- export ARGO_APP_IMAGE_TAG=$DRONE_COMMIT_SHA
- kubectl get -f ./kube/applicationset.yaml -o yaml > /tmp/old_appset.yaml
- yq "del(.spec.generators[].list.elements[] | select(.name == \"$ARGO_APP_BRANCH\"))" /tmp/old_appset.yaml > /tmp/clean_appset.yaml
- envsubst < ./kube/template.yaml > /tmp/elements.yaml
- yq '.spec.generators[].list.elements += load("/tmp/elements.yaml")' /tmp/clean_appset.yaml > /tmp/new_appset.yaml
- kubectl apply -f /tmp/new_appset.yaml
```
And even though it's very ugly, I already like it. Because it works.
![Drone pipeline result](/dyn-envs/drone-pipeline.png)
I would like to move the whole pipeline logic out of the `.drone.yml` file. But I will do it later.
After our application set is deployed, we need to update the application that is created by it. I would like to use the `argocd` CLI tool for that. To sync one app, we need to use selectors, and I'd like to go with labels. So let's first add labels to our `ApplicationSet`
```YAML
...
template:
metadata:
name: "{{ app }}-{{ name }}"
namespace: argo-system
labels:
branch: "{{ name }}"
application: "{{ app }}"
...
```
And now let's create a job like that:
```YAML
- name: Sync application
image: argoproj/argocd
environment:
ARGOCD_SERVER:
from_secret: ARGOCD_SERVER
ARGOCD_AUTH_TOKEN:
from_secret: ARGOCD_AUTH_TOKEN
commands:
- argocd app sync -l app=badhouseplants -l branch=$DRONE_BRANCH
- argocd app wait -l app=badhouseplants -l branch=$DRONE_BRANCH
```
And the last step would be to remove an application when a branch is removed. It would be easy with `Gitlab`, because there you can use `environments` and `triggers` for branch removal *(as I remember)*. But with Drone it can be harder, because Drone won't be triggered by a removed branch. So there has to be an additional step in the `main` pipeline.
I'm always using squash commits, which means that after merging a Pull Request the commit will have the same `SHA`. So when merging to the main branch, I can use the commit hash to remove a generator.
So I've created a file `./kube/main-template.yaml`, that looks like that:
```YAML
- name: application
app: badhouseplants
branch: main
chart_version: $ARGO_APP_CHART_VERSION
value: |
hugo:
image:
tag: $ARGO_APP_IMAGE_TAG
```
And a job:
```YAML
- name: Deploy a main ApplicationSet
image: alpine/k8s:1.24.10
when:
branch:
- main
environment:
KUBECONFIG_CONTENT:
from_secret: KUBECONFIG_CONTENT
commands:
- mkdir $HOME/.kube
- echo $KUBECONFIG_CONTENT | base64 -d > $HOME/.kube/config
- apk update --no-cache && apk add yq gettext
- export ARGO_APP_CHART_VERSION=`cat chart/Chart.yaml | yq '.version'`
- export ARGO_APP_BRANCH=$DRONE_BRANCH
- export ARGO_APP_IMAGE_TAG=$DRONE_COMMIT_SHA
- kubectl get -f ./kube/applicationset.yaml -o yaml > /tmp/old_appset.yaml
- yq "del(.spec.generators[].list.elements[] | select(.name == \"$ARGO_APP_BRANCH\"))" /tmp/old_appset.yaml > /tmp/clean_appset1.yaml
- yq "del(.spec.generators[].list.elements[] | select(.commit_sha == \"$ARGO_APP_IMAGE_TAG\"))" /tmp/clean_appset1.yaml > /tmp/clean_appset.yaml
- envsubst < ./kube/main.yaml > /tmp/elements.yaml
- yq '.spec.generators[].list.elements += load("/tmp/elements.yaml")' /tmp/clean_appset.yaml > /tmp/new_appset.yaml
- kubectl apply -f /tmp/new_appset.yaml
```
Then I just need to upgrade `./kube/template.yaml`, so it contains `commit_sha: $ARGO_APP_IMAGE_TAG`.
> Also, I've found out that `ArgoCD` won't remove a namespace if it was created by a `SyncPolicy`, so I've added it to the Helm chart and added a new `value` to provide a name.

29
kube/applicationset.yaml Normal file
View File

@ -0,0 +1,29 @@
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: badhouseplants-net
namespace: argo-system
spec:
generators:
- list:
elements: []
template:
metadata:
name: "{{ app }}-{{ name }}"
namespace: argo-system
labels:
branch: "{{ name }}"
application: "{{ app }}"
spec:
project: "default"
source:
helm:
valueFiles:
- values.yaml
values: "{{ value }}"
repoURL: https://git.badhouseplants.net/api/packages/allanger/helm
targetRevision: "{{ chart_version }}"
chart: badhouseplants-net
destination:
server: "https://kubernetes.default.svc"
namespace: "{{ app }}-{{ name }}"

8
kube/main.yaml Normal file
View File

@ -0,0 +1,8 @@
- name: application
app: badhouseplants
branch: $ARGO_APP_BRANCH
chart_version: $ARGO_APP_CHART_VERSION
value: |
hugo:
image:
tag: $ARGO_APP_IMAGE_TAG

16
kube/template.yaml Normal file
View File

@ -0,0 +1,16 @@
- name: $ARGO_APP_BRANCH
app: badhouseplants
branch: $ARGO_APP_BRANCH
commit_sha: $ARGO_APP_IMAGE_TAG
chart_version: $ARGO_APP_CHART_VERSION
value: |
namespace:
name: badhouseplants-$ARGO_APP_BRANCH
istio:
hosts:
- $ARGO_APP_HOSTNAME
hugo:
image:
tag: $ARGO_APP_IMAGE_TAG
baseURL: https://$ARGO_APP_HOSTNAME/
buildDrafts: true