Init Commit
continuous-integration/drone/push: Build is passing

Start following GitFlow
This commit is contained in:
Nikolai Rodionov 2023-02-17 15:19:49 +01:00
commit 8c825f7c84
50 changed files with 4153 additions and 0 deletions

1
.dockerignore Normal file

@ -0,0 +1 @@
node_modules

84
.drone.yml Normal file

@ -0,0 +1,84 @@
---
kind: pipeline
type: kubernetes
name: Build badhouseplants.net
steps:
- name: Publish the Helm chart
when:
branch:
- main
image: alpine/helm
environment:
GITEA_TOKEN:
from_secret: GITEA_TOKEN
commands:
- helm plugin install https://github.com/chartmuseum/helm-push
- helm package chart -d chart-package
- helm repo add --username allanger --password $GITEA_TOKEN allanger-charts https://git.badhouseplants.net/api/packages/allanger/helm
- helm cm-push "./chart-package/$(ls chart-package)" allanger-charts
- name: Init git submodules
image: alpine/git
when:
branch:
- main
commands:
- git submodule update --init --recursive
- name: Get static content
image: rclone/rclone:latest
when:
branch:
- main
environment:
RCLONE_CONFIG_CONTENT:
from_secret: RCLONE_CONFIG_CONTENT
RCLONE_CONFIG: /tmp/rclone.conf
commands:
- echo "$RCLONE_CONFIG_CONTENT" > $RCLONE_CONFIG
- rclone copy -P badhouseplants-public:/badhouseplants-static static
- name: Build and push the docker image
when:
branch:
- main
image: plugins/docker
settings:
registry: git.badhouseplants.net
username: allanger
password:
from_secret: GITEA_TOKEN
repo: git.badhouseplants.net/allanger/badhouseplants-net
tags: latest
depends_on:
- Init git submodules
- Get static content
---
kind: pipeline
type: kubernetes
name: CV Builder
when:
branch:
- main
steps:
- name: Build the CV
image: ghcr.io/puppeteer/puppeteer
commands:
- cp -R ./content/cv/* $HOME
- cd $HOME
- npm install md-to-pdf
- npx md-to-pdf index.md
- mkdir $DRONE_WORKSPACE/cv
- mv index.pdf $DRONE_WORKSPACE/cv/n.rodionov.pdf
- name: Upload the CV
image: rclone/rclone:latest
environment:
RCLONE_CONFIG_CONTENT:
from_secret: RCLONE_CONFIG_CONTENT_PRIVATE
RCLONE_CONFIG: /tmp/rclone.conf
commands:
- echo "$RCLONE_CONFIG_CONTENT" > $RCLONE_CONFIG
- rclone copy -P $DRONE_WORKSPACE/cv badhouseplants-minio:/public-download

3
.gitignore vendored Normal file

@ -0,0 +1,3 @@
node_modules
static
content/cv/index.pdf

6
.gitmodules vendored Normal file

@ -0,0 +1,6 @@
[submodule "themes/ananke"]
path = themes/ananke
url = https://github.com/theNewDynamic/gohugo-theme-ananke
[submodule "themes/papermod"]
path = themes/papermod
url = https://github.com/adityatelange/hugo-PaperMod.git

0
.hugo_build.lock Normal file

13
Dockerfile Normal file

@ -0,0 +1,13 @@
FROM alpine:latest AS builder
WORKDIR /src
ARG GOHUGO_LINK=https://github.com/gohugoio/hugo/releases/download/v0.110.0/hugo_0.110.0_linux-amd64.tar.gz
RUN apk update && apk add curl tar
RUN curl -LJO ${GOHUGO_LINK} && tar -xf hugo_0.110.0_linux-amd64.tar.gz
RUN chmod +x /src/hugo
FROM alpine:latest
WORKDIR /src
COPY --from=builder /src/hugo /usr/bin/hugo
COPY . /src
ENTRYPOINT ["/usr/bin/hugo"]
CMD ["--help"]

5
Makefile Normal file

@ -0,0 +1,5 @@
upload_static:
rclone copy -P static badhouseplants-minio:/badhouseplants-static
get_static:
rclone copy -P badhouseplants-public:/badhouseplants-static static

4
README.md Normal file

@ -0,0 +1,4 @@
# Badhouseplants NET
## Static content
Storing static content in the repo is painful, because the files are massive. That's why I'm storing them in an S3 bucket that is publicly available for downstream operations.
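The bucket is synced with `rclone`; a minimal sketch of the two directions, using the same remotes as the Makefile in this repo:
```BASH
# Pull the public static content before building the site locally
rclone copy -P badhouseplants-public:/badhouseplants-static static
# Push local changes in ./static back to the bucket (needs write credentials)
rclone copy -P static badhouseplants-minio:/badhouseplants-static
```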

12
archetypes/default.md Normal file

@ -0,0 +1,12 @@
---
title: "{{ replace .Name "-" " " | title }}"
date: {{ .Date }}
draft: true
ShowToc: true
cover:
image: "cover.png"
caption: "{{ replace .Name "-" " " | title }}"
relative: false
responsiveImages: false
---

23
chart/.helmignore Normal file

@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

6
chart/Chart.yaml Normal file

@ -0,0 +1,6 @@
apiVersion: v2
name: badhouseplants-net
description: A Helm chart for Kubernetes
type: application
version: 0.1.12
appVersion: "1.16.0"

22
chart/templates/NOTES.txt Normal file

@ -0,0 +1,22 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "badhouseplants-net.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "badhouseplants-net.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "badhouseplants-net.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "badhouseplants-net.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
{{- end }}


@ -0,0 +1,62 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "badhouseplants-net.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "badhouseplants-net.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "badhouseplants-net.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "badhouseplants-net.labels" -}}
helm.sh/chart: {{ include "badhouseplants-net.chart" . }}
{{ include "badhouseplants-net.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "badhouseplants-net.selectorLabels" -}}
app.kubernetes.io/name: {{ include "badhouseplants-net.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "badhouseplants-net.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "badhouseplants-net.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}


@ -0,0 +1,58 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "badhouseplants-net.fullname" . }}
labels:
{{- include "badhouseplants-net.labels" . | nindent 4 }}
{{- with .Values.deployAnnotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if not .Values.autoscaling.enabled }}
replicas: {{ .Values.replicaCount }}
{{- end }}
selector:
matchLabels:
{{- include "badhouseplants-net.selectorLabels" . | nindent 6 }}
template:
metadata:
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "badhouseplants-net.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: {{ .Values.service.port }}
protocol: TCP
resources:
{{- toYaml .Values.resources | nindent 12 }}
command:
{{ toYaml .Values.command | indent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}


@ -0,0 +1,61 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "badhouseplants-net.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
{{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
{{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
{{- end }}
{{- end }}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
{{- include "badhouseplants-net.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
ingressClassName: {{ .Values.ingress.className }}
{{- end }}
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ .path }}
{{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
pathType: {{ .pathType }}
{{- end }}
backend:
{{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
service:
name: {{ $fullName }}
port:
number: {{ $svcPort }}
{{- else }}
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}


@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "badhouseplants-net.fullname" . }}
labels:
{{- include "badhouseplants-net.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
{{- include "badhouseplants-net.selectorLabels" . | nindent 4 }}

73
chart/values.yaml Normal file

@ -0,0 +1,73 @@
replicaCount: 1
image:
repository: git.badhouseplants.net/allanger/badhouseplants-net
pullPolicy: Always
tag: latest
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
deployAnnotations:
keel.sh/trigger: poll
keel.sh/policy: 'force'
podSecurityContext: {}
# fsGroup: 2000
command:
- "/bin/sh"
- "-c"
- "hugo server --bind 0.0.0.0 -p 80 -b https://badhouseplants.net/ --appendPort=false"
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 80
ingress:
enabled: false
annotations:
kubernetes.io/ingress.class: istio
hosts:
- host: badhouseplants.net
paths:
- path: /
pathType: Prefix
tls:
- secretName: badhouseplants-wildcard-tls
hosts:
- badhouseplants.net
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
nodeSelector: {}
tolerations: []
affinity: {}

65
config.yaml Normal file

@ -0,0 +1,65 @@
baseURL: 'https://badhouseplants.net/'
languageCode: 'en-us'
title: 'Bad Houseplants'
theme: 'papermod'
menu:
main:
- name: Posts
url: /posts
weight: 10
- name: Music
url: /music
weight: 11
- name: Beats
url: /beats
weight: 12
- name: About
url: /about
weight: 13
- name: Search
url: /search
weight: 14
taxonomies:
tag: tags
params:
ShowBreadCrumbs: true
ShowReadingTime: true
ShowPostNavLinks: true
ShowCodeCopyButtons: true
profileMode:
enabled: true
title: "Bad Houseplants"
subtitle: "... by allanger"
imageUrl: "Finish.png"
imageWidth: 150
imageHeight: 150
buttons:
- name: Source
url: "https://git.badhouseplants.net/allanger/badhouseplants-net"
- name: My Music
url: "https://funkwhale.badhouseplants.net/library/artists"
socialIcons:
- name: "telegram"
url: "https://t.me/allanger"
- name: "twitter"
url: "https://twitter.com/_allanger"
- name: "mastodon"
url: "https://mastodon.social/@allanger"
- name: github
url: 'https://github.com/allanger'
- name: email
url: 'mailto:allanger@zohomail.com'
ShowShareButtons: true
ShareButtons: ["telegram", "twitter", "reddit", "linkedin"]
env: production
title: Bad Houseplants
description: "...by allanger"
keywords: [Blog, Portfolio]
author: allanger
DateFormat: "January 2, 2006"
defaultTheme: auto
outputs:
home:
- HTML
- RSS
- JSON

47
content/about/_index.md Normal file

@ -0,0 +1,47 @@
---
title: About
date: 2023-01-24T09:26:52+01:00
draft: false
---
> It was supposed to be just yet another web page with music release reviews, but after trying to write something about them, I found out that I'm not good at it. So it's just a blog where I'm talking about everything that comes to my mind.
[![Build Status](https://drone.badhouseplants.net/api/badges/allanger/badhouseplants-net/status.svg?ref=refs/heads/main)](https://drone.badhouseplants.net/allanger/badhouseplants-net/latest)
### Who am I?
> If you're hiring, you can find [my CV here]({{< ref "cv" >}} )
I'm a musician and a geek who works full-time as a DevOps engineer, whatever that means. Thanks to my job, I know how to run self-hosted services pretty well, and that helps me achieve my goal of bringing indie culture everywhere I can. As a user, I'm trying to separate myself from global companies as much as possible in my daily life.
Also, I'm a Linux lover, which doesn't really go well with my will to make music. I hope that one day developers will see that Linux is a real OS that can be used as a daily driver, and that building software for Linux is just as important as building for macOS and Windows. I hope that we will be able to use not only open-source solutions on Linux, but also closed-source proprietary ones.
### Music, Beats and Arrangements
## Music
> I always thought I was a musician
[Check out what I've got](https://funkwhale.badhouseplants.net)
You can find everything I consider ready enough to be shown on my [FunkWhale](https://funkwhale.badhouseplants.net/library) instance. Also, my music can be found on many streaming services, and yes, I know that it's not a very independent way of doing things, but it's one of many exceptions 🙃.
All of my beats are waiting for somebody to do something with them. I'm giving them all away for a donation, so if you happen to like any, just shoot me a message. I can re-arrange and remix them as much as possible. I can mix your tracks, and I would really love to do that; it doesn't matter what kind of music it is, I'm ready to work with anything, if I like it *(at least a little bit)*.
## IT
> I'm a DevOps after all
[Visit my gitea](https://git.badhouseplants.net)
Since I'm a DevOps engineer, I work a lot with Kubernetes, containers, Linux, etc. And that's the root of my intention to move to Linux completely.
I hope I will make my contribution to the world of Linux music production too. I'm hosting my own Gitea instance, where you will be able to find all my code (or almost all of it).
If you've made it this far, you might think that this is the whole point of it all: a self-hosted blog, a music streaming service, and git. **This guy is just a fucking geek!**
And yes, you're partially right. The main reason it exists is that I'm trying to follow and promote `indie/punk` culture, which doesn't only apply to the arts. And that's going to be covered in my posts, I hope.
---
### If you're still here,
I'm looking for people with the same mindset as me, to make music or to code together, or anything else. So I would be happy to connect on [Mastodon](https://mastodon.social/@allanger)

53
content/beats/_index.md Normal file

@ -0,0 +1,53 @@
---
title: Beats
date: 2023-01-24T09:26:52+01:00
draft: false
---
>I don't lease my beats. If you happen to like anything, just shoot me a message and we will come to an agreement. And if you decide to use any of my beats, you'll be the only one using it (legally).
---
### Easy Money
{{< rawhtml >}}
<iframe width="100%" height="150" scrolling="no" frameborder="no" src="https://funkwhale.badhouseplants.net/front/embed.html?&amp;type=track&amp;id=18"></iframe>
{{< /rawhtml >}}
### Phantom Limb
{{< rawhtml >}}
<iframe width="100%" height="150" scrolling="no" frameborder="no" src="https://funkwhale.badhouseplants.net/front/embed.html?&amp;type=track&amp;id=19"></iframe>
{{< /rawhtml >}}
### Ark
{{< rawhtml >}}
<iframe width="100%" height="150" scrolling="no" frameborder="no" src="https://funkwhale.badhouseplants.net/front/embed.html?&amp;type=track&amp;id=21"></iframe>
{{< /rawhtml >}}
### Tremor
{{< rawhtml >}}
<iframe width="100%" height="150" scrolling="no" frameborder="no" src="https://funkwhale.badhouseplants.net/front/embed.html?&amp;type=track&amp;id=24"></iframe>
{{< /rawhtml >}}
### Empty Cubicles
{{< rawhtml >}}
<iframe width="100%" height="150" scrolling="no" frameborder="no" src="https://funkwhale.badhouseplants.net/front/embed.html?&amp;type=track&amp;id=23"></iframe>
{{< /rawhtml >}}
### Body Drop
{{< rawhtml >}}
<iframe width="100%" height="150" scrolling="no" frameborder="no" src="https://funkwhale.badhouseplants.net/front/embed.html?&amp;type=track&amp;id=20"></iframe>
{{< /rawhtml >}}
### Broken Piano
{{< rawhtml >}}
<iframe width="100%" height="150" scrolling="no" frameborder="no" src="https://funkwhale.badhouseplants.net/front/embed.html?&amp;type=track&amp;id=22"></iframe>
{{< /rawhtml >}}
### Dead Wings
{{< rawhtml >}}
<iframe width="100%" height="150" scrolling="no" frameborder="no" src="https://funkwhale.badhouseplants.net/front/embed.html?&amp;type=track&amp;id=25"></iframe>
{{< /rawhtml >}}
### Trapped
{{< rawhtml >}}
<iframe width="100%" height="150" scrolling="no" frameborder="no" src="https://funkwhale.badhouseplants.net/front/embed.html?&amp;type=track&amp;id=17"></iframe>
{{< /rawhtml >}}

97
content/cv/index.md Normal file

@ -0,0 +1,97 @@
---
title: "Curriculum Vitae (CV)"
date: 2023-02-11T18:29:00+01:00
draft: false
ShowToc: true
---
# Nikolai Rodionov
```
> Location: Düsseldorf, Germany
> Email: allanger@zohomail.com (preferred)
> Phone: 015223284008
> Github: https://github.com/allanger
```
---
## About me
<p align="center">
<img src="./myself.jpeg" alt="drawing" width="30%"/>
</p>
I'm a DevOps engineer (or SRE if you wish) with 5++ years of hands-on experience with a decent amount of tools that are most probably used or going to be used in your company. One of the most important tools that I love working with, and want to continue working with, is Kubernetes. At least while I don't see any better alternative to it. I think that containers themselves are one of the coolest inventions in development, and I'm trying to use them whenever possible. Also, I believe that every routine must be automated, because routine is a boring job that makes people lose focus and make mistakes.
I think that there are several things that a good SRE or DevOps engineer must be able to do:
- To build reliable and stable infrastructure
- Keep this infrastructure up-to-date
- Keep all the source and instructions of this infrastructure clean and simple
- Avoid a human factor as long as possible
- And when it's not possible to avoid it, not to be afraid to take responsibility
Also, I think it's important that, before implementing anything, an engineer understands all the requirements and checks which tools can fulfil them. I often see how people try to use a tool for its name rather than for its functionality, and hence they have to do a lot of additional work and deal with compromises. And if nothing really can fulfil those requirements, you shouldn't be afraid of writing something new *and open-sourcing it*.
<div class="page-break"></div>
## Experience
**Klöckner-i**: DevOps Engineer
> 01.2022 - until now
```
| GCloud - Microsoft Azure
| Linux - Containers - Kubernetes
| Helm - Helmfile
| Percona Mysql - Postgresql
| Bash
| Prometheus - Grafana - Elasticsearch - Kibana
| ArgoCD - Gitlab CI - Github Actions
| Sops
| Ansible
```
---
**Itigris**: DevOps Engineer
> 07.2019 - 12.2021
```
| AWS - Yandex Cloud
| Linux - Containers - Kubernetes
| Helm - Helmfile - Kustomize
| Bash
| Gitlab CI - Drone - ArgoCD
| Postgresql - Redis
| Java - JS - Go
| Ansible - Terraform
| Prometheus - Grafana - Loki - Elasticsearch - Kibana
```
---
**Etersoft**: DevOps Engineer
> 03.2017 - 06.2019
```
| Bare metal - Proxmox - Virtual Box
| Linux - Containers - Networks
| Bash - Perl
| Mysql - Postgresql
| Minio - Ceph
| Gitlab CI
| Ansible
```
<div class="page-break"></div>
## A little bit more about me
- I love to work with `Kubernetes`, but not with `yaml`.
- I'm a huge fan of [Helmfile](https://github.com/helmfile/helmfile).
- I have written several small cli tools in Rust, that you might find in my [GitHub profile pins](https://github.com/allanger) (they are not perfect, but I'm working on it).
- I'm contributing to [db-operator](https://github.com/kloeckner-i/db-operator).
- I'm trying to automate everything, as long as I don't lose control over what's automated.
- I love Perl, although I don't even remember how to write code in it, but I would be thrilled to have a chance to work with it in production
- I also think that everything is better in Rust, or at least in Go *(if Bash is not enough)*
I have a blog (written-as-code) that is deployed to K8s (https://badhouseplants.net/), with the source code stored in a self-hosted Gitea that is also deployed to K8s, alongside the CI/CD system where this blog is built and published. This CV is also built as part of the CI process and then uploaded to `minio` storage that is ~~surprisingly~~ also running in this cluster. So you can download the latest version of the CV here: <https://s3.badhouseplants.net/public-download/n.rodionov.pdf>
> But I can't guarantee 100% availability, because it's a one-node k8s, and sometimes I need to do maintenance work

BIN
content/cv/myself.jpeg Normal file

Binary file not shown (104 KiB).

49
content/music/index.md Normal file

@ -0,0 +1,49 @@
---
title: "Music"
date: 2023-01-31T13:52:43+01:00
draft: false
ShowToc: true
---
Everything created by me can be found on my [funkwhale instance](https://funkwhale.badhouseplants.net). But I'm only uploading `lossy` there. I was trying to upload lossless, but it either doesn't really work with my Android app, or it's hard to manage. And it needs way more disk space that way. So if you want to listen to lossless, go to my [Bandcamp](https://allanger.bandcamp.com/). *A lot of tracks are still not there, but they will be there soon*. I also have a [SoundCloud account](https://soundcloud.com/allanger) and I try to publish everything there.
---
### allanger
[Spotify](https://open.spotify.com/artist/1VPAs75xrhaXhCIIHsgF02) - [Apple Music](https://music.apple.com/us/artist/allanger/1617855325) - [Deezer](https://www.deezer.com/us/artist/117780712) - [SoundCloud](https://soundcloud.com/allanger) - [Bandcamp](https://allanger.bandcamp.com/) - [Funkwhale](https://funkwhale.badhouseplants.net/library/artists/3/)
#### Anymore
> In this song, I'm using samples from a YouTube video, so I'm not sure that I can distribute it on all platforms. That's why it exists only on SoundCloud and Funkwhale
>![Cover](/music/allanger-Anymore.jpg)
>Release Date: 2018-12-26
>
>Genre: Indie
>
> Sub Genre: Lo-Fi Indie
[SoundCloud](https://soundcloud.com/allanger/anymore) - [Funkwhale](https://funkwhale.badhouseplants.net/library/albums/11/)
### Oveleane
> It's another project made by me. I just thought that the electronic stuff wouldn't fit well in the allanger profile, so I decided to separate them. But it's still allanger, you know...
[Spotify](https://open.spotify.com/artist/2PKE1XvwP82LCacM5q6rCx?si=hJyJWcEgR4mZLkjbCso45A) - [Apple Music](https://music.apple.com/us/artist/oveleane/1654951021) - [Deezer](https://www.deezer.com/us/artist/190392997)
#### Four Steps Behind
>![Cover](/music/Oveleane%20-%20Four%20Steps%20Behind.jpg)
>Release Date: 2022-12-05
>
>Genre: Electronic
>
>Sub Genre: IDM/Experimental
[Spotify](https://open.spotify.com/album/1RjB1xLoD2JXmWuBjGegCN?si=fIsGrOfoQRaeKu9f-Oh0dw) - [Apple Music](https://music.apple.com/us/album/1654953305) - [Deezer](https://www.deezer.com/us/album/377293977) - [Funkwhale](https://funkwhale.badhouseplants.net/library/albums/1/)
{{< rawhtml >}}
<iframe width="100%" height="330" scrolling="no" frameborder="no" src="https://funkwhale.badhouseplants.net/front/embed.html?&amp;type=album&amp;id=1"></iframe>
{{< /rawhtml >}}

0
content/posts/_index.md Normal file

Binary file not shown (292 KiB).


@ -0,0 +1,574 @@
---
title: "ArgoCD vs Helmfile: Applications"
date: 2023-02-13T12:14:09+01:00
draft: false
cover:
image: "cover.png"
caption: "ArgoCD"
relative: false
responsiveImages: false
ShowToc: true
---
> So as promised in [the previous ArgoCD post]({{< ref "dont-use-argocd-for-infrastructure" >}}), I'll try to show a simple example of Pull Requests for different kinds of setups. This is the first part; putting everything in the same post seemed like too much.
# Intro
I've created three `main` branches and three branches for installing two applications. I assume we have two production clusters (if you've read the previous post, you know that by saying 'production' I mean production for the SRE team, so they can be dev/stage/whatever for other teams) and one test cluster (the one where the SRE team can test anything without affecting other teams).
You can already check all of them here: <https://git.badhouseplants.net/allanger/helmfile-vs-argo/pulls>
I've decided to install [Vertical pod autoscaler](https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler) to both prod clusters and [goldilocks](https://github.com/FairwindsOps/goldilocks) to only one of them. Therefore, I have to add both to the test cluster as well. Also, I've promised that I'd implement the CI/CD for all of those solutions, but I think that it's going to be enough just to describe the logic. If you really want to see different implementations of CI/CD, you can shoot me a message, and I will write another post then.
# Applications (An App of Apps)
So here is the PR for installing applications with `Application` manifests.
<https://git.badhouseplants.net/allanger/helmfile-vs-argo/pulls/2/files>
I've chosen to follow the `App of apps` pattern, because it includes the changes that would have to be made both for a "direct" application installation and for `app of apps`. So let's have a look at the main manifests; here you can see the base: <https://git.badhouseplants.net/allanger/helmfile-vs-argo/src/branch/argo-apps-main>
Initially I thought of using only one "Big Application" manifest for all three clusters, but I found out that it's not so easy when you don't have clusters with exactly the same infrastructure. Even with multi-source apps, you will probably have to use an additional tool for templating/substituting, for example like this:
```YAML
# app-of-apss.yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: app-of-apps
namespace: argo-system
spec:
destination:
namespace: argo-system
server: https://kubernetes.default.svc
project: system
sources:
- path: ./manifests/$CLUSTER
repoURL: git@git.badhouseplants.net:allanger/helmfile-vs-argo.git
targetRevision: argo-apps-main
- path: ./manifests/common
repoURL: git@git.badhouseplants.net:allanger/helmfile-vs-argo.git
targetRevision: argo-apps-main
```
and then, in a pipeline do something like this:
```BASH
export CLUSTER=cluster1
envsubst < app-of-apps.yaml | kubectl apply -f - # I haven't tested it out, so this command may not work, but I hope you get the point.
```
So it's either additional files, or additional logic in CI/CD.
Also, the `helm-freeze` thing. I wanted to vendor charts, because in this example it's required, but my Gitea instance can't preview file changes when there are 9000+ lines of code updated, so I had to remove them.
But the logic would be like this:
- Manual part:
- Update `helm-freeze.yaml`
- Run `helm-freeze sync`
- Add a new application to the `manifests/$CLUSTER` dir
- Push
- CI/CD
- Since it needs to be `GitOps`, you need to check that the charts in the `vendor` dir are up-to-date with `helm-freeze.yaml`. *Because if you updated helm-freeze and forgot to execute `helm-freeze sync`, you will have a contradiction between the actual and desired states. That's one of the reasons why I don't like this kind of vendoring. Either it's an additional step in CI that verifies that the manual step was done, or it's additional work for the reviewer. You can also add an action that executes it within the pipeline and pushes to your branch, but I'm completely against that. (something for another post maybe)*
- Then, depending on the branch:
- If not `main`
> Then you need to run `argocd diff` for the production clusters and deploy the changes to the test clusters (a rough sketch follows this list)
- If `main`
> Deploy to all clusters
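A minimal sketch of that branch logic, assuming Drone-style environment variables; the kubeconfig contexts, cluster names, and file paths are made up for illustration:
```BASH
# Fail if the vendored charts are not in sync with helm-freeze.yaml
helm-freeze sync
git diff --exit-code vendor/ || { echo "vendor/ is out of date, run 'helm-freeze sync' and commit"; exit 1; }

if [ "$DRONE_BRANCH" != "main" ]; then
  # Feature branch: only show diffs for the production clusters, deploy to the test cluster
  argocd app diff app-of-apps --local ./manifests/cluster1 || true
  kubectl --context cluster-test apply -f ./cluster-test.yaml
else
  # Main branch: apply the app-of-apps manifest to every cluster
  for cluster in cluster-1 cluster-2 cluster-test; do
    kubectl --context "$cluster" apply -f "./$cluster.yaml"
  done
fi
```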
So let's try to do it. First, we create an `app-of-apps` manifest:
```YAML
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: app-of-apps
namespace: argo-system
spec:
destination:
namespace: argo-system
server: https://kubernetes.default.svc
project: default
source:
path: ./manifests/cluster2/
repoURL: ssh://git@git.badhouseplants.net/allanger/helmfile-vs-argo.git
targetRevision: argo-apps-updated
```
Then we need to create apps
```YAML
# ./manifests/cluster2/vpa.yaml
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: vpa
namespace: argo-system
spec:
destination:
namespace: vpa-system
server: https://kubernetes.default.svc
project: default
source:
helm:
releaseName: vpa
valueFiles:
- ../../values/vpa.common.yaml
path: ./vendor/vpa
repoURL: ssh://git@git.badhouseplants.net/allanger/helmfile-vs-argo.git
targetRevision: argo-apps-updated
```
Here we have different options:
- Sync everything automatically (app-of-apps and applications), but it doesn't look too fail-safe to me. And also we can't know the diff then, because whatever is different will be applied immediately. So it's 👎
- Sync automatically only the `app-of-apps`, and then sync applications with the `argocd` cli (see the commands right after this list). It sounds better, because then we can run diff on applications and know the difference between the desired state and the real state, so it's closer to 👍
- Sync applications automatically, but the app-of-apps with the cli. Doesn't sound too bad, does it? Maybe not as flexible as the previous option, but still not too bad. So it's closer to 👍 too.
- Sync everything with the cli. I would say it will give you the best control, but it becomes additional steps in the pipeline. But I don't think it's a hard thing to implement, so let's say "closer to 👍 too".
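For reference, the cli part of those options comes down to a couple of commands per application (the app name matches the manifest above; diffing apps that aren't installed yet is a separate problem, as shown further down):
```BASH
# Show what would change for an already-installed application
argocd app diff vpa
# Sync it once the change is approved
argocd app sync vpa
```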
I don't consider the **first** option a reliable one, so I won't even talk about it. You can try, of course, but your changes won't be visible unless they are deployed. So it's like the "test in production" thing.
The **second** one, let's have a look. Let's try adding some values to the `vpa` release, and install Goldilocks (assuming it wasn't installed).
VPA values:
```YAML
# ./values/vpa.common.yaml
# I've just changed `false` to `true`
updater:
enabled: true # <- here
```
Goldilocks app:
```YAML
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: goldilocks
namespace: argo-system
spec:
destination:
namespace: vpa-system
server: https://kubernetes.default.svc
project: default
source:
helm:
releaseName: goldilocks
path: ./vendor/goldilocks
repoURL: ssh://git@git.badhouseplants.net/allanger/helmfile-vs-argo.git
targetRevision: argo-apps-updated
```
And I pushed it to the repo.
So now let's see what I've got in the UI:
![Changes in UI](/argocd-vs-helmfile/update-in-ui.png)
This is how `diffs` for VPA look in the UI:
![Diff in UI](/argocd-vs-helmfile/diff-in-ui.png)
{{< details "Here you can find all the diffs from the UI as text" >}}
```diff
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ labels:
+ app.kubernetes.io/component: updater
+ app.kubernetes.io/instance: vpa
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/name: vpa
+ app.kubernetes.io/version: 0.11.0
+ argocd.argoproj.io/instance: vpa
+ helm.sh/chart: vpa-1.6.0
+ name: vpa-updater
+ namespace: vpa-system
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: updater
+ app.kubernetes.io/instance: vpa
+ app.kubernetes.io/name: vpa
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: updater
+ app.kubernetes.io/instance: vpa
+ app.kubernetes.io/name: vpa
+ spec:
+ containers:
+ - env:
+ - name: NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ image: 'k8s.gcr.io/autoscaling/vpa-updater:0.11.0'
+ imagePullPolicy: Always
+ livenessProbe:
+ failureThreshold: 6
+ httpGet:
+ path: /health-check
+ port: metrics
+ scheme: HTTP
+ periodSeconds: 5
+ successThreshold: 1
+ timeoutSeconds: 3
+ name: vpa
+ ports:
+ - containerPort: 8943
+ name: metrics
+ protocol: TCP
+ readinessProbe:
+ failureThreshold: 120
+ httpGet:
+ path: /health-check
+ port: metrics
+ scheme: HTTP
+ periodSeconds: 5
+ successThreshold: 1
+ timeoutSeconds: 3
+ resources:
+ limits:
+ cpu: 200m
+ memory: 1000Mi
+ requests:
+ cpu: 50m
+ memory: 500Mi
+ securityContext: {}
+ securityContext:
+ runAsNonRoot: true
+ runAsUser: 65534
+ serviceAccountName: vpa-updater
```
```DIFF
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
kubectl.kubernetes.io/last-applied-configuration: >
{"apiVersion":"rbac.authorization.k8s.io/v1","kind":"ClusterRoleBinding","metadata":{"annotations":{},"labels":{"argocd.argoproj.io/instance":"vpa"},"name":"vpa-actor"},"roleRef":{"apiGroup":"rbac.authorization.k8s.io","kind":"ClusterRole","name":"vpa-actor"},"subjects":[{"kind":"ServiceAccount","name":"vpa-recommender","namespace":"vpa-system"}]}
labels:
argocd.argoproj.io/instance: vpa
managedFields:
- apiVersion: rbac.authorization.k8s.io/v1
fieldsType: FieldsV1
fieldsV1:
'f:metadata':
'f:labels':
.: {}
'f:argocd.argoproj.io/instance': {}
'f:roleRef': {}
'f:subjects': {}
manager: argocd-application-controller
operation: Update
time: '2023-02-13T20:58:02Z'
- apiVersion: rbac.authorization.k8s.io/v1
fieldsType: FieldsV1
fieldsV1:
'f:metadata':
'f:annotations':
.: {}
'f:kubectl.kubernetes.io/last-applied-configuration': {}
manager: argocd-controller
operation: Update
time: '2023-02-13T20:58:02Z'
name: vpa-actor
resourceVersion: '34857'
uid: 71958267-68b4-4923-b2bb-eaf7b3c1a992
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: vpa-actor
subjects:
- kind: ServiceAccount
name: vpa-recommender
namespace: vpa-system
+ - kind: ServiceAccount
+ name: vpa-updater
+ namespace: vpa-system
```
```DIFF
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ argocd.argoproj.io/instance: vpa
+ name: vpa-evictionter-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: vpa-evictioner
+ subjects:
+ - kind: ServiceAccount
+ name: vpa-updater
+ namespace: vpa-system
```
```DIFF
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ argocd.argoproj.io/instance: vpa
+ name: vpa-status-reader-binding
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: vpa-status-reader
+ subjects:
+ - kind: ServiceAccount
+ name: vpa-updater
+ namespace: vpa-system
```
```DIFF
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
kubectl.kubernetes.io/last-applied-configuration: >
{"apiVersion":"rbac.authorization.k8s.io/v1","kind":"ClusterRoleBinding","metadata":{"annotations":{},"labels":{"argocd.argoproj.io/instance":"vpa"},"name":"vpa-target-reader-binding"},"roleRef":{"apiGroup":"rbac.authorization.k8s.io","kind":"ClusterRole","name":"vpa-target-reader"},"subjects":[{"kind":"ServiceAccount","name":"vpa-recommender","namespace":"vpa-system"}]}
labels:
argocd.argoproj.io/instance: vpa
managedFields:
- apiVersion: rbac.authorization.k8s.io/v1
fieldsType: FieldsV1
fieldsV1:
'f:metadata':
'f:labels':
.: {}
'f:argocd.argoproj.io/instance': {}
'f:roleRef': {}
'f:subjects': {}
manager: argocd-application-controller
operation: Update
time: '2023-02-13T20:58:02Z'
- apiVersion: rbac.authorization.k8s.io/v1
fieldsType: FieldsV1
fieldsV1:
'f:metadata':
'f:annotations':
.: {}
'f:kubectl.kubernetes.io/last-applied-configuration': {}
manager: argocd-controller
operation: Update
time: '2023-02-13T20:58:02Z'
name: vpa-target-reader-binding
resourceVersion: '34855'
uid: 30261740-ad5d-4cd9-b043-0ff18daaf3aa
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: vpa-target-reader
subjects:
- kind: ServiceAccount
name: vpa-recommender
namespace: vpa-system
+ - kind: ServiceAccount
+ name: vpa-updater
+ namespace: vpa-system
```
{{< /details >}}
And for Goldilocks
![Goldilocks Application](/argocd-vs-helmfile/goldilocks-ui.png)
All the diffs are also there, and they look good.
But to see them I had to push to the target branch. And we want to see changes without pushing.
```YAML
# main
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: app-of-apps
namespace: argo-system
spec:
destination:
namespace: argo-system
server: https://kubernetes.default.svc
project: default
source:
path: ./manifests/cluster2/
repoURL: ssh://git@git.badhouseplants.net/allanger/helmfile-vs-argo.git
targetRevision: argo-apps-main
```
Then we need to create apps
```YAML
# ./manifests/cluster2/vpa.yaml
# feature branch
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: vpa
namespace: argo-system
spec:
destination:
namespace: vpa-system
server: https://kubernetes.default.svc
project: default
source:
helm:
releaseName: vpa
valueFiles:
- ../../values/vpa.common.yaml
path: ./vendor/vpa
repoURL: ssh://git@git.badhouseplants.net/allanger/helmfile-vs-argo.git
targetRevision: argo-apps-main
```
![App of apps in the `main`](/argocd-vs-helmfile/app-of-apps-main.png)
So currently the app of apps doesn't know about what's happening in my new branch, and so I can't just run `argocd app diff vpa`. So what should I do?
```BASH
argocd app diff --help
...
Usage:
argocd app diff APPNAME [flags]
...
```
That means that I can't use it for those new apps that exist only in my branch, because I need to pass an App name, and since it's not installed yet, I get something like
```BASH
argocd app diff vpa
FATA[0000] rpc error: code = NotFound desc = error getting application: applications.argoproj.io "vpa" not found
```
There is a `--local` option, but it still requires a name ~~(why, if there is a name in the manifests 🙃🙃🙃)~~
```BASH
# Just testing out
argocd app diff vpa --local ./manifests/cluster2/
FATA[0000] rpc error: code = NotFound desc = error getting application: applications.argoproj.io "vpa" not found # 🤪
```
Ok, then we can check the app-of-apps
```BASH
argocd app diff app-of-apps --local ./cluster-1.yaml
Warning: local diff without --server-side-generate is deprecated and does not work with plugins. Server-side generation will be the default in v2.7.FATA[0000] error while parsing source parameters: stat cluster-1.yaml/.argocd-source.yaml: not a directory
argocd app diff app-of-apps --local ./cluster-1.yaml --server-side-generate
FATA[0000] rpc error: code = Unknown desc = failed to get app path: ./manifests/cluster2/: app path does not exist
argocd app diff app-of-apps --local ./cluster-2.yaml --server-side-generate --loglevel debug
FATA[0000] rpc error: code = Unknown desc = failed to get app path: ./manifests/cluster2/: app path does not exist
# I can't get it, maybe anybody could tell me what I'm doing wrong?
argocd app diff app-of-apps --local ./cluster-2.yaml
Warning: local diff without --server-side-generate is deprecated and does not work with plugins. Server-side generation will be the default in v2.7.FATA[0000] error while parsing source parameters: stat cluster-2.yaml/.argocd-source.yaml: not a directory
mkdir /tmp/argo-test
cp cluster-2.yaml /tmp/argo-test
argocd app diff app-of-apps --local /tmp/argo-test --loglevel debug
Warning: local diff without --server-side-generate is deprecated and does not work with plugins. Server-side generation will be the default in v2.7.
===== argoproj.io/Application /app-of-apps ======
0a1,15
> apiVersion: argoproj.io/v1alpha1
> kind: Application
> metadata:
> labels:
> argocd.argoproj.io/instance: app-of-apps
> name: app-of-apps
> spec:
> destination:
> namespace: argo-system
> server: https://kubernetes.default.svc
> project: default
> source:
> path: manifests/cluster2/
> repoURL: ssh://git@git.badhouseplants.net/allanger/helmfile-vs-argo.git
> targetRevision: argo-apps-main
# If i change a branch for the app of apps target to the current one
cat cluster-2.yaml
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: app-of-apps
namespace: argo-system
spec:
destination:
namespace: argo-system
server: https://kubernetes.default.svc
project: default
source:
path: ./manifests/cluster2/
repoURL: ssh://git@git.badhouseplants.net/allanger/helmfile-vs-argo.git
targetRevision: argo-apps-updated
kubectl apply -f cluster-2.yaml
cp cluster-2.yaml /tmp/argo-test
argocd app diff app-of-apps --local /tmp/argo-test --loglevel debug
Warning: local diff without --server-side-generate is deprecated and does not work with plugins. Server-side generation will be the default in v2.7.
===== argoproj.io/Application /app-of-apps ======
0a1,15
> apiVersion: argoproj.io/v1alpha1
> kind: Application
> metadata:
> labels:
> argocd.argoproj.io/instance: app-of-apps
> name: app-of-apps
> spec:
> destination:
> namespace: argo-system
> server: https://kubernetes.default.svc
> project: default
> source:
> path: ./manifests/cluster2/
> repoURL: ssh://git@git.badhouseplants.net/allanger/helmfile-vs-argo.git
> targetRevision: argo-apps-updated
```
I don't really understand what this means. *Most probably, I'm just stupid.* But what I see is that it's not working with `--server-side-generate`, failing with an error that I can't really understand. And it's saying that I shouldn't use it without the flag, because that way of running it is deprecated. And even without the flag, it's giving me a strange output that I don't know how to use.
So as I see it, to have a proper diff, you need to apply. But that doesn't look like a fail-safe and scalable way of working.
I said that we could check different options for syncing, but as I see it now, other workflows won't give me a better overview of what's happening. So I don't think it makes a lot of sense. If I find a way to see a proper diff without applying manifests first, I will go back to this topic and write one more post.
## Maybe it's because of the App of Apps layer
Let's try installing apps directly. Remove the app-of-apps from k8s, and let's use the manifests from `/manifests/cluster2/` directly. As I see it, diffing won't work anyway for applications that are not installed yet. So you can check the ones that are already installed, but I couldn't make that work either. I was changing values to check if they would show up, but they didn't. *Again, I could have simply screwed up, and if you have a positive experience with that, don't hesitate to let me know; I'm willing to change my mind*
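For clarity, that "direct" variant is roughly this (namespace and paths as in the manifests above):
```BASH
# Drop the app-of-apps so it no longer manages the child Applications
kubectl -n argo-system delete application app-of-apps
# Apply the per-cluster Application manifests directly
kubectl -n argo-system apply -f ./manifests/cluster2/
```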
## Conclusion
So you can check the PR here: <https://git.badhouseplants.net/allanger/helmfile-vs-argo/pulls/2/files>
I like that `values` can be handled as normal values files. (But for handling secrets you might have to add a [CMP](https://argo-cd.readthedocs.io/en/stable/user-guide/config-management-plugins/), which means additional work and maintenance.) But even if adding a CMP is fine, I couldn't get proper `diffs` for my changes, which means that I can't see what's happening without applying manifests. And applying manifests means that other team members won't be able to work on other tickets within the same scope, so it looks like a bottleneck to me.
But I don't like that you need to add a lot of manifests to manage all the applications. We have only 2 manifests that are copied from folder to folder, so we have a lot of repeating code. And repeating code is never good. So I would write a tool that lets you choose applications from the list of all applications and the clusters where they need to be deployed, so that the config looks like this:
```YAML
app_path: ./manifests/common
clusters:
- cluster: cluster1
applications:
- vpa
- cluster: cluster2
applications:
- vpa
- goldilocks
- cluster: cluster3
applications:
- vpa
- goldilocks
```
But I think that with the whole GitOps pulling concept it will be a hard thing to implement. And in the end it looks like helmfile, so ... 🤷‍♀️🤷‍♀️🤷‍♀️
I can only say that I see no profit in using Argo like this. It only seems like either a very complicated setup (most probably you will be able to implement anything you need; the question is how much time you will spend on that), or a ~~crippled~~ incomplete setup.
And if you compare the number of lines that are updated to install these apps as `Applications` to the helmfile setup, it's going to be ~100 vs ~30. And that's something I also don't like.
In the next post I will try doing the same with `ApplicationSets`, and we'll see if it looks better or not.
Thanks,
Oi!

Binary file not shown (324 KiB).


@ -0,0 +1,240 @@
---
title: "ArgoCD vs Helmfile: ApplicationSet"
date: 2023-02-15T10:14:09+01:00
draft: false
cover:
image: "cover.png"
caption: "ArgoCD"
relative: false
responsiveImages: false
ShowToc: true
---
This is the second post about *"argocding"* your infrastructure. [The first part can be found here]({{< ref "argocd-vs-helmfile-application" >}}).
There I tried using `Applications` for deploying. Here I will try to show an example with `ApplicationSets`. As in the previous article, I will be installing [VPA](https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler) and [Goldilocks](https://github.com/FairwindsOps/goldilocks)
So let's prepare a base. We have 3 clusters:
- cluster-1
- cluster-2
- cluster-3
> With `ApplicationSets` you have an incredible number of ways to deploy stuff. So what I'm doing may look super-different from what you would do
I'm creating 3 manifests, one for each cluster.
```YAML
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: helm-releases
namespace: argo-system
spec:
syncPolicy:
preserveResourcesOnDeletion: true
generators:
- git:
repoURL: https://git.badhouseplants.net/allanger/helmfile-vs-argo.git
revision: argo-applicationset-main
files:
- path: "cluster2/*"
- git:
repoURL: https://git.badhouseplants.net/allanger/helmfile-vs-argo.git
revision: argo-applicationset-main
files:
- path: "common/*"
template:
metadata:
name: "{{ argo.application }}"
namespace: argo-system
spec:
project: "{{ argo.project }}"
source:
helm:
valueFiles:
- values.yaml
values: |-
{{ values }}
repoURL: "{{ chart.repo }}"
targetRevision: "{{ chart.version }}"
chart: "{{ chart.name }}"
destination:
server: "{{ argo.cluster }}"
namespace: "{{ argo.namespace }}"
```
Manifests in a setup like this have only one value that is really different, so we could create just one manifest that would look like this:
```YAML
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: helm-releases
namespace: argo-system
spec:
syncPolicy:
preserveResourcesOnDeletion: true
generators:
- git:
repoURL: https://git.badhouseplants.net/allanger/helmfile-vs-argo.git
revision: argo-applicationset-main
files:
- path: "$CLUSTER/*"
- git:
repoURL: https://git.badhouseplants.net/allanger/helmfile-vs-argo.git
revision: argo-applicationset-main
files:
- path: "common/*"
template:
metadata:
name: "{{ argo.application }}"
namespace: argo-system
spec:
project: "{{ argo.project }}"
source:
helm:
valueFiles:
- values.yaml
values: |-
{{ values }}
repoURL: "{{ chart.repo }}"
targetRevision: "{{ chart.version }}"
chart: "{{ chart.name }}"
destination:
server: "{{ argo.cluster }}"
namespace: "{{ argo.namespace }}"
```
And add a step in the `CI` pipeline where we substitute the correct value for the variable. But since I'm not really implementing a CI, I will create 3 manifests.
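Such a CI step could be as small as this; the template file name and the `$CLUSTER` variable are assumptions, mirroring the manifest above:
```BASH
# Render the per-cluster ApplicationSet from the shared template and apply it
export CLUSTER=cluster2
envsubst < applicationset-template.yaml > "applicationset-$CLUSTER.yaml"
kubectl -n argo-system apply -f "applicationset-$CLUSTER.yaml"
```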
Then I need to add `generators` in the feature branch:
```YAML
#/common/vpa.yaml
---
argo:
cluster: https://kubernetes.default.svc
application: vpa
project: default
namespace: vpa-system
chart:
version: 1.6.0
name: vpa
repo: https://charts.fairwinds.com/stable
values: |
updater:
enabled: false
```
```YAML
#/cluster2/goldilocks.yaml
---
argo:
cluster: https://kubernetes.default.svc
application: goldilocks
project: default
namespace: vpa-system
chart:
version: 6.5.0
name: goldilocks
repo: https://charts.fairwinds.com/stable
values: |
```
And the main problem here is that values are passed as a string, so you can't separate them into different files, use secrets, or share common values. That can be solved with multi-source apps that came with ArgoCD 2.6, but I can't say that they are production-ready yet. Also, I've read that `ApplicationSets` can be used to separate values and charts, but it seemed way too complicated to me back then, and I think that with ArgoCD 2.7 this problem will be completely solved, so I'm not sure that it makes sense to check that approach now.
The next thing is that Git generators point to a specific branch, so I have two problems: how to test changes on the `cluster-test`, and how to view diffs.
### Test changes
This problem is solvable. I will show it on the cluster-2 example, because I don't have 3 clusters running locally, but the same logic should apply to the test cluster.
After you add new generator files, you need to deploy them to the `test cluster`, and you also need to not override what's being tested by other team members. So the best option that I currently see is to take the `ApplicationSet` manifest that is already deployed to `k8s` and add the new generators to it. So it looks like this:
```YAML
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: helm-releases
namespace: argo-system
spec:
syncPolicy:
preserveResourcesOnDeletion: true
generators:
- git:
repoURL: https://git.badhouseplants.net/allanger/helmfile-vs-argo.git
revision: argo-applicationset-main
files:
- path: "cluster2/*"
- git:
repoURL: https://git.badhouseplants.net/allanger/helmfile-vs-argo.git
revision: argo-applicationset-main
files:
- path: "common/*"
# This should be added within CI and removed once the branch is merged
- git:
repoURL: https://git.badhouseplants.net/allanger/helmfile-vs-argo.git
revision: argo-applicationset-updated
files:
- path: "common/*"
- git:
repoURL: https://git.badhouseplants.net/allanger/helmfile-vs-argo.git
revision: argo-applicationset-updated
files:
- path: "cluster2/*"
template:
metadata:
name: "{{ argo.application }}"
namespace: argo-system
spec:
project: "{{ argo.project }}"
source:
helm:
valueFiles:
- values.yaml
values: |-
{{ values }}
repoURL: "{{ chart.repo }}"
targetRevision: "{{ chart.version }}"
chart: "{{ chart.name }}"
destination:
server: "{{ argo.cluster }}"
namespace: "{{ argo.namespace }}"
```
After applying this change, this is what I've got:
![ApplicationSet](/argocd-vs-helmfile/applicationset-test.png)
Those applications should be deployed automatically within a pipeline. So the steps in your pipeline would look like this (a rough sketch follows the list):
- Get current `ApplicationSet` manifest from Kubernetes
- Add new generators
- Sync applications with argocd cli
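A rough sketch of those steps; the patching of the manifest is shown as a manual edit here, while in a real pipeline it would be a scripted YAML merge:
```BASH
# 1. Get the current ApplicationSet manifest from the cluster
kubectl -n argo-system get applicationset helm-releases -o yaml > appset.yaml
# 2. Add the git generators that point at the feature branch (as in the manifest above)
$EDITOR appset.yaml
kubectl -n argo-system apply -f appset.yaml
# 3. Sync the newly generated applications with the argocd cli
argocd app sync vpa goldilocks
```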
But I'm not sure what's going to happen if you have two different pipelines running at the same time. Probably, changes will be overwritten by the pipeline that is a little bit slower. But I think that it can be solved without a lot of additional problems. And also, I don't think it's a situation that you will have to face very often, so you can just rerun your pipeline after all.
### Diffs
Diffs are not supported for `ApplicationSets` at the moment, and I'm not sure when they will be: <https://github.com/argoproj/argo-cd/issues/10895>
~~And with the diffing situation from the previous article, I think that they will not work the way I'd like them to work.~~
But I think that the easiest way to deal with them right now would be to add `git generators` not only to the test cluster but to all clusters, add an additional label to those applications (e.g. `test: true`), and sync only those applications that don't have this label (see the sketch after the lists). So the whole pipeline for a branch would look like this:
Feature branch
- Get current `ApplicationSet` manifests from Kubernetes (each cluster)
- Add new generators (each cluster)
- Sync applications with argocd cli (only test cluster)
Main branch (merged)
- Get current `ApplicationSet` manifests from Kubernetes (each cluster)
- Remove obsolete generators (each cluster)
- Sync applications with argocd cli (each cluster and filter by label not to sync those, that are not merged yet)
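A sketch of the label-based filtering for the `main` branch; it assumes the `test: true` label ends up on the generated `Application` resources:
```BASH
# Sync every application that is not marked as test-only
argocd app list -l 'test!=true' -o name | xargs -r -n 1 argocd app sync
```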
> But I'm not sure exactly how to manage these `test` labels. They can be added manually to generator files, but then you can't be sure that nobody will forget to do it. So I think that, if possible, they should be added to generators inside an `ApplicationSet` manifest, or added to applications right after they were created by an `ApplicationSet`. The second way is not the best, because if the `main` pipeline is faster than the feature one, you will have it installed in a production cluster.
## Conclusion
I like this way a lot more than plain `Applications`, especially with multi-source applications. I think that the main problem with this approach is the complicated CI/CD pipelines. And I don't like that for diffing you need to have something added to the prod clusters. Diffing must be safe, and if you add 1000 generator files and push them, you will have 1000 new applications in your ArgoCD. I'm not sure how it's going to handle that. And since ArgoCD is something that manages your whole infrastructure, I bet you want it to work like a charm; you don't want to doubt whether it's going to survive situations like this.
The amount of changes is not big, pretty close to helmfile, I'd say. And the more common stuff you have, the less you need to copy-paste. You can see the PR here: <https://git.badhouseplants.net/allanger/helmfile-vs-argo/pulls/3>
Thanks,
Oi!

File diff suppressed because it is too large.

Binary file not shown (62 KiB).


@ -0,0 +1,323 @@
---
title: "Don't use ArgoCD for your infrastructure"
date: 2023-02-09T12:47:32+01:00
draft: false
ShowToc: true
cover:
image: "cover.png"
caption: "ArgoCD"
relative: false
responsiveImages: false
---
> Of course, it's just a clickbait title. Use whatever works for you. I will just describe why I wouldn't use `ArgoCD` for the infrastructure
## Prelude
`ArgoCD` is an incredibly popular tool and I see that many DevOps guys *(I know that it's not a job definition, but I feel like it's the best description that everybody can understand)* want to use it everywhere. I wasn't an exception, but I've just changed my mind. I still think that `ArgoCD` is cool, and you need to use it, but not for the infrastructure.
## But why?
### One more prelude
Let's assume you are a team that is providing something as a service to other teams. Even if you're the only member, it doesn't matter. And let's assume you're working with `Kubernetes` or plan to work with it, otherwise I'm not sure why you would even read this post.
> It's very common to use separate clusters for different teams, customers, applications, etc. Let's say you have 3 clusters
![3 clusters and you](/dont-use-argocd-for-infrastructure/3-clusters.png)
Setups may be different: you can use different clusters for different products, environments, teams, or you can have your own opinion on how to split workloads between clusters. But these (in our case) 3 clusters are used directly by other teams. Also, you may want to have a cluster for providing services. Let's assume your company decided to use [Gitea](https://gitea.io/en-us/) as a `git` provider, and you deployed it to Kubernetes. *It may be a very controversial example, but I'm not talking about what should run in K8s and what shouldn't, so if you can think of any other thing that is supposed to be used across the whole company (GitLab Runners, Bitwarden, ElasticSearch, etc.), just imagine that one instead*. So it's already 4 clusters. Let's call the fourth cluster a `DevOps Cluster`
![3 Clusters and gitea](/dont-use-argocd-for-infrastructure/3-clusters-and-gitea.png)
I assume you need to have some common stuff deployed to each cluster, let's say Prometheus, Grafana, and Loki.
And now you need to decide how to deploy it. You may have already known about `ArgoCD`, or you decided to look for **Best Practices** and found a lot about `ArgoCD`. And it sounds perfect. Everybody tends to use it. You can find a lot of information everywhere. People are helpful. GitHub repo is well-maintained.
>Why Argo CD?
>
>Application definitions, configurations, and environments should be declarative and version controlled. Application deployment and lifecycle management should be automated, >auditable, and easy to understand.
And now you need to first deliver `ArgoCD` itself and later start delivering everything with `ArgoCD`.
Let's first talk about how to deliver Argo. There are different options. For example, you can have one main installation in the `DevOps Cluster` and use it to manage other clusters. That sounded good to me when I first heard about it. But I wanted to have all configuration as code, and to add other clusters to the main `Argo` you need to use the `argocd cli`, so it's either an additional step in the CI/CD, or manual work. I didn't like either option, because I wanted to avoid adding scripts to pipelines, and manual work just wasn't an option. And also it's not very transparent anymore where all the applications in target clusters are coming from (or maybe I just couldn't find it, I'd rather think that I was dumb). One more thing is that you obviously can't have several `K8s` resources with the same name in one namespace, so every `Application` must have a different name. I don't like long names, so it looks ugly to me. Especially when your clusters have long names, like "the-first-product-production", and your application looks like "the-first-product-production-grafana". You don't have to use the cluster name in the application name, for sure, but you would like to have some logic there, and this logic must be as obvious as possible. Anyway, these are the three main issues that I've faced and that I can't live with, so here comes the second way to deliver `Argo`: install it to each cluster.
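For reference, registering a managed cluster in a central ArgoCD is an imperative `argocd cli` step; the server address and context name below are made up for illustration:
```BASH
# Log in to the central ArgoCD in the DevOps cluster
argocd login argocd.devops.example.com
# Register a cluster from the local kubeconfig context
argocd cluster add the-first-product-production --name the-first-product-production
```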
So I would go with 4 `ArgoCD` installations. The first step is to install it, which is not a problem at all, there are many ways to do it (one of them is sketched right after the list below). And after it's installed, we need to start delivering other applications. I'm aware of 3 ways of doing it:
1. Use `Application` manifests for applications
2. Use `Application` manifests to manage `Application` manifests from repo (the App of Apps pattern, or something like that)
3. Use `ApplicationSet` manifests to make `ArgoCD` render `Application` manifests and apply them
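As for installing Argo itself, one of those many ways is the community helm chart; a minimal sketch (chart and repo names as published by the Argo project, values omitted):
```BASH
helm repo add argo https://argoproj.github.io/argo-helm
helm upgrade --install argocd argo/argo-cd --namespace argocd --create-namespace
```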
### Application
The first option is really straightforward, isn't it? All we need to do is to create manifests. `ArgoCD` devs have just published version 2.6 with `multi-source` application support. *But currently I can't say it's usable. The main issue for me is that the `argocd` cli doesn't work with them, which makes the whole thing pointless to me. Without the cli I can't implement CD, and then I see no reason to use them at all. I could use the `AutoSync` option, but I won't do that, and later I'll come back to this point and describe why, maybe in the next post, or later*. So I can't use multi-source applications right now. Let's look at the list of applications that I need to install one more time:
To all clusters:
- Prometheus
- Grafana
- Loki
To the DevOps cluster only:
- Gitea
There are many ways to install applications to `K8s`. But actually, I think that there is only one real way: [helm](https://helm.sh/). Why? Because each of those applications is a huge amount of manifests that you need to combine, install, and maintain. You probably won't write those manifests yourself. There are other options to install apps, but all of them seem super complicated. And I doubt that you want to spend 8 hours per day editing `yaml` files. At least I don't, so I'm choosing helm.
>I need to say that I'm not 100% happy with helm. There are some issues that seem very important to me, but it's good enough to use it. But maybe we can talk about them later.
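Just to anchor what the `Application` manifest below wraps, this is roughly the plain-helm equivalent for one of those charts (values file path is a placeholder):
```BASH
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm upgrade --install prometheus prometheus-community/kube-prometheus-stack \
  --version 45.0.0 --namespace monitoring -f values.yaml
```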
Let's try the first approach (`Application` for an application)
First, Prometheus:
```YAML
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: prometheus
namespace: argocd
spec:
destination:
namespace: monitoring # Let's not touch namespace management this time. Let's assume we already solved this issue
server: https://kubernetes.default.svc
project: monitoring
source:
chart: kube-prometheus-stack
helm:
valueFiles:
- values.yaml
path: .
repoURL: https://prometheus-community.github.io/helm-charts
targetRevision: 45.0.0
```
But what about values? A single-source application will not be able to find values files in your repo if you use a remote chart, so you have two options (that I'm aware of)
1. Add values directly to your source like this:
```YAML
spec.source.helm.values: |
you-values: here
```
2. Create a CMP (Config Management Plugin) for handling helm packages and values
The second way is good, but complicated, because it's a self-written tool that you have to implement, that has to work with Argo, and that you have to maintain, without any guarantee that it will keep working after `ArgoCD` is updated. I was using Argo with a custom CMP, it's no fun.
But anyway, the `Application` way is not scalable, because you will have to create a manifest for each cluster, and it is not secure, because you can't easily encrypt the data. Also, if you've seen the values for `kube-prometheus-stack`, you know that they are huge. So now you have 4 huge manifests with unencrypted secrets. And it's only for one app, so it probably looks like this:
```
manifests/
cluster1/
prometheus.yaml
loki.yaml
grafana.yaml
cluster2/
prometheus.yaml
loki.yaml
grafana.yaml
cluster3/
prometheus.yaml
loki.yaml
grafana.yaml
cluster-devops/
prometheus.yaml
loki.yaml
grafana.yaml
gitea.yaml
```
In my experience, each `Application` like this with a proper configuration will contain about 150-200 lines of code, so with these 13 manifests you have roughly 1950-2600 lines of code to install 4 applications across your clusters. One of them is really special, and the others will most probably have only several lines that are not duplicated, e.g. for ingress and passwords.
I think it's not the way to go. To solve this problem, many guys save charts to the same git repo where they store values, using helm-freeze for example. So it looks like this:
```
helm-freeze.yaml
vendored_charts/
prometheus/...
grafana/...
loke/...
gitea/...
manifests/
cluster1/
prometheus.yaml
loki.yaml
grafana.yaml
cluster2/
prometheus.yaml
loki.yaml
grafana.yaml
cluster3/
prometheus.yaml
loki.yaml
grafana.yaml
cluster-devops/
prometheus.yaml
loki.yaml
grafana.yaml
gitea.yaml
values/
prometheus/...
grafana/...
loki/...
gitea/...
```
Yes, now you can use values from files, you can encrypt secrets, and your `Applications` are not that huge anymore. But I'm strongly against vendoring external charts. Why? First, it's my ideology; briefly, if you don't trust packagers, you don't use their packages. Vendoring charts into a git repo also means that you need to add a manual step to download them. With helm-freeze, for example, you need to execute `helm-freeze sync`. It's either a pre-commit hook, or a manual execution, or a step in CI/CD. I don't like any of these options for different reasons, but if I stop on every little point, this article will never be finished. So if it's interesting, feel free to ask.
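For illustration, with or without helm-freeze the vendoring step boils down to something like this (plain `helm pull` shown here; helm-freeze wraps the same idea behind its own config file):
```BASH
# Download and unpack the chart into the repo, then commit it
helm pull kube-prometheus-stack --repo https://prometheus-community.github.io/helm-charts \
  --version 45.0.0 --untar --untardir vendored_charts
git add vendored_charts && git commit -m "Vendor kube-prometheus-stack 45.0.0"
```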
> I would give up already here. I don't understand why you need to suffer that much just to use such a great tool
### App of Apps
It's actually pretty much the same thing. But instead of applying `Application` manifests one by one, you will create an additional `Application` manifest that `ArgoCD` will use to generate the others.
```YAML
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: applications
namespace: argocd
spec:
destination:
namespace: argo
server: https://kubernetes.default.svc
project: system
source:
path: ./manifests/cluster1
repoURL: $YOUR_GIT_REPO
targetRevision: main
```
You will create 4 manifests, one for each cluster, and apply them. And when you push your (not App of Apps) `Application` manifests to the main branch, `ArgoCD` will do something about it. It doesn't solve anything, as I see it. You still have an ugly amount of `yaml` files, but now you also have 4 additional ones that are not so huge. This concept might simplify the deployment process, but it will also steal a certain amount of control from you, because now it's not you who is responsible for deploying applications, but Argo.
> I think that GitOps and other automations are important, and it's the only way to do development right now, but you're probably hired as a DevOps Engineer or SRE, or whoever. You're supposed to be able to do something apart from pushing to git. You can't hand over all the responsibility to git and pipelines and live a happy life. One day you will have to execute `kubectl edit deployment`, and then you won't be happy if `ArgoCD` decides to rewrite your changes right after they are applied, because you're not following the Git Flow. You need to have control, and that's why you're paid. Not because you can edit `yaml` files.
### ApplicationSets
It's a nice concept. *In theory*. You create one manifest for all applications in a cluster, or even one manifest for all applications across your clusters. The unique one, that will work everywhere. I won't provide an example, sorry, but you can do a lot of templating there, so one manifest will work for four clusters and will decrease the amount of code. I'm using `ApplicationSets` myself, for my personal stuff, where I don't have any kind of obligations, and no one will sue me for breaking everything down. And actually I've done the breaking thing not so long ago. I'm not blaming `ArgoCD` for that, it was entirely my fault. But let's see what I've done. And let me know (anyhow) if you were able to spot the problem before the `kubectl apply` happened.
#### My file structure
I have one `ApplicationSet` for helm releases, that looks like that:
./helm-releases.yaml
```YAML
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: helm-releases
namespace: argo-system
spec:
generators:
- git:
repoURL: git@github.com:allanger/my-repo.git
revision: HEAD
files:
- path: "releases/*"
template:
metadata:
name: "{{ argo.application }}"
namespace: argo-system
spec:
project: "{{ argo.project }}"
source:
path: "{{ argo.path }}"
helm:
valueFiles:
- values.yaml
values: |-
{{ values }}
repoURL: "{{ chart.repo }}"
targetRevision: "{{ chart.version }}"
chart: "{{ chart.name }}"
destination:
server: "{{ argo.cluster }}"
namespace: "{{ argo.namespace }}"
ignoreDifferences:
- group: admissionregistration.k8s.io
kind: ValidatingWebhookConfiguration
jqPathExpressions:
- .webhooks[]?.clientConfig.caBundle
- .webhooks[]?.failurePolicy
```
And a certain amount of generator files in the `./releases` folder. I'm using the first approach (values inline), like this:
```YAML
argo:
cluster: https://kubernetes.default.svc
application: cert-manager
project: system
namespace: cert-manager
path: .
chart:
version: 1.10.1
name: cert-manager
repo: https://charts.jetstack.io
values: |
...
```
I don't like having values here, and when `ArgoCD` 2.6 was released, I decided to try multi-source applications, so I created a new directory, `./releases_v2`, and a new `ApplicationSet` manifest
./helm-releases-v2.yaml:
```YAML
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: helm-releases
namespace: argo-system
spec:
generators:
- git:
repoURL: git@github.com:allanger/argo-deployment.git
revision: HEAD
files:
- path: "releases_v2/*"
template:
metadata:
name: "{{ argo.application }}"
namespace: argo-system
spec:
project: "{{ argo.project }}"
sources:
- path: "./values"
repoURL: git@github.com:allanger/argo-deployment.git
ref: values
- path: "{{ argo.path }}"
helm:
valueFiles:
- "$values/values/{{ chart.name }}/values.yaml"
repoURL: "{{ chart.repo }}"
targetRevision: "{{ chart.version }}"
chart: "{{ chart.name }}"
destination:
server: "{{ argo.cluster }}"
namespace: "{{ argo.namespace }}"
ignoreDifferences:
- group: admissionregistration.k8s.io
kind: ValidatingWebhookConfiguration
jqPathExpressions:
- .webhooks[]?.clientConfig.caBundle
- .webhooks[]?.failurePolicy
```
And executed `kubectl apply -f helm-releases-v2.yaml`
And for some reason `ArgoCD` stopped responding. And, actually, everything was gone. Nothing was left in my cluster *(the new `ApplicationSet` had the same `metadata.name` as the old one, so applying it replaced the old generators, and every previously generated `Application`, together with everything it managed, was pruned)*. And after I realized what I had done: "How am I a DevOps engineer after all?". *In case you wonder, I was able to save 100% of the important persistent data that was there, and all the workloads were back in 15 minutes, but still...*
One of the most important things about your infrastructure is its sustainability. And if you happen to have a setup like this in your company, hire a junior engineer, and they make the same mistake, you have no right to punish them; on the contrary, you need to punish yourself for building something that is so easy to destroy. And I know that there are options to avoid destroying resources when `Applications` or `ApplicationSets` are gone, or that you're supposed to use `argocd` and not `kubectl` to manage these resources *(and I don't agree with that at all)*. But I think that having to add extra fields to manifests to preserve resources that were created by an operator from a CR manifest is rather non-obvious, and it's dangerous out of the box. When I need something to be reliable, I'd rather have a more complicated and less obvious, or maybe not automated at all, process for removing it.
> You'd rather think twice before executing `rm -rf ./something`, than do `git push` and wait until it's executed automatically, wouldn't you?
But `ApplicationSets` are not bad. I'm still using them, but now with additional fields (sketched below), so I'm not afraid to remove everything accidentally. And yet it's not perfect, because without multi-source applications they don't make any sense for projects bigger than a Minecraft server that is used by 4 guys, *unless you're vendoring helm charts, of course*
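To be concrete, the "additional fields" I mean are along these lines; `preserveResourcesOnDeletion` is, as far as I can tell, the ApplicationSet knob meant for exactly this, and the rest is trimmed from the manifest above:
```YAML
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
  name: helm-releases
  namespace: argo-system
spec:
  syncPolicy:
    # Keep the generated Applications (and their resources) around
    # even if this ApplicationSet or its generators are deleted
    preserveResourcesOnDeletion: true
  generators:
    - git:
        repoURL: git@github.com:allanger/my-repo.git
        revision: HEAD
        files:
          - path: "releases/*"
  # template: ... same as in the manifest above
```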
Even when multi-source apps are fully supported, and I can move the inline values to real values files, there is still no way to do `argocd appset diff`, and I'm aware of [this github issue](https://github.com/argoproj/argo-cd/issues/10895#issuecomment-1423566000). And you can read my concerns there about the server-side rendering that they want to implement.
So let's assume that the cli supports multi-source apps and application sets can be diffed, and your server is not overloaded when 1000 manifests are being rendered on each pipeline run just for diffing, and [helm repos are not DDoSed](https://todoist.com/app/project/2232733866/task/6521274379) *(because it's not nice to DDoS something that is used by a huge amount of users across the world)*. And you've added all the fields to the manifests to make your infra reliable. Sounds nice!
But there is one more problem that I see. What many teams don't think about is that they, as a team, provide services to other teams. So, if you have the clusters `cluster-production`, `cluster-development`, `cluster-demo`, and `cluster-devops`, where should you deploy infra changes first? I think a lot of you would say: to `cluster-development`, because at least it's not facing real customers. And... I totally don't agree. You're the team that provides other teams with services, and your real customers are those teams. Of course, you won't treat the production environment the same way you treat the development environment, but it's still not a playground for you. It's a playground for developers, and it should be stable and reliable for them. I'm sure that there are many ways to handle it, but I think you should have one more cluster, a `cluster-infra-test`, where you deliver your changes first and where you can test them before they affect other teams. So, it's a 5th `ArgoCD` with a very similar setup *(actually, the setup must repeat all the other setups, so you're sure you're testing what's going to be delivered later)*. And with the `ApplicationSet` and, for example, git generators pointed to the main branch on the "production" environments (`cluster-production`, `cluster-development`, `cluster-demo`, and `cluster-devops`), but here changes must come not only from main, but also from other branches *(assuming that your workflow is something like this: cut a branch, update the infra code, create a pull request, and merge)*, because you need to test everything before it's in the main branch. So you either have a very complicated `ApplicationSet` *(I'm not even sure that it's possible to do with templates)*, or you have different manifests for the test cluster and the rest, so you have to remember to update both every time one is updated, or you have an additional step in a pipeline that will get the `ApplicationSet` from `cluster-infra-test` and add a new branch to the generators *(because you must not overwrite and break test environments that are created by other members of your team)*
### Really?
Are you ready to go through all of this just to use Argo? Is there really nothing that can stop you from doing that? I even got tired writing this post. I was stubborn, and I wanted to use the best `GitOps Kubernetes` tool, and I went through all of this, trying to convince others that it's cool. Just a little amount of work, and we're happy `Argo` users. But looking back, all I can say is: just use [Helmfile](https://github.com/helmfile/helmfile)! `ArgoCD` is literally not solving any issue that `Helmfile` can't solve (when it comes to the infrastructure deployment). And with a huge amount of work and compromises you can achieve a result that will be close to what you would have with a proper `helmfile` configuration (which is extremely easy and reliable).
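To make the comparison tangible, here is a minimal `helmfile.yaml` sketch for the same set of clusters; this is not the exact layout from the repo I'll publish, just the shape of it:
```YAML
environments:
  cluster1: {}
  cluster2: {}
  cluster3: {}
  cluster-devops: {}
---
repositories:
  - name: prometheus-community
    url: https://prometheus-community.github.io/helm-charts
releases:
  - name: prometheus
    namespace: monitoring
    chart: prometheus-community/kube-prometheus-stack
    version: 45.0.0
    values:
      # common values plus a per-cluster override file
      - values/prometheus/common.yaml
      - values/prometheus/{{ .Environment.Name }}.yaml
```
Then `helmfile -e cluster1 diff` (with the helm-diff plugin installed) and `helmfile -e cluster1 apply`, run per cluster from a pipeline or a laptop, cover the diffing and syncing that took so much effort above.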
Later I will create a repo where I show all the examples with configuration and CI/CD for different `ArgoCD` approaches and a `helmfile`. So if you don't trust me now, you'll be able to see the difference, or try to convince me that I'm wrong.
> And using `helmfile`, I will install `ArgoCD` to my clusters, of course, because it's an awesome tool, without any doubts. But don't manage your infrastructure with it, because it's a part of your infrastructure, and it's a service that you provide to other teams. And I'll talk about it in one of the next posts.
Thanks,
Oi!
---

View File

@ -0,0 +1,287 @@
---
title: "Vst on Linux 1"
date: 2023-01-24T15:47:50+01:00
draft: false
ShowToc: true
cover:
image: "cover.png"
caption: "Vst on Linux"
relative: false
responsiveImages: false
---
>Best, but according to Output. Their article: [https://output.com/blog/output-favorites-freebies](https://output.com/blog/output-favorites-freebies)
This is the kind of article where I'm looking for "BEST FREE VST" articles, videos, or whatever, trying to run the plugins from them on Linux, and checking how they perform. The first article I've found is the one by **Output**, so be it.
---
## 1. Arcade by Output 👎
Freaking unexpected, huh? But what choice do I have? **Walk the walk and talk the talk.** So let's start by pressing the "TRY IT FREE" button.
First I need to enter my email, then I need to enter a bunch of information about myself, and then: what a bummer, they want me to add a payment method. And even though they won't charge me for the first month, I'm not walking that walk. Sorry, let's go to the next one.
---
## 2. OTT by Xfer 👍
This one you will find in any top, I believe. It can mean only one thing: it's really one of the best. So let's try.
There is no Linux version of this plugin, so we will have to use the Windows one. How, you would ask me? I will have to install a couple of packages to my system before I'm ready. I'm starting by installing **wine**.  
I am not going to describe the process of installing it, just google `"$YOUR_LINUX_DISTRO_NAME install wine"`. After it's done, you may want to create a new wine prefix in your system.
What is a wine prefix? Let's think of it as a directory that contains Windows-related stuff. All plugins will be installed there alongside the libraries that are required to make them work.
Let's give this prefix a recognizable name, like `.wine_vst_plugins`. I'm opening the terminal, yes, I'll have to use it, but you shouldn't be scared of it, because the terminal is our friend. Opening it and executing:
$ WINEPREFIX="$PWD/.wine_vst_plugins/" winecfg
It will open a window when you can configure your wine prefix, but the main stuff is already done, so I just close it.
To check if we're happy, I'm executing the following
```BASH
$ ls -la $HOME/.wine_vst_plugins
total 3332
drwxr-xr-x 1 allanger allanger     126 Oct 27 18:13 .
drwx------ 1 allanger root        1922 Oct 27 18:15 ..
drwxr-xr-x 1 allanger allanger       8 Oct 27 18:13 dosdevices
drwxr-xr-x 1 allanger allanger     110 Oct 27 18:13 drive_c
-rw-r--r-- 1 allanger allanger 3282847 Oct 27 18:13 system.reg
-rw-r--r-- 1 allanger allanger      12 Oct 27 18:13 .update-timestamp
-rw-r--r-- 1 allanger allanger    4130 Oct 27 18:13 userdef.reg
-rw-r--r-- 1 allanger allanger  113309 Oct 27 18:13 user.reg
```
If your output looks like mine, we're good to go. Let's install the second tool: [https://github.com/robbert-vdh/yabridge](https://github.com/robbert-vdh/yabridge). You will find all the instructions if you just scroll down a wee bit. After installing it, you should also have a tool called **yabridgectl**. To check that it's there, just execute the following:
```BASH
$ yabridgectl
yabridgectl 4.0.2
Robbert van der Helm <mail@robbertvanderhelm.nl>
Optional utility to help set up yabridge

USAGE:
    yabridgectl <SUBCOMMAND>

OPTIONS:
    -h, --help       Print help information
    -V, --version    Print version information

SUBCOMMANDS:
    add          Add a plugin install location
    rm           Remove a plugin install location
    list         List the plugin install locations
    status       Show the installation status for all plugins
    sync         Set up or update yabridge for all plugins
    set          Change the yabridge path (advanced)
    blacklist    Manage the indexing blacklist (advanced)
    help         Print this message or the help of the given subcommand(s)
```
I hope you're seeing pretty much the same picture as I am. And it only means that we can go further.
Now it's time to install the plugin itself. I'm downloading the **Windows** version and opening my terminal again
Let's assume that you've downloaded it to the `~/Downloads` folder, and the file name is `Install_Xfer_OTT_135.exe`
```BASH
$ cd ~/Downloads
$ WINEPREFIX="$HOME/.wine_vst_plugins/" wine ./Install_Xfer_OTT_135.exe
```
Why are we adding this `WINEPREFIX` thing every time we run `wine`? Because we're telling wine which prefix it should use, since ours is not at the default path.
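If typing the prefix gets annoying, you can export it once per terminal session instead (that's what I'll end up doing in later posts):
```BASH
# Set the prefix for the whole session, then run installers without the long line
$ export WINEPREFIX="$HOME/.wine_vst_plugins/"
$ wine ./Install_Xfer_OTT_135.exe
```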
After installing this plugin, I will need to add it to **yabridge**. To do that, use **yabridgectl**
Instead of what I'm putting after `/drive_c/`, provide the path that you've chosen during the installation
```BASH
$ yabridgectl add ~/.wine_vst_plugins/drive_c/Program\ Files/Common\ Files/VST
$ yabridgectl sync
```
And I'm opening a DAW. *I assume you already have one too, since you're here. But if you don't, and you don't know which one to install, just install **Ardour**.*
I'm opening it, adding a track and adding the **OTT** plugin to that track.
![OTT Xfer](/vst-on-linux-1/ott-xfer.png)
It is working and I would even say it's running
The UI part is a wee bit buggy, but I don't think that it's a problem.
---
## 3. Wider by Polyverse 👍
I'm entering my email again and receiving a download link. Downloading, unpacking, and installing
$ WINEPREFIX="$PWD/.wine_vst_plugins/" wine ./InfectedMushroom-Wider-V1.1.3.exe
$ yabridgectl sync
![Wider](/vst-on-linux-1/wider.png)
Again, it's working flawlessly
So far so good, even the UI part is perfect.
---
## 4. CamelCrusher by Camel Audio 👍
It can be downloaded even without email thingy.
$ WINEPREFIX="$HOME/.wine_vst_plugins/" wine ./camelcrusher-win_mac\ \(www.mpcindia.co\)/CamelCrusherWin-1-0-1-x64.exe
$ yabridgectl sync
And when I'm trying to add it to Ardour, I'm getting an error
```
[Info]: Scanning: /home/allanger/.vst/yabridge/CamelCrusher.so
09:23:38 [error]
09:23:38 [error] Error during initialization:
09:23:38 [error] '/home/allanger/.vst/yabridge/CamelCrusher.dll' does not exist, make sure to rename 'libyabridge-vst2.so' to match a VST plugin .dll file.
09:23:38 [error]
[ERROR]: ** ERROR** VSTFX : CamelCrusher could not be instantiated :(
[WARNING]: Cannot get VST information from '/home/allanger/.vst/yabridge/CamelCrusher.so': instantiation failed.
Scan Failed.
```
And I can't actually understand what the problem is here.
I don't give up so quickly. Let's try running it via [Carla](https://github.com/falktx/Carla). I won't describe how to install it, you can google it.
So after it's installed, I'm opening it as a standalone app first and trying to add my **CamelCrusher** there. And it's working. Then the next step is to add **Carla** as an **FX** plugin in **Ardour** and then add CamelCrusher there.
![CamelCrush](/vst-on-linux-1/camel-crush.png)
Working again, but not without Carla
---
## 5. Fracture by Glitchmachines 👍
I love this plugin, and I'm using it a lot on my MacBook, so it would be nice to run it on **Linux** too. So let's go. But Glitchmachines can give us another great plugin for free, so I will try running both of them here. The other one is **Hysteresis**. So I'm downloading both of them.
After receiving two links, I'm installing them
$ WINEPREFIX="$HOME/.wine_vst_plugins/" wine Fracture_setup.exe
$ WINEPREFIX="$HOME/.wine_vst_plugins/" wine Hysteresis_setup.exe
$ yabridgectl sync
![Glitchmachines](/vst-on-linux-1/glitchmachines.png)
They are working but there is one UI glitch
![Glitchmachine's making glitch](/vst-on-linux-1/glitchmakesrs-glitch.gif)
#### Maybe you won't notice it on your system
Because we probably have different system configs, so maybe it's only possible to reproduce this bug with a set of configs and packages I'm using in my Linux. So if you don't face this issue, lucky you!
It's not very annoying to me, but to avoid this kind of behavior, I can wrap these plugins with **Carla.**
![Glitchmachines with Carla](/vst-on-linux-1/glitchmaker-carla.gif)
It's working perfectly with Carla *(it's not that buggy in real life, only on the record)*
---
## 6. FreqEcho by Valhalla DSP 👍
Valhalla's plugins, I think, are one of the most popular in the music world. I don't know a man who doesn't have all of their free plugins installed.  And I do have them installed already in my system, but I will go through the installation process again, just to describe it here. So let's download all of their free plugins
- Valhalla Freq Echo
- Valhalla Space Modulator
- Valhalla Supermassive
```BASH
$ WINEPREFIX="$HOME/.wine_vst_plugins/" wine ValhallaFreqEchoWin_V1_2_0.exe
$ WINEPREFIX="$HOME/.wine_vst_plugins/" wine ValhallaSpaceModulatorWin_1_1_6v3.exe
$ WINEPREFIX="$HOME/.wine_vst_plugins/" wine ValhallaSupermassiveWin_V2_0_0.exe
$ yabridgectl sync
```
![Valhalla plugins](/vst-on-linux-1/valhalla.png)
All of them run flawlessly
---
## 7. Audio Treasure by Max For Cats 👎
As far as I could understand, it can be used only in **Ableton Live**, and since I'm not an Ableton user, and I don't want to run it with **Wine** *(cause there is no native Linux version)*, it's becoming the second plugin in the list that is not working on Linux.
## 8. Saturation Knob by Softube 👎
To get it, I must have a Softube account *(but I already have one, because I used to download all them free plugins like nuts and create accounts everywhere 🤦)*.
So I'm downloading their App Center, which I'm going to use to install the plugin. *I do hate this approach to managing software, but I will cover it in another post one day.*
So the process is very similar to a direct plugin installation. Install **Softube Central** to the same **Wine** prefix
WINEPREFIX="$HOME/.wine_vst_plugins/" wine Softube\ Central\ Setup\ 1.7.1.exe
But after that, I've found out that **Softube Central** is an **Electron-based** application, and I wasn't yet able to run stuff like this in **Wine**. Maybe later I will put more effort into that, but for now, I'm saying that it's not working. 😥
> #### Some kind of rhetorical question
>
> I don't understand why they and, for example, **Splice** can't release a Linux version of their application, if they already use Electron, which as far as I understand is meant for cross-platform development.
---
## 9 and 10. Plugins by iZotope 👎
**iZotope** is like **Valhalla**, everybody knows it. But I remember having troubles with their plugins on **Linux**. But now I'm trying again, and I'm full of hope.
Plugins:
- iZotope Vinyl
- iZotope Ozone Imager V2 *(it's not in the Output list, but why not?)*
- iZotope Vocal Doubler
One more plugin vendor that requires an account creation. But I do have one from those good old times.
Well, since I remember that there was a problem with them, I'll start by trying only one. Let it be **Ozone Imager V2**.
$ WINEPREFIX="$HOME/.wine_vst_plugins/" wine iZotope_Ozone_Imager_v2_1_0.exe
But unfortunately, when I'm opening these plugins, they ask for authorization, and it doesn't work. It's described in the **yabridge README.md**, but I was too dumb to read it there.
#### There is a workaround
I've heard that you can download cracked versions of these plugins and they won't require auth, so they might work. But I'm not trying that, and you shouldn't either.
---
## 11. TAL-Chorus-LX 👍👍👍
It's a piece of cake. This plugin has a native build for Linux, so (since I'm using **Arch Linux**) I'm just installing it with **yay**. You may want to use another package manager, or download it from the official web page, it's available there.
![NATIVE, YOU KNOW!](/vst-on-linux-1/tal-chorus.png)
---
## 12. Snap Heap by Kilo Hearts 👎
I was trying to get it, but it didn't seem free.
![Snap Heap](/vst-on-linux-1/snap-heap.png)
---
## 13. Signal Free VST by Output 👎
I'm sorry, but I'm not even trying. The first place in this top still hurts.
---
## Some kind of conclusion
First, you have seen only pictures here but haven't heard anything I've done with these plugins. And I know that it sucks, because we're talking about music. I'll record a track using these plugins later and show you how it's working.
About those plugins that didn't work: I know that some of them are great, and it's sad that the **iZotope** ones are not running on Linux, but there are alternatives that are Linux native. So for each failed one I will try to find an alternative, and later I will share my findings here.
Thanks for reading
Oi!

View File

@ -0,0 +1,138 @@
---
title: "Vst on Linux 2"
date: 2023-01-31T19:32:34+01:00
ShowToc: true
cover:
image: "cover.png"
caption: "VST on Linux"
relative: false
responsiveImages: false
---
Big ups, **Venus Theory!**
{{< youtube OCzf38fCqB4 >}}
## Prerequisite
All of them are covered in [the first post]({{< ref "vst-on-linux-1" >}} )
- You have Wine and Yabridge installed
- You have a Wine prefix configured (in my case, `$HOME/.wine_vst_plugins/`, so every time you see it, use your own wine prefix)
## Deelay 👍
You can find it here: [https://sixthsample.com/deelay/](https://sixthsample.com/deelay/)
After receiving a link, I'm downloading a **Windows 64-bit** version and running
```BASH
$ WINEPREFIX="$HOME/.wine_vst_plugins/" wine Deelay-1.0.3-Installer-64bit.exe
$ yabridgectl sync
```
![A running deelay](/vst-on-linux-2/deelay.png)
It's running, but the context-menu UI glitch is present
The audio qualities of this plugin on Linux are perfect, but when you open any drop-down menu, the plugin's window gets hidden.
![UI glitch](/vst-on-linux-2/deelay-glitch.gif)
But it can be fixed with **Carla**. If I'm putting Carla to my FX chain and then adding **Deelay** to **Carla**, this problem is gone.
## Surge XT 👍👍👍
This plugin has a native **Linux** version, and it's open-source, so just install it with your package manager or download from the official site. You won't even have to touch **yabridge** and **wine** this time
[**Source code**](https://github.com/surge-synthesizer/surge)
![Surge XT Linux native](/vst-on-linux-2/surge.png)
## Cardinal 👍👍👍
**You won't believe me!**
But it's open-source too and supports **Linux** out of the box.
[**Source code**](https://github.com/DISTRHO/Cardinal)
![Again native and open-source](/vst-on-linux-2/cardinal.png)
## Fire 👍👍👍
*It ... is ... open ... source ... too ... and ... supports ... **Linux***
[**Source code**](https://github.com/jerryuhoo/Fire)
So I'm just installing it with a package manager and trying.
![A real fire](/vst-on-linux-2/fire.png)
## Ruina 👍
*And a couple of others*
It would be too good, if this one had a Linux version. To get this plugin, you will have to create an account, and download their installer 👿
[https://noiseengineering.us/products/the-freequel-bundle-sinc-vereor-virt-vereor-ruina](https://noiseengineering.us/products/the-freequel-bundle-sinc-vereor-virt-vereor-ruina)
After downloading the installer, I'm installing it with **wine** to the same prefix where I've installed all the other plugins
```BASH
$ WINEPREFIX="$HOME/.wine_vst_plugins/" wine Noise\ Engineering\ Products\ 09358.exe
```
![The installer](/vst-on-linux-2/noise-engineering.png)
It will install the plugins; we only have to sync the yabridge config.
```BASH
$ yabridgectl sync
```
![Running Ruina ](/vst-on-linux-2/runia-plugin.png)
![Sinc Vereor](/vst-on-linux-2/sirt-plugin.png)
![Virt Vereor](/vst-on-linux-2/virt-vereor.png)
All of them are working flawlessly. One wouldn't even notice that they're not native
---
## Gatelab 👍 / Filterstep 👎 / Panflow 👍
No **Linux** version again, so I'm going to use **Wine** again. And they want me to enter my email again. That's a shame, but let's do it anyway. If Venus Theory says they're good, I'm sure they really are.
```BASH
$ export WINEPREFIX="$HOME/.wine_vst_plugins/"
$ wine Audiomodern_Filterstep_1.1.2/WIN/Filterstep_64\ 1.1.2\ \(Win64\).exe
$ wine Audiomodern_Gatelab_1.1.3/WIN/Gatelab\ 1.1.3\ \(Win64\).exe
$ wine Audiomodern_Panflow_1.0.1/WIN/Panflow\ 1.0.1\ \(Win64\).exe
```
I had to rerun Ardour multiple times in order to make these plugins work. **Panflow** is running fine
![Panflow](/vst-on-linux-2/panflow.png)
Gatelab is working fine too
![Gatelab](/vst-on-linux-2/gatelab.png)
But when I add **Filterstep**, Ardour stops responding. I'm sure it's possible to make it work too, so I will try doing that in one of the next articles.
## PaulXStretch 👍👍👎
I was tired after the **Audiomodern** plugins, because they were freezing my Ardour and I had to log out of and back into my system, because Ardour wouldn't run again after that otherwise.
But **PaulXStretch** has a native Linux version too, and it has given me the strength to finish this top.
So I'm just installing it with a package manager.
But my expectations were too high. I couldn't add PaulXStretch as a plugin in my **DAW**, even **Carla** couldn't handle it. Only [Kushview Element](https://kushview.net/) could handle it outside a **DAW**, but I couldn't connect Ardour with Element using JACK yet (probably because I'm too dumb). And when running **Element** as a plugin inside **Ardour**, adding PaulXStretch crashes Ardour.
But on the official site of this plugin, there is nothing said about a plugin version of PaulXStretch for Linux. So you can use it as a standalone application. Just record whatever you want, stretch and render an audio file to import it to your **DAW**.
![PaulXStretch as a standalone application](/vst-on-linux-2/paulxstretch.png)
---
Actually, I'm very happy to see that 4 of 9 plugins have native Linux support. It means that developers now see that Linux can be used for music production. And it makes me feel that Linux is being adopted by more and more guys who just want to make music without struggling with their systems.
---
Thanks for reading
Oi!

View File

@ -0,0 +1,198 @@
---
title: "Vst on Linux 3"
date: 2023-02-03T18:26:44+01:00
draft: false
ShowToc: true
cover:
image: "cover.png"
caption: "Vst on Linux"
relative: false
responsiveImages: false
tags: ['music']
---
I've been told that I should add descriptions to plugins I'm writing about here, and it sounds very reasonable. So now I will not only describe how to install a plugin but also add a small description to it.
## Prerequisite
All of them are covered in [the first post]({{< ref "vst-on-linux-1" >}})
- You have Wine and Yabridge installed
- You have a Wine prefix configured (in my case, `$HOME/.wine_vst_plugins/`, so every time you see it, use your own wine prefix)
## Before we begin
In the previous post, I was trying to run paulxstretch on Linux, and using it as a plugin in a DAW didn't work out. I've tried to update the JUCE library in the source code, and now it's working. You can find the code here: [https://git.badhouseplants.net/badhouseplants/paulxstretch](https://git.badhouseplants.net/badhouseplants/paulxstretch)
To build, refer to the official build doc or use the `/build_docker.sh` script
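Roughly, that boils down to the following (the clone URL is the repo linked above and the script name is as stated there; double-check the repo's build doc first):
```BASH
$ git clone https://git.badhouseplants.net/badhouseplants/paulxstretch
$ cd paulxstretch
$ ./build_docker.sh
```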
## Melda Free Bundle 👍
You can find it here: <https://www.meldaproduction.com/MFreeFXBundle>
It's not one plugin, but a whole bunch of them. I used to have it on my Mac long ago, but I hate this way of managing plugins, so I don't install them unless I'm sure I need them. And I never felt this way about the **Melda Free Bundle**.
But now I'll try running it on **Linux**. I don't think I'll ever use it, even if it runs smoothly as hell. So I will create an additional temporary wine prefix to install it there.
```BASH
$ export WINEPREFIX="$HOME/.wine_vst_plugins_tmp"
$ wine maudioplugins_16_01_setup.exe
```
Install Melda Free bundle
### MDrummer
[MDrummer](https://www.meldaproduction.com/MDrummer)
Let's start with the MDrummer plugin. I've tried to run it as a plugin in **Ardour**, but it killed it. So I've added it with **Carla** as a wrapper
I'm downloading several GBs of samples and other data to make it work.
![MDrummer](/vst-on-linux-3/mdrummer.png)
I'd say it's over-bloated. And the UI is a wee bit laggy on Linux
Let's see what we can do with it
{{< video "/vst-on-linux-3/mdrummer-example.mp4" "video-1" >}}
### MDrumReplacer
[MDrumReplacer](https://www.meldaproduction.com/MDrumReplacer)
As I understand, this plugin is catching drum signals and replacing them with another sound. So I'll add it next to the **MDrummer** in the **Carla Rack**.
![Replacer](/vst-on-linux-3/mdrummer-lfo-carla.png)
I've also added LFO in the chain, to modify the pitch value of the **MDrumReplacer** with it, so it doesn't sound so boring (at least, to me).
{{< video "/vst-on-linux-3/mdrummer-example-lfo.mp4" "video-2" >}}
### MPowerSynth
It's just a synth. I don't like the interface, and I'm not gonna use it in the future, so I'm simply using a preset.
{{< video "/vst-on-linux-3/mpower-synth.mp4" "video-3" >}}
It sounds a wee bit laggy, but it's just the video
---
*I'm sorry, I'm not trying all of them, because it's too much. Let's try another one and go further.*
### MGuitarArchitect
It's like a pedal board, as I see. I can add many effects here. Let's try adding it to the same **Carla Rack** that is used for the bass.
{{< video "/vst-on-linux-3/mguitar-architect.mp4" "video-4" >}}
---
## u-he TyrellN6 👍
[u-he TyrellN6 ](https://u-he.com/products/tyrelln6/)
I think that almost every **u-he** plugin has native **Linux** support, and **Tyrell** is not an exception. But you most probably won't be able to install it with a package manager, because it's available only on Amazona.de. So just follow the link on the official web page and download the Linux version.
This is a pretty minimalistic synth, that can help you produce a lot of cool sounds without spending tons of hours learning it. I definitely can recommend it, as I can recommend anything developed by **u-he**
Unpack the archive and run the **./install.sh** script
```BASH
$ ./TyrellN6-3898/install.sh
```
{{< video "/vst-on-linux-3/tyrell.mp4" "video-5" >}}
I can't tell anything bad about **u-he**, I love all their plugins, I love that they support Linux and I love that they together with **Bitwig** are working on the new plugin open-source format ([CLAP](https://github.com/free-audio/clap)).
Tyrell is just a very cool free synth. What else can I say?
---
## Valhalla Supermassive 👍
I've already covered Valhalla plugins in the [first post]({{< ref "vst-on-linux-1" >}}), so it doesn't make sense to repeat myself, instead of that, I'll add every free **Valhalla** plugin to the project and show you how it's working.
{{< video "/vst-on-linux-3/vallhalla.mp4" "video-6" >}}
## Spitfire Labs 👍
[Spitfire Labs](https://labs.spitfireaudio.com/?sortBy=prod_products_labs_latest)
I think everybody knows what that is. But if you don't, and you don't mind creating a Spitfire account, just try it.
I need to create an account for downloading that. *But I already have one, because I'm an active LABS user.*
```BASH
$ wine SpitfireAudio-Win-3.4.0.exe
$ yabridgectl sync
```
You will have to install presets
![Spitfire App](/vst-on-linux-3/spitfire-labs-app.png)
{{< video "/vst-on-linux-3/labs-example.mp4" "video-7" >}}
## Infinite Space Piano 👍
[Space Piano](https://zaksound.com/infinite-space-piano/)
This is a piano synth. I already have a lot of them, and to me, this is yet another one. But it sounds pretty good, so you may like it.
This time I really need to create an account, because it's the first time I've heard of this one.
There is no **Linux** version, so I'm getting the **Windows** one. I've got a feeling that I'm not gonna use it in the future, so I'm installing it in the temporary prefix.
```BASH
$ export WINEPREFIX="$HOME/.wine_vst_plugins_tmp/"
$ wine Infinite\ Space\ Piano\ 2.exe
$ yabridgectl sync
```
{{< video "/vst-on-linux-3/space-piano.mp4" "video-8" >}}
## ProjectSAM Free Orchestra 👎
[Free Orchestra](https://projectsam.com/libraries/the-free-orchestra/)
![Free Orchestra](/vst-on-linux-3/native-access.png)
Ok, let's try
I'm downloading **Native Access**, and it's not working. And this is the moment, I'm giving up on this plugin.
## Eventide Pendulate👍
[Eventide Pendulate](https://www.eventideaudio.com/plug-ins/pendulate/)
Downloading a Windows version again.
```BASH
$ wine Pendulate-1.3.6-windows-installer.exe
$ yabridgectl sync
```
{{< video "/vst-on-linux-3/eventide-pendulate.mp4" "video-9" >}}
Running just fine
As you see, this is a pretty interesting synth. I have enough synths for everything, but this one may join the ranks too.
## VCV Rack 👍
[VCV Rack](https://vcvrack.com/)
VCV Rack is an open-source Eurorack modular synthesizer simulator
I've already covered the Cardinal plugin [here]({{< ref "vst-on-linux-2" >}}). And this is basically the same thing. And so I will just show **Cardinal** instead of **VCV Rack**. But if you want VCV Rack, it's working on Linux just fine, but you can't use the free version as a plugin, that's why I'm showing Cardinal
{{< video "/vst-on-linux-3/vcv-rack.mp4" "video-10" >}}
I didn't have enough time to learn it yet, so that's what I could do with it
## U-He Protoverb 👍
[U-He Protoverb](https://u-he.com/products/protoverb/)
Protoverb is a reverb created by u-he. It has native Linux support
Download the **Linux** version and install it by running a script. You can find everything [here](https://u-he.com/products/protoverb/)
## Paulstretch 👍
It's already covered in the previous article. But since then, one thing has changed. As you could see at the very beginning of this post, I've updated the JUCE library in the source code, and now it's running as a VST plugin. If you missed it, read the beginning one more time.
I'm testing it currently, but you can help me with that.
## Bonus: Vital 👍
[Vital](https://vital.audio/)
I think this is the best synth ever made. It's open source and has Linux support. I use it in every project, and if you don't, I recommend you start doing that.
{{< video "/vst-on-linux-3/vital.mp4" "video-11" >}}
The video is laggy again, but in reality it's fine.
---
If you like what I'm doing, you can follow my [twitter](https://twitter.com/_allanger) or [mastodon](https://mastodon.social/@allanger), since I'll be posting all the updates there
Thanks
Oi!

8
content/search.md Normal file
View File

@ -0,0 +1,8 @@
---
title: "Search" # in any language you want
layout: "search" # is necessary
# url: "/archive"
# description: "Description for Search"
summary: "search"
placeholder: "placeholder text in search input box"
---

View File

@ -0,0 +1,4 @@
<details>
<summary>{{ (.Get 0) | markdownify }}</summary>
{{ .Inner | markdownify }}
</details>

View File

@ -0,0 +1,2 @@
<!-- raw html -->
{{.Inner}}

View File

@ -0,0 +1,23 @@
<div class="container">
<div id="{{ .Get 1 }}" class="{{ .Get 1 }}" align=center></div>
</div>
<script
type="text/javascript"
src="https://cdn.jsdelivr.net/npm/@clappr/player@latest/dist/clappr.min.js"
>
</script>
<script>
var playerElement = document.getElementById("{{ .Get 1 }}");
var player = new Clappr.Player({
source: {{ .Get 0 }},
mute: true,
height: 360,
width: 640
});
player.attachTo(playerElement);
</script>

View File

@ -0,0 +1,8 @@
<ul>
{{ range .Data.Pages }}
<li>
<a href="{{.RelPermalink}}">{{ .Title }}</a>
</li>
{{ end }}
</ul>

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1 @@
{"Target":"ananke/css/main.min.css","MediaType":"text/css","Data":{}}


1
themes/ananke Submodule

@ -0,0 +1 @@
Subproject commit 315a00623c9f1c0074ad369c1fd39a960cd01e15

1
themes/papermod Submodule

@ -0,0 +1 @@
Subproject commit d3d90be8a4ea04433d95d02a1dc07b0014c5b8b8