commit 8c825f7c84dfae3fe49c101dd3facae9279cb1ac Author: Nikolai Rodionov Date: Fri Feb 17 15:19:49 2023 +0100 Init Commit Start following the GitFLow diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..3c3629e --- /dev/null +++ b/.dockerignore @@ -0,0 +1 @@ +node_modules diff --git a/.drone.yml b/.drone.yml new file mode 100644 index 0000000..9f0dca2 --- /dev/null +++ b/.drone.yml @@ -0,0 +1,84 @@ +--- +kind: pipeline +type: kubernetes +name: Build badhouseplants.net + +steps: +- name: Publish the Helm chart + when: + branch: + - main + image: alpine/helm + environment: + GITEA_TOKEN: + from_secret: GITEA_TOKEN + commands: + - helm plugin install https://github.com/chartmuseum/helm-push + - helm package chart -d chart-package + - helm repo add --username allanger --password $GITEA_TOKEN allanger-charts https://git.badhouseplants.net/api/packages/allanger/helm + - helm cm-push "./chart-package/$(ls chart-package)" allanger-charts + +- name: Init git submodules + image: alpine/git + when: + branch: + - main + commands: + - git submodule update --init --recursive + +- name: Get static content + image: rclone/rclone:latest + when: + branch: + - main + environment: + RCLONE_CONFIG_CONTENT: + from_secret: RCLONE_CONFIG_CONTENT + RCLONE_CONFIG: /tmp/rclone.conf + commands: + - echo "$RCLONE_CONFIG_CONTENT" > $RCLONE_CONFIG + - rclone copy -P badhouseplants-public:/badhouseplants-static static + +- name: Build and push the docker image + when: + branch: + - main + image: plugins/docker + settings: + registry: git.badhouseplants.net + username: allanger + password: + from_secret: GITEA_TOKEN + repo: git.badhouseplants.net/allanger/badhouseplants-net + tags: latest + depends_on: + - Init git submodules + - Get static content + +--- +kind: pipeline +type: kubernetes +name: CV Builder +when: + branch: + - main +steps: + - name: Build the CV + image: ghcr.io/puppeteer/puppeteer + commands: + - cp -R ./content/cv/* $HOME + - cd $HOME + - npm install md-to-pdf + - npx md-to-pdf index.md + - mkdir $DRONE_WORKSPACE/cv + - mv index.pdf $DRONE_WORKSPACE/cv/n.rodionov.pdf + + - name: Upload the CV + image: rclone/rclone:latest + environment: + RCLONE_CONFIG_CONTENT: + from_secret: RCLONE_CONFIG_CONTENT_PRIVATE + RCLONE_CONFIG: /tmp/rclone.conf + commands: + - echo "$RCLONE_CONFIG_CONTENT" > $RCLONE_CONFIG + - rclone copy -P $DRONE_WORKSPACE/cv badhouseplants-minio:/public-download diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..99de334 --- /dev/null +++ b/.gitignore @@ -0,0 +1,3 @@ +node_modules +static +content/cv/index.pdf diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..07711c6 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,6 @@ +[submodule "themes/ananke"] + path = themes/ananke + url = https://github.com/theNewDynamic/gohugo-theme-ananke +[submodule "themes/papermod"] + path = themes/papermod + url = https://github.com/adityatelange/hugo-PaperMod.git diff --git a/.hugo_build.lock b/.hugo_build.lock new file mode 100644 index 0000000..e69de29 diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..edaa67c --- /dev/null +++ b/Dockerfile @@ -0,0 +1,13 @@ +FROM alpine:latest AS builder +WORKDIR /src +ARG GOHUGO_LINK=https://github.com/gohugoio/hugo/releases/download/v0.110.0/hugo_0.110.0_linux-amd64.tar.gz +RUN apk update && apk add curl tar +RUN curl -LJO ${GOHUGO_LINK} && tar -xf hugo_0.110.0_linux-amd64.tar.gz +RUN chmod +x /src/hugo + +FROM alpine:latest +WORKDIR /src +COPY --from=builder /src/hugo /usr/bin/hugo 
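+# Runtime stage: only the hugo binary is taken from the builder stage; the site sources are copied in below,
+# and hugo is the entrypoint, so the actual command (e.g. `server`) is expected to be provided at runtime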
+COPY . /src +ENTRYPOINT ["/usr/bin/hugo"] +CMD ["--help"] diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..2bf6164 --- /dev/null +++ b/Makefile @@ -0,0 +1,5 @@ +upload_static: + rclone copy -P static badhouseplants-minio:/badhouseplants-static + +get_static: + rclone copy -P badhouseplants-public:/badhouseplants-static static diff --git a/README.md b/README.md new file mode 100644 index 0000000..b10608b --- /dev/null +++ b/README.md @@ -0,0 +1,4 @@ +# Badhouseplants NET + +## Static content +Storing static content in the repo is painful, because there are massive. That's why for storing them I'm using a S3 bucket that is publicly available for downstream operations diff --git a/archetypes/default.md b/archetypes/default.md new file mode 100644 index 0000000..d669718 --- /dev/null +++ b/archetypes/default.md @@ -0,0 +1,12 @@ +--- +title: "{{ replace .Name "-" " " | title }}" +date: {{ .Date }} +draft: true +ShowToc: true +cover: + image: "cover.png" + caption: "{{ replace .Name "-" " " | title }}" + relative: false + responsiveImages: false +--- + diff --git a/chart/.helmignore b/chart/.helmignore new file mode 100644 index 0000000..0e8a0eb --- /dev/null +++ b/chart/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/chart/Chart.yaml b/chart/Chart.yaml new file mode 100644 index 0000000..bd0bc0e --- /dev/null +++ b/chart/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: badhouseplants-net +description: A Helm chart for Kubernetes +type: application +version: 0.1.12 +appVersion: "1.16.0" diff --git a/chart/templates/NOTES.txt b/chart/templates/NOTES.txt new file mode 100644 index 0000000..76b24e8 --- /dev/null +++ b/chart/templates/NOTES.txt @@ -0,0 +1,22 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "badhouseplants-net.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "badhouseplants-net.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "badhouseplants-net.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "badhouseplants-net.name" . 
}},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT +{{- end }} diff --git a/chart/templates/_helpers.tpl b/chart/templates/_helpers.tpl new file mode 100644 index 0000000..864ef6f --- /dev/null +++ b/chart/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "badhouseplants-net.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "badhouseplants-net.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "badhouseplants-net.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "badhouseplants-net.labels" -}} +helm.sh/chart: {{ include "badhouseplants-net.chart" . }} +{{ include "badhouseplants-net.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "badhouseplants-net.selectorLabels" -}} +app.kubernetes.io/name: {{ include "badhouseplants-net.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "badhouseplants-net.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "badhouseplants-net.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/chart/templates/deployment.yaml b/chart/templates/deployment.yaml new file mode 100644 index 0000000..14f79af --- /dev/null +++ b/chart/templates/deployment.yaml @@ -0,0 +1,58 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "badhouseplants-net.fullname" . }} + labels: + {{- include "badhouseplants-net.labels" . | nindent 4 }} + {{- with .Values.deployAnnotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "badhouseplants-net.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "badhouseplants-net.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . 
| nindent 8 }} + {{- end }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + resources: + {{- toYaml .Values.resources | nindent 12 }} + command: +{{ toYaml .Values.command | indent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/chart/templates/ingress.yaml b/chart/templates/ingress.yaml new file mode 100644 index 0000000..4d8ee58 --- /dev/null +++ b/chart/templates/ingress.yaml @@ -0,0 +1,61 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "badhouseplants-net.fullname" . -}} +{{- $svcPort := .Values.service.port -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "badhouseplants-net.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/chart/templates/service.yaml b/chart/templates/service.yaml new file mode 100644 index 0000000..f1d3c10 --- /dev/null +++ b/chart/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "badhouseplants-net.fullname" . }} + labels: + {{- include "badhouseplants-net.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "badhouseplants-net.selectorLabels" . 
| nindent 4 }} diff --git a/chart/values.yaml b/chart/values.yaml new file mode 100644 index 0000000..f1a588f --- /dev/null +++ b/chart/values.yaml @@ -0,0 +1,73 @@ +replicaCount: 1 + +image: + repository: git.badhouseplants.net/allanger/badhouseplants-net + pullPolicy: Always + tag: latest + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +deployAnnotations: + keel.sh/trigger: poll + keel.sh/policy: 'force' + +podSecurityContext: {} + # fsGroup: 2000 + +command: + - "/bin/sh" + - "-c" + - "hugo server --bind 0.0.0.0 -p 80 -b https://badhouseplants.net/ --appendPort=false" + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: + kubernetes.io/ingress.class: istio + hosts: + - host: badhouseplants.net + paths: + - path: / + pathType: Prefix + tls: + - secretName: badhouseplants-wildcard-tls + hosts: + - badhouseplants.net + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/config.yaml b/config.yaml new file mode 100644 index 0000000..39cc7e6 --- /dev/null +++ b/config.yaml @@ -0,0 +1,65 @@ +baseURL: 'https://badhouseplants.net/' +languageCode: 'en-us' +title: 'Bad Houseplants' +theme: 'papermod' +menu: + main: + - name: Posts + url: /posts + weight: 10 + - name: Music + url: /music + weight: 11 + - name: Beats + url: /beats + weight: 12 + - name: About + url: /about + weight: 13 + - name: Search + url: /search + weight: 14 +taxonomies: + tag: tags +params: + ShowBreadCrumbs: true + ShowReadingTime: true + ShowPostNavLinks: true + ShowCodeCopyButtons: true + profileMode: + enabled: true + title: "Bad Houseplants" + subtitle: "... 
by allanger"
+    imageUrl: "Finish.png"
+    imageWidth: 150
+    imageHeight: 150
+    buttons:
+      - name: Source
+        url: "https://git.badhouseplants.net/allanger/badhouseplants-net"
+      - name: My Music
+        url: "https://funkwhale.badhouseplants.net/library/artists"
+  socialIcons:
+    - name: "telegram"
+      url: "https://t.me/allanger"
+    - name: "twitter"
+      url: "https://twitter.com/_allanger"
+    - name: "mastodon"
+      url: "https://mastodon.social/@allanger"
+    - name: github
+      url: 'https://github.com/allanger'
+    - name: email
+      url: 'mailto:allanger@zohomail.com'
+  ShowShareButtons: true
+  ShareButtons: ["telegram", "twitter", "reddit", "linkedin"]
+  env: production
+  title: Bad Houseplants
+  description: "...by allanger"
+  keywords: [Blog, Portfolio]
+  author: allanger
+  DateFormat: "January 2, 2006"
+  defaultTheme: auto
+outputs:
+  home:
+    - HTML
+    - RSS
+    - JSON
diff --git a/content/about/_index.md b/content/about/_index.md
new file mode 100644
index 0000000..bcf2e1a
--- /dev/null
+++ b/content/about/_index.md
@@ -0,0 +1,47 @@
+---
+title: About
+date: 2023-01-24T09:26:52+01:00
+draft: false
+---
+
+> It was supposed to be just yet another web page with reviews of musical releases, but after trying to write something about them, I've found out that I'm not good at it. So it's just a blog where I'm talking about everything that comes to my mind.
+
+[![Build Status](https://drone.badhouseplants.net/api/badges/allanger/badhouseplants-net/status.svg?ref=refs/heads/main)](https://drone.badhouseplants.net/allanger/badhouseplants-net/latest)
+
+
+### Who am I?
+
+> If you're hiring, you can find [my CV here]({{< ref "cv" >}} )
+
+I'm a musician and a geek who works full-time as a DevOps engineer, whatever that means. Thanks to my job, I know how to run self-hosted services pretty well, and that helps me achieve my goal of bringing the indie culture everywhere I can. I'm trying to separate myself from global companies as a user as much as possible in my daily life.
+
+Also, I'm a Linux lover, which doesn't really go well with my will to make music. I hope that one day developers will see that Linux is a real OS that can be used as a daily driver, and that building software for Linux is just as important as building it for macOS and Windows. I hope that we will be able to use not only open-source solutions on Linux, but also closed-source proprietary ones.
+
+### Music, Beats and Arrangements
+
+## Music
+
+> I always thought I was a musician
+[Check out what I've got](https://funkwhale.badhouseplants.net)
+
+You can find everything I consider ready enough to be shown on my [FunkWhale](https://funkwhale.badhouseplants.net/library) instance. Also, my music can be found on many streaming services, and yes, I know that it's not a very independent way of doing things, but it's one of many exceptions 🙃.
+
+All of my beats are waiting for somebody to do something with them. I'm giving them all away for donations, so if you happen to like any, just shoot me a message. I can re-arrange and remix them as much as needed. I can also mix your tracks, and I'm really willing to do that; it doesn't matter what kind of music it is, I'm ready to work with anything, as long as I like it *(at least a little bit)*.
+
+## IT
+
+> I'm a DevOps after all
+[Visit my gitea](https://git.badhouseplants.net)
+Since I'm a DevOps engineer, I work a lot with Kubernetes, containers, Linux, etc. And that's the root of my intention to move to Linux completely.
+
+I hope I will make my contribution to the world of Linux music production too. 
I'm hosting my own Gitea instance. There you will be able to find all my code (or almost all of my code). + +If you made it to here, you might think that it's the point of all of this existing. Self-hosted blog, a music streaming service, and git. **This guy is just a fucking geek! ** + +And yes, you're partially right. The main reason it exists is that I'm trying to follow and promote `indie/punk` culture, that is not only applies to arts. And that's going to be covered in my posts, I hope. + +--- + +### If you're still here, + +I'm looking for people with the same mindset as me, to make music or to code together, or anything. So I would be happy to get connections on [Mastodon](https://mastodon.social/@allanger) diff --git a/content/beats/_index.md b/content/beats/_index.md new file mode 100644 index 0000000..a7d50fa --- /dev/null +++ b/content/beats/_index.md @@ -0,0 +1,53 @@ +--- +title: Beats +date: 2023-01-24T09:26:52+01:00 +draft: false +--- +>I don't lease my beats. If you happen to like anything, just shout me a message and we will come to an agreement. And if you decide to use any of my beats you'll be the only one using it (legally). + +--- +### Easy Money +{{< rawhtml >}} + +{{< /rawhtml >}} + +### Phantom Limb +{{< rawhtml >}} + +{{< /rawhtml >}} + +### Ark +{{< rawhtml >}} + +{{< /rawhtml >}} + +### Tremor +{{< rawhtml >}} + +{{< /rawhtml >}} + +### Empty Cubicles +{{< rawhtml >}} + +{{< /rawhtml >}} + +### Body Drop +{{< rawhtml >}} + +{{< /rawhtml >}} + +### Broken Piano +{{< rawhtml >}} + +{{< /rawhtml >}} + +### Dead Wings +{{< rawhtml >}} + +{{< /rawhtml >}} + +### Trapped +{{< rawhtml >}} + +{{< /rawhtml >}} + diff --git a/content/cv/index.md b/content/cv/index.md new file mode 100644 index 0000000..75a230e --- /dev/null +++ b/content/cv/index.md @@ -0,0 +1,97 @@ +--- +title: "Curriculum Vitae (CV)" +date: 2023-02-11T18:29:00+01:00 +draft: false +ShowToc: true +--- +# Nikolai Rodionov + +``` +> Location: Dรผsseldorf, Germany +> Email: allanger@zohomail.com (preffered) +> Phone: 015223284008 +> Github: https://github.com/allanger +``` + +--- + +## About me +

+<img src="myself.jpeg" alt="drawing"/>
+

+ +I'm a DevOps engineer (or SRE if you wish) with 5++ years of hands-on experience with a decent amount of tools that are most probably used or going to be used in your company. One of the most important tools that I love working with and want to continue working with, is Kubernetes. At least, while I don't see any better alternative to it. I think that containers themselves are one of coolest inventions in development, and I'm trying to use them as long as it's possible. Also, I believe that every routine must be automated, because routing is a boring job that lets people lose focus and make mistakes. + +I think that there are several things that a good SRE or DevOps engineer must be able to do: +- To build reliable and stable infrastructure +- Keep this infrastructure up-to-date +- Keep all the source and instructions of this infrastructure clean and simple +- Avoid a human factor as long as possible +- And when it's not possible to avoid it, not to be afraid to take responsibility + +Also, I think it's important that before implementing anything, an engineer has understood all the requirements and checked tools that can fulfil them. I often see, how people try to use a tool for its name but not for its functionality, and hence they have to do a lot of additional work and deal with compromises. And if nothing really can fulfil those requirements, you need not be afraid of writing something new *and open-source it*. + +
+ +## Experience + +**Klรถckner-i**: DevOps Engineer +> 01.2022 - until now + +``` +| GCloud - Microsoft Azure +| Linux - Containers - Kubernetes +| Helm - Helmfile +| Percona Mysql - Postgresql +| Bash +| Prometheus - Grafana - Elasticsearch - Kibana +| ArgoCD - Gitlab CI - Github Actions +| Sops +| Ansible +``` +--- + +**Itigris**: DevOps Engineer +> 07.2019 - 12.2021 + +``` +| AWS - Yandex Cloud +| Linux - Containers - Kubernetes +| Helm - Helmfile - Kustomize +| Bash +| Gitlab CI - Drone - ArgoCD +| Postgresql - Redis +| Java - JS - Go +| Ansible - Terraform +| Prometheus - Grafana - Loki - Elasticsearch - Kibana +``` +--- + +**Etersoft**: DevOps Engineer +> 03.2017 - 06.2019 + +``` +| Bare metal - Proxmox - Virtual Box +| Linux - Containers - Networks +| Bash - Perl +| Mysql - Postgresql +| Minio - Ceph +| Gitlab CI +| Ansible +``` + +
+ +## A little bit more about me + +- I love to work with `Kubernetes`, but not with `yaml`. +- I'm a huge fan of [Helmfile](https://github.com/helmfile/helmfile). +- I have written several small cli tools in Rust, that you might find in my [GitHub profile pins](https://github.com/allanger) (they are not perfect, but I'm working on it). +- I'm contributing to [db-operator](https://github.com/kloeckner-i/db-operator). +- I'm trying to automate everything until I'm losing control over something that is automated. +- I love Perl, although I don't even remember how to write code with it, but I would be somehow thrilled to have any ability to work with it in production +- I also think that everything is better in Rust, or at least in Go *(if Bash is not enough)* + +I have a blog (written-as-code) that is deployed to K8s (https://badhouseplants.net/), with the source code stored in a self-hosted Gitea, that is also deployed to K8s alongside the CI/CD system where this blog is being built and published. This CV is also being built as a part of the CI process, and then uploaded to `minio` storage that is also ~~surprisingly~~ running in this cluster. So you can download the latest version of CV here: + +> But I can't guarantee 100% availability because it's a one-node k8s, and sometimes I need to do a maintenance work diff --git a/content/cv/myself.jpeg b/content/cv/myself.jpeg new file mode 100644 index 0000000..25fb28d Binary files /dev/null and b/content/cv/myself.jpeg differ diff --git a/content/music/index.md b/content/music/index.md new file mode 100644 index 0000000..0c265eb --- /dev/null +++ b/content/music/index.md @@ -0,0 +1,49 @@ +--- +title: "Music" +date: 2023-01-31T13:52:43+01:00 +draft: false +ShowToc: true + +--- +Everything that's created by me, can be found on my [funkwhale instance](https://funkwhale.badhouseplants.net). But I'm only uploading `lossy` there. I was trying to upload losseless, but then it either doesn't really work with my Android App, or it's hard to manage. And it needs a way more disk that way. So if you want to listnen to lossless, go to my [Bandcamp](https://allanger.bandcamp.com/). *A lot of tracks are still not there, but they will be there soon*. I also have a [SoundCloud account](https://soundcloud.com/allanger) and I try to publish everything there. + + +--- + +### allanger + +[Spotify](https://open.spotify.com/artist/1VPAs75xrhaXhCIIHsgF02) - [Apple Music](https://music.apple.com/us/artist/allanger/1617855325) - [Deezer](https://www.deezer.com/us/artist/117780712) - [SoundCloud](https://soundcloud.com/allanger) - [Bandcamp](https://allanger.bandcamp.com/) - [Funkwhale](https://funkwhale.badhouseplants.net/library/artists/3/) + +#### Anymore +> In this song, I'm using samples from a YouTube video and so I'm not sure that I can distribute on all platforms. That's why it exists only on SoundCloud and Funkwhale +>![Cover](/music/allanger-Anymore.jpg) +>Release Date: 2018-12-26 +> +>Genre: Indie +> +> Sub Genre: Lo-Fi Indie +[SoundCloud](https://soundcloud.com/allanger/anymore) - [Funkwhale](https://funkwhale.badhouseplants.net/library/albums/11/) + + +### Oveleane + +> It's another project made by me, I just thought that that electronic stuff won't fit well in the allanger's profile, and so decided to separate them. But it's still allanger, you know... 
+ +[Spotify](https://open.spotify.com/artist/2PKE1XvwP82LCacM5q6rCx?si=hJyJWcEgR4mZLkjbCso45A) - [Apple Music](https://music.apple.com/us/artist/oveleane/1654951021) - [Deezer](https://www.deezer.com/us/artist/190392997) + + +#### Four Steps Behind +>![Cover](/music/Oveleane%20-%20Four%20Steps%20Behind.jpg) +>Release Date: 2022-12-05 +> +>Genre: Electronic +> +>Sub Genre: IDM/Experimental + +[Spotify](https://open.spotify.com/album/1RjB1xLoD2JXmWuBjGegCN?si=fIsGrOfoQRaeKu9f-Oh0dw) - [Apple Music](https://music.apple.com/us/album/1654953305) - [Deezer](https://www.deezer.com/us/album/377293977) - [Funkwhale](https://funkwhale.badhouseplants.net/library/albums/1/) + +{{< rawhtml >}} + +{{< /rawhtml >}} + + diff --git a/content/posts/_index.md b/content/posts/_index.md new file mode 100644 index 0000000..e69de29 diff --git a/content/posts/argocd-vs-helmfile-application/cover.png b/content/posts/argocd-vs-helmfile-application/cover.png new file mode 100644 index 0000000..7536770 Binary files /dev/null and b/content/posts/argocd-vs-helmfile-application/cover.png differ diff --git a/content/posts/argocd-vs-helmfile-application/index.md b/content/posts/argocd-vs-helmfile-application/index.md new file mode 100644 index 0000000..11c8ca7 --- /dev/null +++ b/content/posts/argocd-vs-helmfile-application/index.md @@ -0,0 +1,574 @@ +--- +title: "ArgoCD vs Helmfile: Applications" +date: 2023-02-13T12:14:09+01:00 +draft: false +cover: + image: "cover.png" + caption: "ArgoCD" + relative: false + responsiveImages: false +ShowToc: true +--- + +> So as promised in [the previous ArgoCD post]({{< ref "dont-use-argocd-for-infrastructure" >}}), I'll try to show a simple example of Pull Requests for different kinds of setups. This is the first part. Putting everything in the same post seems kind of too much. + +# Intro +I've created three `main` branches and three branches for install two applications. I assume we have two production clusters (If you've read the previous post, you know that by saying 'production', I mean production for SRE team, so they can be dev/stage/whatever for other teams) and one test cluster (the one where SRE team can test anything without affecting other teams) + +You can already check all of them here: + +I've decided to install [Vertical pod autoscaler](https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler) to both prod clusters and [goldilocks](https://github.com/FairwindsOps/goldilocks) to only one of them. Therefore, I have to add both to the test-cluster as well. Also, I've promised that I'd implement the CI/CD for all of those solutions, but I think that it's going to be enough just to describe the logic. If you really want to see different implementation of CI/CD, you can shoot me a message, and I will write another post then. + +# Applications (Ann App of Apps) + +So here is the PR for installing applications with `Application` manifests. + + +I've chosen to follow the `App of apps` pattern, because it's including changes that must have been done if you use a "direct" applications installation and `app of apps`. So let's have a look at the main manifests, here you can see the base: + +Initially I thought to use only one "Big Application" manifest for all three clusters, but I found out that it's not so easy when you don't have clusters with exactly the same infrastructure. 
Even with multi-source apps, you will probably have to use an additional tool for templating/substituting, for example like this: +```YAML +# app-of-apss.yaml +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: app-of-apps + namespace: argo-system +spec: + destination: + namespace: argo-system + server: https://kubernetes.default.svc + project: system + sources: + - path: ./manifests/$CLUSTER + repoURL: git@git.badhouseplants.net:allanger/helmfile-vs-argo.git + targetRevision: argo-apps-main + - path: ./manifests/common + repoURL: git@git.badhouseplants.net:allanger/helmfile-vs-argo.git + targetRevision: argo-apps-main +``` + +and then, in a pipeline do something like this: +```BASH +export CLUSTER=cluster1 +kubectl apply $(envsubst < app-of-apps.yaml) # I haven't tested it out, so this command may no work, but I hope you get the point. +``` + +So it's either additional files, or an additional logic in CI/CD. + +Also, the `helm-freeze` thing. I wanted to vendor charts, because in this example it's required, but my Gitea instance can't preview file changes when there are 9000+ lines of code updated, so I had to remove. + +But logic would be like this +- Manual part: + - Update `helm-freeze.yaml` + - Run `helm-freeze sync` + - Add a new application to the `manifests/$CLUSTER` dir + - Push +- CI/CD + - Since it needs to be `GitOps`, you need to check that charts in the `vendor` dir are up-to-date with `helm-freeze.yaml`. *Because if you updated helm-freeze and forgot to execute `helm-freeze sync`, you will have a contradiction between actual and desired states. That's one of the reasons, why I don't like this kind of vendoring. Either it's an addition step in CI, that is verifying that the manual step was done, or it's an additional work for reviewer. You also can add an action that is going to execute it withing the pipeline and push to your branch, but I'm completely against it. (something for another post maybe)* + + - Then depending on a branch: + - If not `main` + > Then you need to run `argocd diff` for production clusters, and deploy changes to the test clusters, so it's something like + - If `main` + > Deploy to all clusters + +So let's try to do it + +So we create a first `app-of-apps` manifests +```YAML +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: app-of-apps + namespace: argo-system +spec: + destination: + namespace: argo-system + server: https://kubernetes.default.svc + project: default + source: + path: ./manifests/cluster2/ + repoURL: ssh://git@git.badhouseplants.net/allanger/helmfile-vs-argo.git + targetRevision: argo-apps-updated + +``` + +Then we need to create apps + +```YAML +# ./manifests/cluster2/vpa.yaml +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: vpa + namespace: argo-system +spec: + destination: + namespace: vpa-system + server: https://kubernetes.default.svc + project: default + source: + helm: + releaseName: vpa + valueFiles: + - ../../values/vpa.common.yaml + path: ./vendor/vpa + repoURL: ssh://git@git.badhouseplants.net/allanger/helmfile-vs-argo.git + targetRevision: argo-apps-updated + +``` + +Here we have different options. + +- Sync everything automatically (app-of-apps and applications), but it doesn't look too fail-safe to me. And also we can't know diff then, because what's different will be applied immediately. So it's ๐Ÿ‘Ž +- Sync automatically only the `app-of-apps`, and then sync applications with the `argocd` cli. 
It sounds better, because then we can run diff on applications and know the difference between a wished state and a real state, so it's closer to ๐Ÿ‘ +- Sync applications automatically, but app-of-apps with cli. Doesn't sound to bad, does it? Maybe not that flexible as the previous option, but still not too bad. So it's closer to ๐Ÿ‘ too. +- Sync everything with cli. I would say it will give you the best control, but will become additional steps in the pipeline. Now I don't think it's a hard thing to implement, so let's say "closer to ๐Ÿ‘ too". + +I don't consider the **first** option a reliable one, so I wouldn't even talk about it. You can try, of course, but your changes won't be visible unless they are deployed. So it's like the "test on production" thing. + +The **second**, let's have a look. Let's try adding some values to the `vpa` release, and install Goldilocks (assuming it wasn't installed). + +VPA values: +```YAML +# ./values/vpa.common.yaml +# I've just changes `false` to `true` +updater: + enabled: true # <- here +``` + +Goldilocks app: +```YAML +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: goldilocks + namespace: argo-system +spec: + destination: + namespace: vpa-system + server: https://kubernetes.default.svc + project: default + source: + helm: + releaseName: goldilocks + path: ./vendor/goldilocks + repoURL: ssh://git@git.badhouseplants.net/allanger/helmfile-vs-argo.git + targetRevision: argo-apps-updated + +``` + +And I pushed to repo. + +So now let see what I've got in UI: +![Changes in UI](/argocd-vs-helmfile/update-in-ui.png) + +This is how `diffs` for VPA look in the UI: +![Diff in UI](/argocd-vs-helmfile/diff-in-ui.png) + +{{< details "Here you can find all the diffs from the UI as text" >}} + +```diff ++ apiVersion: apps/v1 ++ kind: Deployment ++ metadata: ++ labels: ++ app.kubernetes.io/component: updater ++ app.kubernetes.io/instance: vpa ++ app.kubernetes.io/managed-by: Helm ++ app.kubernetes.io/name: vpa ++ app.kubernetes.io/version: 0.11.0 ++ argocd.argoproj.io/instance: vpa ++ helm.sh/chart: vpa-1.6.0 ++ name: vpa-updater ++ namespace: vpa-system ++ spec: ++ replicas: 1 ++ selector: ++ matchLabels: ++ app.kubernetes.io/component: updater ++ app.kubernetes.io/instance: vpa ++ app.kubernetes.io/name: vpa ++ template: ++ metadata: ++ labels: ++ app.kubernetes.io/component: updater ++ app.kubernetes.io/instance: vpa ++ app.kubernetes.io/name: vpa ++ spec: ++ containers: ++ - env: ++ - name: NAMESPACE ++ valueFrom: ++ fieldRef: ++ fieldPath: metadata.namespace ++ image: 'k8s.gcr.io/autoscaling/vpa-updater:0.11.0' ++ imagePullPolicy: Always ++ livenessProbe: ++ failureThreshold: 6 ++ httpGet: ++ path: /health-check ++ port: metrics ++ scheme: HTTP ++ periodSeconds: 5 ++ successThreshold: 1 ++ timeoutSeconds: 3 ++ name: vpa ++ ports: ++ - containerPort: 8943 ++ name: metrics ++ protocol: TCP ++ readinessProbe: ++ failureThreshold: 120 ++ httpGet: ++ path: /health-check ++ port: metrics ++ scheme: HTTP ++ periodSeconds: 5 ++ successThreshold: 1 ++ timeoutSeconds: 3 ++ resources: ++ limits: ++ cpu: 200m ++ memory: 1000Mi ++ requests: ++ cpu: 50m ++ memory: 500Mi ++ securityContext: {} ++ securityContext: ++ runAsNonRoot: true ++ runAsUser: 65534 ++ serviceAccountName: vpa-updater +``` + +```DIFF +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: > + 
{"apiVersion":"rbac.authorization.k8s.io/v1","kind":"ClusterRoleBinding","metadata":{"annotations":{},"labels":{"argocd.argoproj.io/instance":"vpa"},"name":"vpa-actor"},"roleRef":{"apiGroup":"rbac.authorization.k8s.io","kind":"ClusterRole","name":"vpa-actor"},"subjects":[{"kind":"ServiceAccount","name":"vpa-recommender","namespace":"vpa-system"}]} + labels: + argocd.argoproj.io/instance: vpa + managedFields: + - apiVersion: rbac.authorization.k8s.io/v1 + fieldsType: FieldsV1 + fieldsV1: + 'f:metadata': + 'f:labels': + .: {} + 'f:argocd.argoproj.io/instance': {} + 'f:roleRef': {} + 'f:subjects': {} + manager: argocd-application-controller + operation: Update + time: '2023-02-13T20:58:02Z' + - apiVersion: rbac.authorization.k8s.io/v1 + fieldsType: FieldsV1 + fieldsV1: + 'f:metadata': + 'f:annotations': + .: {} + 'f:kubectl.kubernetes.io/last-applied-configuration': {} + manager: argocd-controller + operation: Update + time: '2023-02-13T20:58:02Z' + name: vpa-actor + resourceVersion: '34857' + uid: 71958267-68b4-4923-b2bb-eaf7b3c1a992 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: vpa-actor +subjects: + - kind: ServiceAccount + name: vpa-recommender + namespace: vpa-system ++ - kind: ServiceAccount ++ name: vpa-updater ++ namespace: vpa-system +``` +```DIFF ++ apiVersion: rbac.authorization.k8s.io/v1 ++ kind: ClusterRoleBinding ++ metadata: ++ labels: ++ argocd.argoproj.io/instance: vpa ++ name: vpa-evictionter-binding ++ roleRef: ++ apiGroup: rbac.authorization.k8s.io ++ kind: ClusterRole ++ name: vpa-evictioner ++ subjects: ++ - kind: ServiceAccount ++ name: vpa-updater ++ namespace: vpa-system +``` +```DIFF ++ apiVersion: rbac.authorization.k8s.io/v1 ++ kind: ClusterRoleBinding ++ metadata: ++ labels: ++ argocd.argoproj.io/instance: vpa ++ name: vpa-status-reader-binding ++ roleRef: ++ apiGroup: rbac.authorization.k8s.io ++ kind: ClusterRole ++ name: vpa-status-reader ++ subjects: ++ - kind: ServiceAccount ++ name: vpa-updater ++ namespace: vpa-system +``` +```DIFF +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: > + {"apiVersion":"rbac.authorization.k8s.io/v1","kind":"ClusterRoleBinding","metadata":{"annotations":{},"labels":{"argocd.argoproj.io/instance":"vpa"},"name":"vpa-target-reader-binding"},"roleRef":{"apiGroup":"rbac.authorization.k8s.io","kind":"ClusterRole","name":"vpa-target-reader"},"subjects":[{"kind":"ServiceAccount","name":"vpa-recommender","namespace":"vpa-system"}]} + labels: + argocd.argoproj.io/instance: vpa + managedFields: + - apiVersion: rbac.authorization.k8s.io/v1 + fieldsType: FieldsV1 + fieldsV1: + 'f:metadata': + 'f:labels': + .: {} + 'f:argocd.argoproj.io/instance': {} + 'f:roleRef': {} + 'f:subjects': {} + manager: argocd-application-controller + operation: Update + time: '2023-02-13T20:58:02Z' + - apiVersion: rbac.authorization.k8s.io/v1 + fieldsType: FieldsV1 + fieldsV1: + 'f:metadata': + 'f:annotations': + .: {} + 'f:kubectl.kubernetes.io/last-applied-configuration': {} + manager: argocd-controller + operation: Update + time: '2023-02-13T20:58:02Z' + name: vpa-target-reader-binding + resourceVersion: '34855' + uid: 30261740-ad5d-4cd9-b043-0ff18daaf3aa +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: vpa-target-reader +subjects: + - kind: ServiceAccount + name: vpa-recommender + namespace: vpa-system ++ - kind: ServiceAccount ++ name: vpa-updater ++ namespace: vpa-system +``` +{{< /details >}} + +And for 
Goldilocks +![Goldilocks Application](/argocd-vs-helmfile/goldilocks-ui.png) + +All the diffs are also there, and they look good. + +But to seem them I had to push to the target branch. And we want to see changes without pushing. +```YAML +# main +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: app-of-apps + namespace: argo-system +spec: + destination: + namespace: argo-system + server: https://kubernetes.default.svc + project: default + source: + path: ./manifests/cluster2/ + repoURL: ssh://git@git.badhouseplants.net/allanger/helmfile-vs-argo.git + targetRevision: argo-apps-main + +``` + +Then we need to create apps + +```YAML +# ./manifests/cluster2/vpa.yaml +# feature branch +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: vpa + namespace: argo-system +spec: + destination: + namespace: vpa-system + server: https://kubernetes.default.svc + project: default + source: + helm: + releaseName: vpa + valueFiles: + - ../../values/vpa.common.yaml + path: ./vendor/vpa + repoURL: ssh://git@git.badhouseplants.net/allanger/helmfile-vs-argo.git + targetRevision: argo-apps-main +``` + +![App of apps in the `main`](/argocd-vs-helmfile/app-of-apps-main.png) + +So currently app of apps doesn't know about what's happening in my new branch. And so I can't just do `argocd app vpa diff`. So what should I do? +```BASH +argocd app diff --help +... +Usage: + argocd app diff APPNAME [flags] +... +``` + +That means that I can't use it for those new apps that exist inly in my branch, because I need to pass an App name, and since it's not installed yet, I have something like +```BASH +argocd app diff vpa +FATA[0000] rpc error: code = NotFound desc = error getting application: applications.argoproj.io "vpa" not found +``` + +There is a `--local` option, but it still requires a name ~~(why if there is a name in manfiests ๐Ÿ™ƒ๐Ÿ™ƒ๐Ÿ™ƒ)~~ +```BASH +# Just testing out +argocd app diff vpa --local ./manifests/cluster2/ +FATA[0000] rpc error: code = NotFound desc = error getting application: applications.argoproj.io "vpa" not found # ๐Ÿคช +``` + +Ok, then we can check the app-of-apps +```BASH +argocd app diff app-of-apps --local ./cluster-1.yaml +Warning: local diff without --server-side-generate is deprecated and does not work with plugins. Server-side generation will be the default in v2.7.FATA[0000] error while parsing source parameters: stat cluster-1.yaml/.argocd-source.yaml: not a directory + +argocd app diff app-of-apps --local ./cluster-1.yaml --server-side-generate +FATA[0000] rpc error: code = Unknown desc = failed to get app path: ./manifests/cluster2/: app path does not exist + +argocd app diff app-of-apps --local ./cluster-2.yaml --server-side-generate --loglevel debug +FATA[0000] rpc error: code = Unknown desc = failed to get app path: ./manifests/cluster2/: app path does not exist +# I can't get it, maybe anybody could tell me what I'm doing wrong? + + +argocd app diff app-of-apps --local ./cluster-2.yaml +Warning: local diff without --server-side-generate is deprecated and does not work with plugins. Server-side generation will be the default in v2.7.FATA[0000] error while parsing source parameters: stat cluster-2.yaml/.argocd-source.yaml: not a directory + + +mkdir /tmp/argo-test +cp cluster-2.yaml /tmp/argo-test +argocd app diff app-of-apps --local /tmp/argo-test --loglevel debug + +Warning: local diff without --server-side-generate is deprecated and does not work with plugins. Server-side generation will be the default in v2.7. 
+===== argoproj.io/Application /app-of-apps ====== +0a1,15 +> apiVersion: argoproj.io/v1alpha1 +> kind: Application +> metadata: +> labels: +> argocd.argoproj.io/instance: app-of-apps +> name: app-of-apps +> spec: +> destination: +> namespace: argo-system +> server: https://kubernetes.default.svc +> project: default +> source: +> path: manifests/cluster2/ +> repoURL: ssh://git@git.badhouseplants.net/allanger/helmfile-vs-argo.git +> targetRevision: argo-apps-main + +# If i change a branch for the app of apps target to the current one + +cat cluster-2.yaml +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: app-of-apps + namespace: argo-system +spec: + destination: + namespace: argo-system + server: https://kubernetes.default.svc + project: default + source: + path: ./manifests/cluster2/ + repoURL: ssh://git@git.badhouseplants.net/allanger/helmfile-vs-argo.git + targetRevision: argo-apps-updated + +kuvectl apply -f cluster-2.yaml +cp cluster-2.yaml /tmp/argo-test +argocd app diff app-of-apps --local /tmp/argo-test --loglevel debug +Warning: local diff without --server-side-generate is deprecated and does not work with plugins. Server-side generation will be the default in v2.7. +===== argoproj.io/Application /app-of-apps ====== +0a1,15 +> apiVersion: argoproj.io/v1alpha1 +> kind: Application +> metadata: +> labels: +> argocd.argoproj.io/instance: app-of-apps +> name: app-of-apps +> spec: +> destination: +> namespace: argo-system +> server: https://kubernetes.default.svc +> project: default +> source: +> path: ./manifests/cluster2/ +> repoURL: ssh://git@git.badhouseplants.net/allanger/helmfile-vs-argo.git +> targetRevision: argo-apps-updated +``` + +I don't really understand what it means. *Most probably, I'm just stupid.* But what I see is that it's not working with ` --server-side-generate ` with an error, that I can't really understand. And is saying that I shouldn't use it without the flag, because that way of running it is deprecated. And even without the flag, it's giving me a strange output, that I don't know how to use it. + +So as I see, to have a proper diff, you need to apply. But it doesn't look like a fail-safe and scalable way to use. + +I told that we can check different options for syncing, but as I see now, other workflows won't give me a better overview about what's happening. So I don't think it makes a lot of sense. If I find a way to see a proper diff without applying manifests first, I would go back to this topic and write one more post. + +## Maybe it's because an App of Apps layer + +Let's try installing apps directly. Remove an app-of-apps from k8s. And let's use manifests from `/manifests/cluster2/` directly. As I see, diffing won't work anyway for applications that are not installed yet. So you can check ones that are already installed, but I couldn't make it work too. I was changing values to check if they are shown, but they weren't. *Again, I could simply screw up, and if you have a positive experience with that, don't hesitate to let me know about it, I'm willing to change my mind* + +## Conclusion +So you can check the PR here: + +I like that `values` can be handled as normal values files. (But for handling secrets you might have to add a [CMP](https://argo-cd.readthedocs.io/en/stable/user-guide/config-management-plugins/), that means an additional work and maintenance) But even if adding CMP is fine, I couldn't get proper `diffs` for my changes, that means that I can't see what's happening without applying manifests. 
And applying manifests would mean that other team members couldn't work on other tickets within the same scope, so it looks like a bottleneck to me.
+
+What I also don't like is that you need to add a lot of manifests to manage all the applications. We have only 2 manifests here, and they are copied from folder to folder, so there is a lot of repeated code. And repeated code is never good. So I would rather write a tool that lets you pick applications from a list of all applications and choose the clusters where they need to be deployed, so that the config looks like this:
+```YAML
+app_path: ./manifests/common
+clusters:
+  - cluster: cluster1
+    applications:
+      - vpa
+  - cluster: cluster2
+    applications:
+      - vpa
+      - goldilocks
+  - cluster: cluster3
+    applications:
+      - vpa
+      - goldilocks
+```
+But I think that with the whole GitOps pull-based concept it would be a hard thing to implement. And in the end it looks like helmfile, so ... 🤷‍♀️🤷‍♀️🤷‍♀️
+
+I can only say that I see no benefit in using Argo like this. It seems like either a very complicated setup (you will most probably be able to implement anything you need, the question is how much time you will spend on that), or a ~~crippled~~ incomplete setup.
+
+And if you compare the number of lines that are updated to install these apps as `Applications` with the helmfile equivalent, it's going to be ~100 vs ~30. That's something I also don't like.
+
+In the next post I will try doing the same with `ApplicationSets`, and we'll see if it looks better or not.
+
+Thanks,
+
+Oi!
diff --git a/content/posts/argocd-vs-helmfile-applicationset/cover.png b/content/posts/argocd-vs-helmfile-applicationset/cover.png
new file mode 100644
index 0000000..d242e3a
Binary files /dev/null and b/content/posts/argocd-vs-helmfile-applicationset/cover.png differ
diff --git a/content/posts/argocd-vs-helmfile-applicationset/index.md b/content/posts/argocd-vs-helmfile-applicationset/index.md
new file mode 100644
index 0000000..fcd0ff9
--- /dev/null
+++ b/content/posts/argocd-vs-helmfile-applicationset/index.md
@@ -0,0 +1,240 @@
+---
+title: "ArgoCD vs Helmfile: ApplicationSet"
+date: 2023-02-15T10:14:09+01:00
+draft: false
+cover:
+  image: "cover.png"
+  caption: "ArgoCD"
+  relative: false
+  responsiveImages: false
+ShowToc: true
+---
+
+This is the second post about *"argocding"* your infrastructure. [The first one can be found here]({{< ref "argocd-vs-helmfile-application" >}}).
+
+There I tried using `Applications` for deploying; here I will show an example with `ApplicationSets`. As in the previous article, I will be installing [VPA](https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler) and [Goldilocks](https://github.com/FairwindsOps/goldilocks).
+
+So let's prepare a base. We have 3 clusters:
+- cluster-1
+- cluster-2
+- cluster-3
+
+> With `ApplicationSets` you have an incredible number of ways to deploy stuff. So what I'm doing may look super different from what you would do
+
+I'm creating 3 manifests, one for each cluster. 
+```YAML +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: helm-releases + namespace: argo-system +spec: + syncPolicy: + preserveResourcesOnDeletion: true + generators: + - git: + repoURL: https://git.badhouseplants.net/allanger/helmfile-vs-argo.git + revision: argo-applicationset-main + files: + - path: "cluster2/*" + - git: + repoURL: https://git.badhouseplants.net/allanger/helmfile-vs-argo.git + revision: argo-applicationset-main + files: + - path: "common/*" + template: + metadata: + name: "{{ argo.application }}" + namespace: argo-system + spec: + project: "{{ argo.project }}" + source: + helm: + valueFiles: + - values.yaml + values: |- + {{ values }} + repoURL: "{{ chart.repo }}" + targetRevision: "{{ chart.version }}" + chart: "{{ chart.name }}" + destination: + server: "{{ argo.cluster }}" + namespace: "{{ argo.namespace }}" + +``` + + +Manifests with a setup like thos have only one values that is really different, so we could create just one manifest that would look like that: +```YAML +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: helm-releases + namespace: argo-system +spec: + syncPolicy: + preserveResourcesOnDeletion: true + generators: + - git: + repoURL: https://git.badhouseplants.net/allanger/helmfile-vs-argo.git + revision: argo-applicationset-main + files: + - path: "$CLUSTER/*" + - git: + repoURL: https://git.badhouseplants.net/allanger/helmfile-vs-argo.git + revision: argo-applicationset-main + files: + - path: "common/*" + template: + metadata: + name: "{{ argo.application }}" + namespace: argo-system + spec: + project: "{{ argo.project }}" + source: + helm: + valueFiles: + - values.yaml + values: |- + {{ values }} + repoURL: "{{ chart.repo }}" + targetRevision: "{{ chart.version }}" + chart: "{{ chart.name }}" + destination: + server: "{{ argo.cluster }}" + namespace: "{{ argo.namespace }}" + +``` + +And add a step in the `CI` pipeline, where we're substituting a correct value instead of the variable. But since I'm not really implementing a CI, I will create 3 manifests. + +Then I need to add `generators` in the feature branch: +```YAML +#/common/vpa.yaml +--- +argo: + cluster: https://kubernetes.default.svc + application: vpa + project: default + namespace: vpa-system +chart: + version: 1.6.0 + name: vpa + repo: https://charts.fairwinds.com/stable +values: | + updater: + enabled: false +``` +```YAML +#/cluster2/goldilocks.yaml +--- +argo: + cluster: https://kubernetes.default.svc + application: goldilocks + project: default + namespace: vpa-system +chart: + version: 6.5.0 + name: goldilocks + repo: https://charts.fairwinds.com/stable +values: | +``` + +And the main problem here is that values are passed as a string. So you can't separate them into different files, use secrets or share common values. That can be solved with multi-source apps that came with ArgoCD 2.6, but I can't say that they are production-ready yet. Also, I've read that `ApplicationSets` can be used to separate values and charts, but it seemed a way too complicated to me back then, and I think that with ArgoCD 2.7 this problem will be completely solved, so I'm not sure that it makes sense to check that approach now. + +Next thing is that Git generators are pointed to a specific branch, so I have two problems. How to test changes on the `cluster-test` and how to view diffs. 
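As a side note, the per-cluster substitution step mentioned above could be sketched roughly like this; the file name and the use of `envsubst` are my assumptions here, not part of the actual setup:

```BASH
# Render the single templated ApplicationSet for one cluster and apply it (sketch only)
export CLUSTER=cluster2
envsubst < applicationset.yaml | kubectl apply -f -
```
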
+ +### Test changes +This problem is solvable, I will show on a cluster-2 example, because I don't have 3 clusters running locally, but this logic should apply to the test cluster. + + +After you add new generators files, you need to deploy them to the `test cluster`, and you also need not override what's being tested by other team-members. So the best option that I currently see, is to get an `ApplicationSet` manifest that is already deployed to `k8s` and add new generators to it. So it looks like this: +```YAML +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: helm-releases + namespace: argo-system +spec: + syncPolicy: + preserveResourcesOnDeletion: true + generators: + - git: + repoURL: https://git.badhouseplants.net/allanger/helmfile-vs-argo.git + revision: argo-applicationset-main + files: + - path: "cluster2/*" + - git: + repoURL: https://git.badhouseplants.net/allanger/helmfile-vs-argo.git + revision: argo-applicationset-main + files: + - path: "common/*" + # This should be added within CI and removed once a the branch is merged + - git: + repoURL: https://git.badhouseplants.net/allanger/helmfile-vs-argo.git + revision: argo-applicationset-updated + files: + - path: "common/*" + - git: + repoURL: https://git.badhouseplants.net/allanger/helmfile-vs-argo.git + revision: argo-applicationset-updated + files: + - path: "cluster2/*" + template: + metadata: + name: "{{ argo.application }}" + namespace: argo-system + spec: + project: "{{ argo.project }}" + source: + helm: + valueFiles: + - values.yaml + values: |- + {{ values }} + repoURL: "{{ chart.repo }}" + targetRevision: "{{ chart.version }}" + chart: "{{ chart.name }}" + destination: + server: "{{ argo.cluster }}" + namespace: "{{ argo.namespace }}" + +``` + +After applying this change, this what I've got +![ApplicationSet](/argocd-vs-helmfile/applicationset-test.png) + +Those applications should be deployed automatically within a pipeline. So steps in your pipeline would look like that: +- Get current `ApplicationSet` manifest from Kubernetes +- Add new generators +- Sync applications with argocd cli + +But I'm not sure what going to happen if you have two different pipelines at the same time. Probably, changes will be overwriten but the pipeline that is a little bit slower. But I think that it can be solved without a lot of additional problems. And also I don't think that it's a situation that you will have to face very often, so you can just rerun your pipeline after all. + +### Diffs +Diffs are not supported for `ApplicationSets` at the moment, and I'm not sure when they will be: + +~~And with the diffing situation from the previous article, I think that they will not work the way I'd like them to work.~~ + +But I think that the easiest way to deal with them right now, would be adding `git generators` not only to a test cluster, but to all clusters, add to those applications an additional label (e.g. `test: true`), and sync only those applications that don't have this label. 
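A rough sketch of that label-based filtering (the `test` label name and the exact selector behaviour are assumptions I haven't verified):

```BASH
# Sync everything except the applications that only exist for testing (sketch only);
# the label itself would have to end up on the generated Applications, e.g. via the ApplicationSet template metadata
argocd app sync --selector 'test!=true'
```
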
+So the whole pipeline for a branch would look like this:
+
+Feature branch:
+- Get the current `ApplicationSet` manifests from Kubernetes (each cluster)
+- Add the new generators (each cluster)
+- Sync applications with the argocd cli (only the test cluster)
+
+Main branch (merged):
+- Get the current `ApplicationSet` manifests from Kubernetes (each cluster)
+- Remove obsolete generators (each cluster)
+- Sync applications with the argocd cli (each cluster, filtering by label so as not to sync those that are not merged yet)
+
+> But I'm not sure exactly how to manage these `test` labels. They can be added manually to the generator files, but then you can't be sure that nobody will forget to do it. So I think that, if possible, they should be added to the generators inside the `ApplicationSet` manifest, or added to the applications right after they are created by the `ApplicationSet`. The second way is not the best, though, because if the `main` pipeline is faster than the feature one, you will have the application installed in a production cluster.
+
+## Conclusion
+I like this way a lot more than simple `Applications`, especially with multi-source applications. I think that the main problem with this approach is the complicated CI/CD pipelines. And I don't like that, for diffing, you need to have something added to prod clusters. A diff must be safe, and if you add 1000 generator files and push them, you will have 1000 new applications in your ArgoCD; I'm not sure how it's going to handle that. And since ArgoCD is something that manages your whole infrastructure, I bet you want it to work like a charm; you don't want to doubt whether it will survive situations like this.
+
+The amount of changes is not big, pretty close to helmfile, I'd say. And the more common stuff you have, the less you need to copy-paste. You can see the PR here:
+
+Thanks,
+
+Oi!
diff --git a/content/posts/argocd-vs-helmfile-helmfile/index.md b/content/posts/argocd-vs-helmfile-helmfile/index.md
new file mode 100644
index 0000000..9272049
--- /dev/null
+++ b/content/posts/argocd-vs-helmfile-helmfile/index.md
@@ -0,0 +1,1585 @@
+---
+title: "Argocd vs Helmfile: Helmfile"
+date: 2023-02-17T12:48:51+01:00
+draft: false
+ShowToc: false
+---
+
+In the two previous posts I've described how it's possible to install a couple of applications with [`Applications`]({{< ref "argocd-vs-helmfile-application" >}}) and [`ApplicationSets`]({{< ref "argocd-vs-helmfile-applicationset" >}}), and this one is the last in the series. Here I'm going to install the same applications (`VPA` and `Goldilocks`) with helmfile, and I will tell you why I think it's better than `ArgoCD`.
+
+So let's start. Here you can find the [initial config](https://git.badhouseplants.net/allanger/helmfile-vs-argo/src/branch/helmfile-main). Let's see what we've got here.
+
+The main file is `/helmfile.yaml`:
+```YAML
+---
+{{ readFile "releases.yaml" }}
+
+bases:
+  - environments.yaml
+  - repositories.yaml
+
+releases:
+
+helmfiles:
+  - path: {{.Environment.Name }}/helmfile.yaml
+```
+
+You can see several imports here; let's check them one by one:
+
+```YAML
+# releases.yaml <- here we define all the charts that are going to be used by helmfile. It's a templating layer, so we don't have to copy-paste a lot of YAML.
+---
+templates:
+# It's supposed to be empty when nothing is installed, but I've decided to show that helmfile can also be used to manage CRDs. With these hooks, you'll be able to install CRDs with helmfile, see diffs when updating, and update the CRDs while updating releases.
+# But I need to say that I haven't checked how this works on big systems, so consider it experimental.
+  crd-management-hook: &crd-management-hook
+    hooks:
+      - events: ["preapply"]
+        showlogs: true
+        command: "sh"
+        args:
+          - -c
+          - "helm show crds {{ .Release.Chart }} --version {{ .Release.Version }} | kubectl apply -f -"
+      - events: ["prepare"]
+        showlogs: true
+        command: "sh"
+        args:
+          - -c
+          - "helm show crds {{ .Release.Chart }} --version {{ .Release.Version }} | kubectl diff -f - || true"
+      - events: ["postuninstall"]
+        showlogs: true
+        command: "sh"
+        args:
+          - -c
+          - "helm show crds {{ .Release.Chart }} --version {{ .Release.Version }} | kubectl delete -f -"
+
+# environments.yaml
+---
+# Here you can define all your clusters, so you can sync helmfiles using the `-e` flag:
+# helmfile -e cluster_test sync
+environments:
+  cluster_test:
+    kubeContext: kind-kind
+  cluster1:
+    kubeContext: kind-kind
+  cluster2:
+    kubeContext: kind-kind
+
+# repositories.yaml
+---
+# It's empty now, because we have nothing installed yet. But here we will add all the helm repos to avoid copy-pasting again.
+repositories:
+```
+
+Also there is a more complicated and less obvious import:
+```YAML
+helmfiles:
+  - path: {{.Environment.Name }}/helmfile.yaml
+```
+
+It imports the helmfiles that are not shared across all clusters. So if you want to install something only to `cluster-2`, you add it to `/cluster2/helmfile.yaml` and sync the main helmfile. I will show an example later.
+
+So we're all set and ready to begin installing new stuff.
+
+- Add a new repo to `repositories.yaml`:
+```YAML
+repositories:
+  - name: fairwinds-stable
+    url: https://charts.fairwinds.com/stable
+```
+- Add the new charts to `releases.yaml`:
+```YAML
+releases:
+  vpa: &vpa
+    name: vpa
+    chart: fairwinds-stable/vpa
+    version: 1.6.0
+    values:
+      - common/{{ .Release.Name }}/values.yaml
+    inherit:
+      template: crd-management-hook # <- Here we are using the crd-management-hook, so the VPA CRDs are managed outside of helm itself
+  goldilocks: &goldilocks
+    name: goldilocks
+    chart: fairwinds-stable/goldilocks
+    version: 6.5.0
+```
+
+And now we only need to say which clusters we want to install those releases to. So I'm adding this to the main `helmfile.yaml`:
+```YAML
+releases:
+  - <<: *vpa
+    installed: true
+    namespace: vpa-system
+    createNamespace: true
+```
+
+And to `cluster2/helmfile.yaml` and `cluster_test/helmfile.yaml` I'm adding:
+```YAML
+releases:
+  - <<: *goldilocks
+    installed: true
+    namespace: vpa-system
+```
+
+Let's see how the CI could be implemented with this setup:
+
+- If the branch is not `main`:
+  - `helmfile -e cluster_test apply`
+  - `helmfile -e cluster1 diff`
+  - `helmfile -e cluster2 diff`
+- If the branch is `main`:
+  - `helmfile -e cluster_test apply`
+  - `helmfile -e cluster1 apply`
+  - `helmfile -e cluster2 apply`
+
+{{< details "Diffs will look like this:" >}}
+```YAML
+Comparing release=goldilocks, chart=fairwinds-stable/goldilocks
+********************
+
+  Release was not present in Helm.  Diff will show entire contents as new.
+ +******************** +vpa-system, goldilocks-controller, ClusterRole (rbac.authorization.k8s.io) has been added: +- ++ # Source: goldilocks/templates/controller-clusterrole.yaml ++ apiVersion: rbac.authorization.k8s.io/v1 ++ kind: ClusterRole ++ metadata: ++ name: goldilocks-controller ++ labels: ++ app.kubernetes.io/name: goldilocks ++ helm.sh/chart: goldilocks-6.5.0 ++ app.kubernetes.io/instance: goldilocks ++ app.kubernetes.io/managed-by: Helm ++ app.kubernetes.io/component: controller ++ rules: ++ - apiGroups: ++ - 'apps' ++ resources: ++ - '*' ++ verbs: ++ - 'get' ++ - 'list' ++ - 'watch' ++ - apiGroups: ++ - '' ++ resources: ++ - 'namespaces' ++ - 'pods' ++ verbs: ++ - 'get' ++ - 'list' ++ - 'watch' ++ - apiGroups: ++ - 'autoscaling.k8s.io' ++ resources: ++ - 'verticalpodautoscalers' ++ verbs: ++ - 'get' ++ - 'list' ++ - 'create' ++ - 'delete' ++ - 'update' ++ - apiGroups: ++ - 'argoproj.io' ++ resources: ++ - rollouts ++ verbs: ++ - 'get' ++ - 'list' ++ - 'watch' +vpa-system, goldilocks-controller, ClusterRoleBinding (rbac.authorization.k8s.io) has been added: +- ++ # Source: goldilocks/templates/controller-clusterrolebinding.yaml ++ apiVersion: rbac.authorization.k8s.io/v1 ++ kind: ClusterRoleBinding ++ metadata: ++ name: goldilocks-controller ++ labels: ++ app.kubernetes.io/name: goldilocks ++ helm.sh/chart: goldilocks-6.5.0 ++ app.kubernetes.io/instance: goldilocks ++ app.kubernetes.io/managed-by: Helm ++ app.kubernetes.io/component: controller ++ roleRef: ++ apiGroup: rbac.authorization.k8s.io ++ kind: ClusterRole ++ name: goldilocks-controller ++ subjects: ++ - kind: ServiceAccount ++ name: goldilocks-controller ++ namespace: vpa-system +vpa-system, goldilocks-controller, Deployment (apps) has been added: +- ++ # Source: goldilocks/templates/controller-deployment.yaml ++ apiVersion: apps/v1 ++ kind: Deployment ++ metadata: ++ name: goldilocks-controller ++ labels: ++ app.kubernetes.io/name: goldilocks ++ helm.sh/chart: goldilocks-6.5.0 ++ app.kubernetes.io/instance: goldilocks ++ app.kubernetes.io/managed-by: Helm ++ app.kubernetes.io/component: controller ++ spec: ++ replicas: 1 ++ selector: ++ matchLabels: ++ app.kubernetes.io/name: goldilocks ++ app.kubernetes.io/instance: goldilocks ++ app.kubernetes.io/component: controller ++ template: ++ metadata: ++ labels: ++ app.kubernetes.io/name: goldilocks ++ app.kubernetes.io/instance: goldilocks ++ app.kubernetes.io/component: controller ++ spec: ++ serviceAccountName: goldilocks-controller ++ securityContext: ++ {} ++ containers: ++ - name: goldilocks ++ image: "us-docker.pkg.dev/fairwinds-ops/oss/goldilocks:v4.6.2" ++ imagePullPolicy: Always ++ command: ++ - /goldilocks ++ - controller ++ - -v2 ++ securityContext: ++ allowPrivilegeEscalation: false ++ capabilities: ++ drop: ++ - ALL ++ readOnlyRootFilesystem: true ++ runAsNonRoot: true ++ runAsUser: 10324 ++ resources: ++ limits: ++ cpu: 25m ++ memory: 32Mi ++ requests: ++ cpu: 25m ++ memory: 32Mi +vpa-system, goldilocks-controller, ServiceAccount (v1) has been added: +- ++ # Source: goldilocks/templates/controller-serviceaccount.yaml ++ apiVersion: v1 ++ kind: ServiceAccount ++ metadata: ++ name: goldilocks-controller ++ labels: ++ app.kubernetes.io/name: goldilocks ++ helm.sh/chart: goldilocks-6.5.0 ++ app.kubernetes.io/instance: goldilocks ++ app.kubernetes.io/managed-by: Helm ++ app.kubernetes.io/component: controller +vpa-system, goldilocks-dashboard, ClusterRole (rbac.authorization.k8s.io) has been added: +- ++ # Source: goldilocks/templates/dashboard-clusterrole.yaml 
++ apiVersion: rbac.authorization.k8s.io/v1 ++ kind: ClusterRole ++ metadata: ++ name: goldilocks-dashboard ++ labels: ++ app.kubernetes.io/name: goldilocks ++ helm.sh/chart: goldilocks-6.5.0 ++ app.kubernetes.io/instance: goldilocks ++ app.kubernetes.io/managed-by: Helm ++ app.kubernetes.io/component: dashboard ++ rules: ++ - apiGroups: ++ - 'autoscaling.k8s.io' ++ resources: ++ - 'verticalpodautoscalers' ++ verbs: ++ - 'get' ++ - 'list' ++ - apiGroups: ++ - 'apps' ++ resources: ++ - '*' ++ verbs: ++ - 'get' ++ - 'list' ++ - apiGroups: ++ - '' ++ resources: ++ - 'namespaces' ++ - 'pods' ++ verbs: ++ - 'get' ++ - 'list' ++ - apiGroups: ++ - 'argoproj.io' ++ resources: ++ - rollouts ++ verbs: ++ - 'get' ++ - 'list' +vpa-system, goldilocks-dashboard, ClusterRoleBinding (rbac.authorization.k8s.io) has been added: +- ++ # Source: goldilocks/templates/dashboard-clusterrolebinding.yaml ++ apiVersion: rbac.authorization.k8s.io/v1 ++ kind: ClusterRoleBinding ++ metadata: ++ name: goldilocks-dashboard ++ labels: ++ app.kubernetes.io/name: goldilocks ++ helm.sh/chart: goldilocks-6.5.0 ++ app.kubernetes.io/instance: goldilocks ++ app.kubernetes.io/managed-by: Helm ++ app.kubernetes.io/component: dashboard ++ roleRef: ++ apiGroup: rbac.authorization.k8s.io ++ kind: ClusterRole ++ name: goldilocks-dashboard ++ subjects: ++ - kind: ServiceAccount ++ name: goldilocks-dashboard ++ namespace: vpa-system +vpa-system, goldilocks-dashboard, Deployment (apps) has been added: +- ++ # Source: goldilocks/templates/dashboard-deployment.yaml ++ apiVersion: apps/v1 ++ kind: Deployment ++ metadata: ++ name: goldilocks-dashboard ++ labels: ++ app.kubernetes.io/name: goldilocks ++ helm.sh/chart: goldilocks-6.5.0 ++ app.kubernetes.io/instance: goldilocks ++ app.kubernetes.io/managed-by: Helm ++ app.kubernetes.io/component: dashboard ++ spec: ++ replicas: 2 ++ selector: ++ matchLabels: ++ app.kubernetes.io/name: goldilocks ++ app.kubernetes.io/instance: goldilocks ++ app.kubernetes.io/component: dashboard ++ template: ++ metadata: ++ labels: ++ app.kubernetes.io/name: goldilocks ++ app.kubernetes.io/instance: goldilocks ++ app.kubernetes.io/component: dashboard ++ spec: ++ serviceAccountName: goldilocks-dashboard ++ securityContext: ++ {} ++ containers: ++ - name: goldilocks ++ image: "us-docker.pkg.dev/fairwinds-ops/oss/goldilocks:v4.6.2" ++ imagePullPolicy: Always ++ command: ++ - /goldilocks ++ - dashboard ++ - --exclude-containers=linkerd-proxy,istio-proxy ++ - -v2 ++ securityContext: ++ allowPrivilegeEscalation: false ++ capabilities: ++ drop: ++ - ALL ++ readOnlyRootFilesystem: true ++ runAsNonRoot: true ++ runAsUser: 10324 ++ ports: ++ - name: http ++ containerPort: 8080 ++ protocol: TCP ++ livenessProbe: ++ httpGet: ++ path: /health ++ port: http ++ readinessProbe: ++ httpGet: ++ path: /health ++ port: http ++ resources: ++ limits: ++ cpu: 25m ++ memory: 32Mi ++ requests: ++ cpu: 25m ++ memory: 32Mi +vpa-system, goldilocks-dashboard, Service (v1) has been added: +- ++ # Source: goldilocks/templates/dashboard-service.yaml ++ apiVersion: v1 ++ kind: Service ++ metadata: ++ name: goldilocks-dashboard ++ labels: ++ app.kubernetes.io/name: goldilocks ++ helm.sh/chart: goldilocks-6.5.0 ++ app.kubernetes.io/instance: goldilocks ++ app.kubernetes.io/managed-by: Helm ++ app.kubernetes.io/component: dashboard ++ spec: ++ type: ClusterIP ++ ports: ++ - port: 80 ++ targetPort: http ++ protocol: TCP ++ name: http ++ selector: ++ app.kubernetes.io/name: goldilocks ++ app.kubernetes.io/instance: goldilocks ++ 
app.kubernetes.io/component: dashboard +vpa-system, goldilocks-dashboard, ServiceAccount (v1) has been added: +- ++ # Source: goldilocks/templates/dashboard-serviceaccount.yaml ++ apiVersion: v1 ++ kind: ServiceAccount ++ metadata: ++ name: goldilocks-dashboard ++ labels: ++ app.kubernetes.io/name: goldilocks ++ helm.sh/chart: goldilocks-6.5.0 ++ app.kubernetes.io/instance: goldilocks ++ app.kubernetes.io/managed-by: Helm ++ app.kubernetes.io/component: dashboard + + +hook[prepare] logs | diff -u -N /var/folders/w1/27ptcr29547f0g8732kmffwm0000gn/T/LIVE-4051758900/apiextensions.k8s.io.v1.CustomResourceDefinition..verticalpodautoscalercheckpoints.autoscaling.k8s.io /var/folders/w1/27ptcr29547f0g8732kmffwm0000gn/T/MERGED-3664876659/apiextensions.k8s.io.v1.CustomResourceDefinition..verticalpodautoscalercheckpoints.autoscaling.k8s.io +hook[prepare] logs | --- /var/folders/w1/27ptcr29547f0g8732kmffwm0000gn/T/LIVE-4051758900/apiextensions.k8s.io.v1.CustomResourceDefinition..verticalpodautoscalercheckpoints.autoscaling.k8s.io 2023-02-17 13:15:29 +hook[prepare] logs | +++ /var/folders/w1/27ptcr29547f0g8732kmffwm0000gn/T/MERGED-3664876659/apiextensions.k8s.io.v1.CustomResourceDefinition..verticalpodautoscalercheckpoints.autoscaling.k8s.io 2023-02-17 13:15:29 +hook[prepare] logs | @@ -0,0 +1,216 @@ +hook[prepare] logs | +apiVersion: apiextensions.k8s.io/v1 +hook[prepare] logs | +kind: CustomResourceDefinition +hook[prepare] logs | +metadata: +hook[prepare] logs | + annotations: +hook[prepare] logs | + api-approved.kubernetes.io: https://github.com/kubernetes/kubernetes/pull/63797 +hook[prepare] logs | + controller-gen.kubebuilder.io/version: v0.4.0 +hook[prepare] logs | + creationTimestamp: "2023-02-17T12:15:29Z" +hook[prepare] logs | + generation: 1 +hook[prepare] logs | + name: verticalpodautoscalercheckpoints.autoscaling.k8s.io +hook[prepare] logs | + uid: bd3d2d46-b0c1-48c4-bc72-968cebfd7640 +hook[prepare] logs | +spec: +hook[prepare] logs | + conversion: +hook[prepare] logs | + strategy: None +hook[prepare] logs | + group: autoscaling.k8s.io +hook[prepare] logs | + names: +hook[prepare] logs | + kind: VerticalPodAutoscalerCheckpoint +hook[prepare] logs | + listKind: VerticalPodAutoscalerCheckpointList +hook[prepare] logs | + plural: verticalpodautoscalercheckpoints +hook[prepare] logs | + shortNames: +hook[prepare] logs | + - vpacheckpoint +hook[prepare] logs | + singular: verticalpodautoscalercheckpoint +hook[prepare] logs | + scope: Namespaced +hook[prepare] logs | + versions: +hook[prepare] logs | + - name: v1 +hook[prepare] logs | + schema: +hook[prepare] logs | + openAPIV3Schema: +hook[prepare] logs | + description: VerticalPodAutoscalerCheckpoint is the checkpoint of the internal +hook[prepare] logs | + state of VPA that is used for recovery after recommender's restart. +hook[prepare] logs | + properties: +hook[prepare] logs | + apiVersion: +hook[prepare] logs | + description: 'APIVersion defines the versioned schema of this representation +hook[prepare] logs | + of an object. Servers should convert recognized schemas to the latest +hook[prepare] logs | + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' +hook[prepare] logs | + type: string +hook[prepare] logs | + kind: +hook[prepare] logs | + description: 'Kind is a string value representing the REST resource this +hook[prepare] logs | + object represents. 
Servers may infer this from the endpoint the client +hook[prepare] logs | + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' +hook[prepare] logs | + type: string +hook[prepare] logs | + metadata: +hook[prepare] logs | + type: object +hook[prepare] logs | + spec: +hook[prepare] logs | + description: 'Specification of the checkpoint. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.' +hook[prepare] logs | + properties: +hook[prepare] logs | + containerName: +hook[prepare] logs | + description: Name of the checkpointed container. +hook[prepare] logs | + type: string +hook[prepare] logs | + vpaObjectName: +hook[prepare] logs | + description: Name of the VPA object that stored VerticalPodAutoscalerCheckpoint +hook[prepare] logs | + object. +hook[prepare] logs | + type: string +hook[prepare] logs | + type: object +hook[prepare] logs | + status: +hook[prepare] logs | + description: Data of the checkpoint. +hook[prepare] logs | + properties: +hook[prepare] logs | + cpuHistogram: +hook[prepare] logs | + description: Checkpoint of histogram for consumption of CPU. +hook[prepare] logs | + properties: +hook[prepare] logs | + bucketWeights: +hook[prepare] logs | + description: Map from bucket index to bucket weight. +hook[prepare] logs | + type: object +hook[prepare] logs | + x-kubernetes-preserve-unknown-fields: true +hook[prepare] logs | + referenceTimestamp: +hook[prepare] logs | + description: Reference timestamp for samples collected within +hook[prepare] logs | + this histogram. +hook[prepare] logs | + format: date-time +hook[prepare] logs | + nullable: true +hook[prepare] logs | + type: string +hook[prepare] logs | + totalWeight: +hook[prepare] logs | + description: Sum of samples to be used as denominator for weights +hook[prepare] logs | + from BucketWeights. +hook[prepare] logs | + type: number +hook[prepare] logs | + type: object +hook[prepare] logs | + firstSampleStart: +hook[prepare] logs | + description: Timestamp of the fist sample from the histograms. +hook[prepare] logs | + format: date-time +hook[prepare] logs | + nullable: true +hook[prepare] logs | + type: string +hook[prepare] logs | + lastSampleStart: +hook[prepare] logs | + description: Timestamp of the last sample from the histograms. +hook[prepare] logs | + format: date-time +hook[prepare] logs | + nullable: true +hook[prepare] logs | + type: string +hook[prepare] logs | + lastUpdateTime: +hook[prepare] logs | + description: The time when the status was last refreshed. +hook[prepare] logs | + format: date-time +hook[prepare] logs | + nullable: true +hook[prepare] logs | + type: string +hook[prepare] logs | + memoryHistogram: +hook[prepare] logs | + description: Checkpoint of histogram for consumption of memory. +hook[prepare] logs | + properties: +hook[prepare] logs | + bucketWeights: +hook[prepare] logs | + description: Map from bucket index to bucket weight. +hook[prepare] logs | + type: object +hook[prepare] logs | + x-kubernetes-preserve-unknown-fields: true +hook[prepare] logs | + referenceTimestamp: +hook[prepare] logs | + description: Reference timestamp for samples collected within +hook[prepare] logs | + this histogram. 
+hook[prepare] logs | + format: date-time +hook[prepare] logs | + nullable: true +hook[prepare] logs | + type: string +hook[prepare] logs | + totalWeight: +hook[prepare] logs | + description: Sum of samples to be used as denominator for weights +hook[prepare] logs | + from BucketWeights. +hook[prepare] logs | + type: number +hook[prepare] logs | + type: object +hook[prepare] logs | + totalSamplesCount: +hook[prepare] logs | + description: Total number of samples in the histograms. +hook[prepare] logs | + type: integer +hook[prepare] logs | + version: +hook[prepare] logs | + description: Version of the format of the stored data. +hook[prepare] logs | + type: string +hook[prepare] logs | + type: object +hook[prepare] logs | + type: object +hook[prepare] logs | + served: true +hook[prepare] logs | + storage: true +hook[prepare] logs | + - name: v1beta2 +hook[prepare] logs | + schema: +hook[prepare] logs | + openAPIV3Schema: +hook[prepare] logs | + description: VerticalPodAutoscalerCheckpoint is the checkpoint of the internal +hook[prepare] logs | + state of VPA that is used for recovery after recommender's restart. +hook[prepare] logs | + properties: +hook[prepare] logs | + apiVersion: +hook[prepare] logs | + description: 'APIVersion defines the versioned schema of this representation +hook[prepare] logs | + of an object. Servers should convert recognized schemas to the latest +hook[prepare] logs | + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' +hook[prepare] logs | + type: string +hook[prepare] logs | + kind: +hook[prepare] logs | + description: 'Kind is a string value representing the REST resource this +hook[prepare] logs | + object represents. Servers may infer this from the endpoint the client +hook[prepare] logs | + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' +hook[prepare] logs | + type: string +hook[prepare] logs | + metadata: +hook[prepare] logs | + type: object +hook[prepare] logs | + spec: +hook[prepare] logs | + description: 'Specification of the checkpoint. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.' +hook[prepare] logs | + properties: +hook[prepare] logs | + containerName: +hook[prepare] logs | + description: Name of the checkpointed container. +hook[prepare] logs | + type: string +hook[prepare] logs | + vpaObjectName: +hook[prepare] logs | + description: Name of the VPA object that stored VerticalPodAutoscalerCheckpoint +hook[prepare] logs | + object. +hook[prepare] logs | + type: string +hook[prepare] logs | + type: object +hook[prepare] logs | + status: +hook[prepare] logs | + description: Data of the checkpoint. +hook[prepare] logs | + properties: +hook[prepare] logs | + cpuHistogram: +hook[prepare] logs | + description: Checkpoint of histogram for consumption of CPU. +hook[prepare] logs | + properties: +hook[prepare] logs | + bucketWeights: +hook[prepare] logs | + description: Map from bucket index to bucket weight. +hook[prepare] logs | + type: object +hook[prepare] logs | + x-kubernetes-preserve-unknown-fields: true +hook[prepare] logs | + referenceTimestamp: +hook[prepare] logs | + description: Reference timestamp for samples collected within +hook[prepare] logs | + this histogram. 
+hook[prepare] logs | + format: date-time +hook[prepare] logs | + nullable: true +hook[prepare] logs | + type: string +hook[prepare] logs | + totalWeight: +hook[prepare] logs | + description: Sum of samples to be used as denominator for weights +hook[prepare] logs | + from BucketWeights. +hook[prepare] logs | + type: number +hook[prepare] logs | + type: object +hook[prepare] logs | + firstSampleStart: +hook[prepare] logs | + description: Timestamp of the fist sample from the histograms. +hook[prepare] logs | + format: date-time +hook[prepare] logs | + nullable: true +hook[prepare] logs | + type: string +hook[prepare] logs | + lastSampleStart: +hook[prepare] logs | + description: Timestamp of the last sample from the histograms. +hook[prepare] logs | + format: date-time +hook[prepare] logs | + nullable: true +hook[prepare] logs | + type: string +hook[prepare] logs | + lastUpdateTime: +hook[prepare] logs | + description: The time when the status was last refreshed. +hook[prepare] logs | + format: date-time +hook[prepare] logs | + nullable: true +hook[prepare] logs | + type: string +hook[prepare] logs | + memoryHistogram: +hook[prepare] logs | + description: Checkpoint of histogram for consumption of memory. +hook[prepare] logs | + properties: +hook[prepare] logs | + bucketWeights: +hook[prepare] logs | + description: Map from bucket index to bucket weight. +hook[prepare] logs | + type: object +hook[prepare] logs | + x-kubernetes-preserve-unknown-fields: true +hook[prepare] logs | + referenceTimestamp: +hook[prepare] logs | + description: Reference timestamp for samples collected within +hook[prepare] logs | + this histogram. +hook[prepare] logs | + format: date-time +hook[prepare] logs | + nullable: true +hook[prepare] logs | + type: string +hook[prepare] logs | + totalWeight: +hook[prepare] logs | + description: Sum of samples to be used as denominator for weights +hook[prepare] logs | + from BucketWeights. +hook[prepare] logs | + type: number +hook[prepare] logs | + type: object +hook[prepare] logs | + totalSamplesCount: +hook[prepare] logs | + description: Total number of samples in the histograms. +hook[prepare] logs | + type: integer +hook[prepare] logs | + version: +hook[prepare] logs | + description: Version of the format of the stored data. 
+hook[prepare] logs | + type: string +hook[prepare] logs | + type: object +hook[prepare] logs | + type: object +hook[prepare] logs | + served: true +hook[prepare] logs | + storage: false +hook[prepare] logs | +status: +hook[prepare] logs | + acceptedNames: +hook[prepare] logs | + kind: "" +hook[prepare] logs | + plural: "" +hook[prepare] logs | + conditions: null +hook[prepare] logs | + storedVersions: +hook[prepare] logs | + - v1 +hook[prepare] logs | diff -u -N /var/folders/w1/27ptcr29547f0g8732kmffwm0000gn/T/LIVE-4051758900/apiextensions.k8s.io.v1.CustomResourceDefinition..verticalpodautoscalers.autoscaling.k8s.io /var/folders/w1/27ptcr29547f0g8732kmffwm0000gn/T/MERGED-3664876659/apiextensions.k8s.io.v1.CustomResourceDefinition..verticalpodautoscalers.autoscaling.k8s.io +hook[prepare] logs | --- /var/folders/w1/27ptcr29547f0g8732kmffwm0000gn/T/LIVE-4051758900/apiextensions.k8s.io.v1.CustomResourceDefinition..verticalpodautoscalers.autoscaling.k8s.io 2023-02-17 13:15:29 +hook[prepare] logs | +++ /var/folders/w1/27ptcr29547f0g8732kmffwm0000gn/T/MERGED-3664876659/apiextensions.k8s.io.v1.CustomResourceDefinition..verticalpodautoscalers.autoscaling.k8s.io 2023-02-17 13:15:29 +hook[prepare] logs | @@ -0,0 +1,550 @@ +hook[prepare] logs | +apiVersion: apiextensions.k8s.io/v1 +hook[prepare] logs | +kind: CustomResourceDefinition +hook[prepare] logs | +metadata: +hook[prepare] logs | + annotations: +hook[prepare] logs | + api-approved.kubernetes.io: https://github.com/kubernetes/kubernetes/pull/63797 +hook[prepare] logs | + controller-gen.kubebuilder.io/version: v0.4.0 +hook[prepare] logs | + creationTimestamp: "2023-02-17T12:15:29Z" +hook[prepare] logs | + generation: 1 +hook[prepare] logs | + name: verticalpodautoscalers.autoscaling.k8s.io +hook[prepare] logs | + uid: 91c297f7-3d26-43f3-bce8-14885ca00c10 +hook[prepare] logs | +spec: +hook[prepare] logs | + conversion: +hook[prepare] logs | + strategy: None +hook[prepare] logs | + group: autoscaling.k8s.io +hook[prepare] logs | + names: +hook[prepare] logs | + kind: VerticalPodAutoscaler +hook[prepare] logs | + listKind: VerticalPodAutoscalerList +hook[prepare] logs | + plural: verticalpodautoscalers +hook[prepare] logs | + shortNames: +hook[prepare] logs | + - vpa +hook[prepare] logs | + singular: verticalpodautoscaler +hook[prepare] logs | + scope: Namespaced +hook[prepare] logs | + versions: +hook[prepare] logs | + - additionalPrinterColumns: +hook[prepare] logs | + - jsonPath: .spec.updatePolicy.updateMode +hook[prepare] logs | + name: Mode +hook[prepare] logs | + type: string +hook[prepare] logs | + - jsonPath: .status.recommendation.containerRecommendations[0].target.cpu +hook[prepare] logs | + name: CPU +hook[prepare] logs | + type: string +hook[prepare] logs | + - jsonPath: .status.recommendation.containerRecommendations[0].target.memory +hook[prepare] logs | + name: Mem +hook[prepare] logs | + type: string +hook[prepare] logs | + - jsonPath: .status.conditions[?(@.type=='RecommendationProvided')].status +hook[prepare] logs | + name: Provided +hook[prepare] logs | + type: string +hook[prepare] logs | + - jsonPath: .metadata.creationTimestamp +hook[prepare] logs | + name: Age +hook[prepare] logs | + type: date +hook[prepare] logs | + name: v1 +hook[prepare] logs | + schema: +hook[prepare] logs | + openAPIV3Schema: +hook[prepare] logs | + description: VerticalPodAutoscaler is the configuration for a vertical pod +hook[prepare] logs | + autoscaler, which automatically manages pod resources based on historical +hook[prepare] logs | + and 
real time resource utilization. +hook[prepare] logs | + properties: +hook[prepare] logs | + apiVersion: +hook[prepare] logs | + description: 'APIVersion defines the versioned schema of this representation +hook[prepare] logs | + of an object. Servers should convert recognized schemas to the latest +hook[prepare] logs | + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' +hook[prepare] logs | + type: string +hook[prepare] logs | + kind: +hook[prepare] logs | + description: 'Kind is a string value representing the REST resource this +hook[prepare] logs | + object represents. Servers may infer this from the endpoint the client +hook[prepare] logs | + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' +hook[prepare] logs | + type: string +hook[prepare] logs | + metadata: +hook[prepare] logs | + type: object +hook[prepare] logs | + spec: +hook[prepare] logs | + description: 'Specification of the behavior of the autoscaler. More info: +hook[prepare] logs | + https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.' +hook[prepare] logs | + properties: +hook[prepare] logs | + recommenders: +hook[prepare] logs | + description: Recommender responsible for generating recommendation +hook[prepare] logs | + for this object. List should be empty (then the default recommender +hook[prepare] logs | + will generate the recommendation) or contain exactly one recommender. +hook[prepare] logs | + items: +hook[prepare] logs | + description: VerticalPodAutoscalerRecommenderSelector points to +hook[prepare] logs | + a specific Vertical Pod Autoscaler recommender. In the future +hook[prepare] logs | + it might pass parameters to the recommender. +hook[prepare] logs | + properties: +hook[prepare] logs | + name: +hook[prepare] logs | + description: Name of the recommender responsible for generating +hook[prepare] logs | + recommendation for this object. +hook[prepare] logs | + type: string +hook[prepare] logs | + required: +hook[prepare] logs | + - name +hook[prepare] logs | + type: object +hook[prepare] logs | + type: array +hook[prepare] logs | + resourcePolicy: +hook[prepare] logs | + description: Controls how the autoscaler computes recommended resources. +hook[prepare] logs | + The resource policy may be used to set constraints on the recommendations +hook[prepare] logs | + for individual containers. If not specified, the autoscaler computes +hook[prepare] logs | + recommended resources for all containers in the pod, without additional +hook[prepare] logs | + constraints. +hook[prepare] logs | + properties: +hook[prepare] logs | + containerPolicies: +hook[prepare] logs | + description: Per-container resource policies. +hook[prepare] logs | + items: +hook[prepare] logs | + description: ContainerResourcePolicy controls how autoscaler +hook[prepare] logs | + computes the recommended resources for a specific container. +hook[prepare] logs | + properties: +hook[prepare] logs | + containerName: +hook[prepare] logs | + description: Name of the container or DefaultContainerResourcePolicy, +hook[prepare] logs | + in which case the policy is used by the containers that +hook[prepare] logs | + don't have their own policy specified. 
+hook[prepare] logs | + type: string +hook[prepare] logs | + controlledResources: +hook[prepare] logs | + description: Specifies the type of recommendations that +hook[prepare] logs | + will be computed (and possibly applied) by VPA. If not +hook[prepare] logs | + specified, the default of [ResourceCPU, ResourceMemory] +hook[prepare] logs | + will be used. +hook[prepare] logs | + items: +hook[prepare] logs | + description: ResourceName is the name identifying various +hook[prepare] logs | + resources in a ResourceList. +hook[prepare] logs | + type: string +hook[prepare] logs | + type: array +hook[prepare] logs | + controlledValues: +hook[prepare] logs | + description: Specifies which resource values should be controlled. +hook[prepare] logs | + The default is "RequestsAndLimits". +hook[prepare] logs | + enum: +hook[prepare] logs | + - RequestsAndLimits +hook[prepare] logs | + - RequestsOnly +hook[prepare] logs | + type: string +hook[prepare] logs | + maxAllowed: +hook[prepare] logs | + additionalProperties: +hook[prepare] logs | + anyOf: +hook[prepare] logs | + - type: integer +hook[prepare] logs | + - type: string +hook[prepare] logs | + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ +hook[prepare] logs | + x-kubernetes-int-or-string: true +hook[prepare] logs | + description: Specifies the maximum amount of resources that +hook[prepare] logs | + will be recommended for the container. The default is +hook[prepare] logs | + no maximum. +hook[prepare] logs | + type: object +hook[prepare] logs | + minAllowed: +hook[prepare] logs | + additionalProperties: +hook[prepare] logs | + anyOf: +hook[prepare] logs | + - type: integer +hook[prepare] logs | + - type: string +hook[prepare] logs | + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ +hook[prepare] logs | + x-kubernetes-int-or-string: true +hook[prepare] logs | + description: Specifies the minimal amount of resources that +hook[prepare] logs | + will be recommended for the container. The default is +hook[prepare] logs | + no minimum. +hook[prepare] logs | + type: object +hook[prepare] logs | + mode: +hook[prepare] logs | + description: Whether autoscaler is enabled for the container. +hook[prepare] logs | + The default is "Auto". +hook[prepare] logs | + enum: +hook[prepare] logs | + - Auto +hook[prepare] logs | + - "Off" +hook[prepare] logs | + type: string +hook[prepare] logs | + type: object +hook[prepare] logs | + type: array +hook[prepare] logs | + type: object +hook[prepare] logs | + targetRef: +hook[prepare] logs | + description: TargetRef points to the controller managing the set of +hook[prepare] logs | + pods for the autoscaler to control - e.g. Deployment, StatefulSet. +hook[prepare] logs | + VerticalPodAutoscaler can be targeted at controller implementing +hook[prepare] logs | + scale subresource (the pod set is retrieved from the controller's +hook[prepare] logs | + ScaleStatus) or some well known controllers (e.g. for DaemonSet +hook[prepare] logs | + the pod set is read from the controller's spec). If VerticalPodAutoscaler +hook[prepare] logs | + cannot use specified target it will report ConfigUnsupported condition. +hook[prepare] logs | + Note that VerticalPodAutoscaler does not require full implementation +hook[prepare] logs | + of scale subresource - it will not use it to modify the replica +hook[prepare] logs | + count. 
The only thing retrieved is a label selector matching pods +hook[prepare] logs | + grouped by the target resource. +hook[prepare] logs | + properties: +hook[prepare] logs | + apiVersion: +hook[prepare] logs | + description: API version of the referent +hook[prepare] logs | + type: string +hook[prepare] logs | + kind: +hook[prepare] logs | + description: 'Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"' +hook[prepare] logs | + type: string +hook[prepare] logs | + name: +hook[prepare] logs | + description: 'Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names' +hook[prepare] logs | + type: string +hook[prepare] logs | + required: +hook[prepare] logs | + - kind +hook[prepare] logs | + - name +hook[prepare] logs | + type: object +hook[prepare] logs | + updatePolicy: +hook[prepare] logs | + description: Describes the rules on how changes are applied to the +hook[prepare] logs | + pods. If not specified, all fields in the `PodUpdatePolicy` are +hook[prepare] logs | + set to their default values. +hook[prepare] logs | + properties: +hook[prepare] logs | + minReplicas: +hook[prepare] logs | + description: Minimal number of replicas which need to be alive +hook[prepare] logs | + for Updater to attempt pod eviction (pending other checks like +hook[prepare] logs | + PDB). Only positive values are allowed. Overrides global '--min-replicas' +hook[prepare] logs | + flag. +hook[prepare] logs | + format: int32 +hook[prepare] logs | + type: integer +hook[prepare] logs | + updateMode: +hook[prepare] logs | + description: Controls when autoscaler applies changes to the pod +hook[prepare] logs | + resources. The default is 'Auto'. +hook[prepare] logs | + enum: +hook[prepare] logs | + - "Off" +hook[prepare] logs | + - Initial +hook[prepare] logs | + - Recreate +hook[prepare] logs | + - Auto +hook[prepare] logs | + type: string +hook[prepare] logs | + type: object +hook[prepare] logs | + required: +hook[prepare] logs | + - targetRef +hook[prepare] logs | + type: object +hook[prepare] logs | + status: +hook[prepare] logs | + description: Current information about the autoscaler. +hook[prepare] logs | + properties: +hook[prepare] logs | + conditions: +hook[prepare] logs | + description: Conditions is the set of conditions required for this +hook[prepare] logs | + autoscaler to scale its target, and indicates whether or not those +hook[prepare] logs | + conditions are met. +hook[prepare] logs | + items: +hook[prepare] logs | + description: VerticalPodAutoscalerCondition describes the state +hook[prepare] logs | + of a VerticalPodAutoscaler at a certain point. +hook[prepare] logs | + properties: +hook[prepare] logs | + lastTransitionTime: +hook[prepare] logs | + description: lastTransitionTime is the last time the condition +hook[prepare] logs | + transitioned from one status to another +hook[prepare] logs | + format: date-time +hook[prepare] logs | + type: string +hook[prepare] logs | + message: +hook[prepare] logs | + description: message is a human-readable explanation containing +hook[prepare] logs | + details about the transition +hook[prepare] logs | + type: string +hook[prepare] logs | + reason: +hook[prepare] logs | + description: reason is the reason for the condition's last transition. 
+hook[prepare] logs | + type: string +hook[prepare] logs | + status: +hook[prepare] logs | + description: status is the status of the condition (True, False, +hook[prepare] logs | + Unknown) +hook[prepare] logs | + type: string +hook[prepare] logs | + type: +hook[prepare] logs | + description: type describes the current condition +hook[prepare] logs | + type: string +hook[prepare] logs | + required: +hook[prepare] logs | + - status +hook[prepare] logs | + - type +hook[prepare] logs | + type: object +hook[prepare] logs | + type: array +hook[prepare] logs | + recommendation: +hook[prepare] logs | + description: The most recently computed amount of resources recommended +hook[prepare] logs | + by the autoscaler for the controlled pods. +hook[prepare] logs | + properties: +hook[prepare] logs | + containerRecommendations: +hook[prepare] logs | + description: Resources recommended by the autoscaler for each +hook[prepare] logs | + container. +hook[prepare] logs | + items: +hook[prepare] logs | + description: RecommendedContainerResources is the recommendation +hook[prepare] logs | + of resources computed by autoscaler for a specific container. +hook[prepare] logs | + Respects the container resource policy if present in the spec. +hook[prepare] logs | + In particular the recommendation is not produced for containers +hook[prepare] logs | + with `ContainerScalingMode` set to 'Off'. +hook[prepare] logs | + properties: +hook[prepare] logs | + containerName: +hook[prepare] logs | + description: Name of the container. +hook[prepare] logs | + type: string +hook[prepare] logs | + lowerBound: +hook[prepare] logs | + additionalProperties: +hook[prepare] logs | + anyOf: +hook[prepare] logs | + - type: integer +hook[prepare] logs | + - type: string +hook[prepare] logs | + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ +hook[prepare] logs | + x-kubernetes-int-or-string: true +hook[prepare] logs | + description: Minimum recommended amount of resources. Observes +hook[prepare] logs | + ContainerResourcePolicy. This amount is not guaranteed +hook[prepare] logs | + to be sufficient for the application to operate in a stable +hook[prepare] logs | + way, however running with less resources is likely to +hook[prepare] logs | + have significant impact on performance/availability. +hook[prepare] logs | + type: object +hook[prepare] logs | + target: +hook[prepare] logs | + additionalProperties: +hook[prepare] logs | + anyOf: +hook[prepare] logs | + - type: integer +hook[prepare] logs | + - type: string +hook[prepare] logs | + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ +hook[prepare] logs | + x-kubernetes-int-or-string: true +hook[prepare] logs | + description: Recommended amount of resources. Observes ContainerResourcePolicy. 
+hook[prepare] logs | + type: object +hook[prepare] logs | + uncappedTarget: +hook[prepare] logs | + additionalProperties: +hook[prepare] logs | + anyOf: +hook[prepare] logs | + - type: integer +hook[prepare] logs | + - type: string +hook[prepare] logs | + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ +hook[prepare] logs | + x-kubernetes-int-or-string: true +hook[prepare] logs | + description: The most recent recommended resources target +hook[prepare] logs | + computed by the autoscaler for the controlled pods, based +hook[prepare] logs | + only on actual resource usage, not taking into account +hook[prepare] logs | + the ContainerResourcePolicy. May differ from the Recommendation +hook[prepare] logs | + if the actual resource usage causes the target to violate +hook[prepare] logs | + the ContainerResourcePolicy (lower than MinAllowed or +hook[prepare] logs | + higher that MaxAllowed). Used only as status indication, +hook[prepare] logs | + will not affect actual resource assignment. +hook[prepare] logs | + type: object +hook[prepare] logs | + upperBound: +hook[prepare] logs | + additionalProperties: +hook[prepare] logs | + anyOf: +hook[prepare] logs | + - type: integer +hook[prepare] logs | + - type: string +hook[prepare] logs | + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ +hook[prepare] logs | + x-kubernetes-int-or-string: true +hook[prepare] logs | + description: Maximum recommended amount of resources. Observes +hook[prepare] logs | + ContainerResourcePolicy. Any resources allocated beyond +hook[prepare] logs | + this value are likely wasted. This value may be larger +hook[prepare] logs | + than the maximum amount of application is actually capable +hook[prepare] logs | + of consuming. +hook[prepare] logs | + type: object +hook[prepare] logs | + required: +hook[prepare] logs | + - target +hook[prepare] logs | + type: object +hook[prepare] logs | + type: array +hook[prepare] logs | + type: object +hook[prepare] logs | + type: object +hook[prepare] logs | + required: +hook[prepare] logs | + - spec +hook[prepare] logs | + type: object +hook[prepare] logs | + served: true +hook[prepare] logs | + storage: true +hook[prepare] logs | + subresources: {} +hook[prepare] logs | + - name: v1beta2 +hook[prepare] logs | + schema: +hook[prepare] logs | + openAPIV3Schema: +hook[prepare] logs | + description: VerticalPodAutoscaler is the configuration for a vertical pod +hook[prepare] logs | + autoscaler, which automatically manages pod resources based on historical +hook[prepare] logs | + and real time resource utilization. +hook[prepare] logs | + properties: +hook[prepare] logs | + apiVersion: +hook[prepare] logs | + description: 'APIVersion defines the versioned schema of this representation +hook[prepare] logs | + of an object. Servers should convert recognized schemas to the latest +hook[prepare] logs | + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' +hook[prepare] logs | + type: string +hook[prepare] logs | + kind: +hook[prepare] logs | + description: 'Kind is a string value representing the REST resource this +hook[prepare] logs | + object represents. Servers may infer this from the endpoint the client +hook[prepare] logs | + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' +hook[prepare] logs | + type: string +hook[prepare] logs | + metadata: +hook[prepare] logs | + type: object +hook[prepare] logs | + spec: +hook[prepare] logs | + description: 'Specification of the behavior of the autoscaler. More info: +hook[prepare] logs | + https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.' +hook[prepare] logs | + properties: +hook[prepare] logs | + resourcePolicy: +hook[prepare] logs | + description: Controls how the autoscaler computes recommended resources. +hook[prepare] logs | + The resource policy may be used to set constraints on the recommendations +hook[prepare] logs | + for individual containers. If not specified, the autoscaler computes +hook[prepare] logs | + recommended resources for all containers in the pod, without additional +hook[prepare] logs | + constraints. +hook[prepare] logs | + properties: +hook[prepare] logs | + containerPolicies: +hook[prepare] logs | + description: Per-container resource policies. +hook[prepare] logs | + items: +hook[prepare] logs | + description: ContainerResourcePolicy controls how autoscaler +hook[prepare] logs | + computes the recommended resources for a specific container. +hook[prepare] logs | + properties: +hook[prepare] logs | + containerName: +hook[prepare] logs | + description: Name of the container or DefaultContainerResourcePolicy, +hook[prepare] logs | + in which case the policy is used by the containers that +hook[prepare] logs | + don't have their own policy specified. +hook[prepare] logs | + type: string +hook[prepare] logs | + maxAllowed: +hook[prepare] logs | + additionalProperties: +hook[prepare] logs | + anyOf: +hook[prepare] logs | + - type: integer +hook[prepare] logs | + - type: string +hook[prepare] logs | + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ +hook[prepare] logs | + x-kubernetes-int-or-string: true +hook[prepare] logs | + description: Specifies the maximum amount of resources that +hook[prepare] logs | + will be recommended for the container. The default is +hook[prepare] logs | + no maximum. +hook[prepare] logs | + type: object +hook[prepare] logs | + minAllowed: +hook[prepare] logs | + additionalProperties: +hook[prepare] logs | + anyOf: +hook[prepare] logs | + - type: integer +hook[prepare] logs | + - type: string +hook[prepare] logs | + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ +hook[prepare] logs | + x-kubernetes-int-or-string: true +hook[prepare] logs | + description: Specifies the minimal amount of resources that +hook[prepare] logs | + will be recommended for the container. The default is +hook[prepare] logs | + no minimum. +hook[prepare] logs | + type: object +hook[prepare] logs | + mode: +hook[prepare] logs | + description: Whether autoscaler is enabled for the container. +hook[prepare] logs | + The default is "Auto". +hook[prepare] logs | + enum: +hook[prepare] logs | + - Auto +hook[prepare] logs | + - "Off" +hook[prepare] logs | + type: string +hook[prepare] logs | + type: object +hook[prepare] logs | + type: array +hook[prepare] logs | + type: object +hook[prepare] logs | + targetRef: +hook[prepare] logs | + description: TargetRef points to the controller managing the set of +hook[prepare] logs | + pods for the autoscaler to control - e.g. Deployment, StatefulSet. 
+hook[prepare] logs | + VerticalPodAutoscaler can be targeted at controller implementing +hook[prepare] logs | + scale subresource (the pod set is retrieved from the controller's +hook[prepare] logs | + ScaleStatus) or some well known controllers (e.g. for DaemonSet +hook[prepare] logs | + the pod set is read from the controller's spec). If VerticalPodAutoscaler +hook[prepare] logs | + cannot use specified target it will report ConfigUnsupported condition. +hook[prepare] logs | + Note that VerticalPodAutoscaler does not require full implementation +hook[prepare] logs | + of scale subresource - it will not use it to modify the replica +hook[prepare] logs | + count. The only thing retrieved is a label selector matching pods +hook[prepare] logs | + grouped by the target resource. +hook[prepare] logs | + properties: +hook[prepare] logs | + apiVersion: +hook[prepare] logs | + description: API version of the referent +hook[prepare] logs | + type: string +hook[prepare] logs | + kind: +hook[prepare] logs | + description: 'Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"' +hook[prepare] logs | + type: string +hook[prepare] logs | + name: +hook[prepare] logs | + description: 'Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names' +hook[prepare] logs | + type: string +hook[prepare] logs | + required: +hook[prepare] logs | + - kind +hook[prepare] logs | + - name +hook[prepare] logs | + type: object +hook[prepare] logs | + updatePolicy: +hook[prepare] logs | + description: Describes the rules on how changes are applied to the +hook[prepare] logs | + pods. If not specified, all fields in the `PodUpdatePolicy` are +hook[prepare] logs | + set to their default values. +hook[prepare] logs | + properties: +hook[prepare] logs | + updateMode: +hook[prepare] logs | + description: Controls when autoscaler applies changes to the pod +hook[prepare] logs | + resources. The default is 'Auto'. +hook[prepare] logs | + enum: +hook[prepare] logs | + - "Off" +hook[prepare] logs | + - Initial +hook[prepare] logs | + - Recreate +hook[prepare] logs | + - Auto +hook[prepare] logs | + type: string +hook[prepare] logs | + type: object +hook[prepare] logs | + required: +hook[prepare] logs | + - targetRef +hook[prepare] logs | + type: object +hook[prepare] logs | + status: +hook[prepare] logs | + description: Current information about the autoscaler. +hook[prepare] logs | + properties: +hook[prepare] logs | + conditions: +hook[prepare] logs | + description: Conditions is the set of conditions required for this +hook[prepare] logs | + autoscaler to scale its target, and indicates whether or not those +hook[prepare] logs | + conditions are met. +hook[prepare] logs | + items: +hook[prepare] logs | + description: VerticalPodAutoscalerCondition describes the state +hook[prepare] logs | + of a VerticalPodAutoscaler at a certain point. 
+hook[prepare] logs | + properties: +hook[prepare] logs | + lastTransitionTime: +hook[prepare] logs | + description: lastTransitionTime is the last time the condition +hook[prepare] logs | + transitioned from one status to another +hook[prepare] logs | + format: date-time +hook[prepare] logs | + type: string +hook[prepare] logs | + message: +hook[prepare] logs | + description: message is a human-readable explanation containing +hook[prepare] logs | + details about the transition +hook[prepare] logs | + type: string +hook[prepare] logs | + reason: +hook[prepare] logs | + description: reason is the reason for the condition's last transition. +hook[prepare] logs | + type: string +hook[prepare] logs | + status: +hook[prepare] logs | + description: status is the status of the condition (True, False, +hook[prepare] logs | + Unknown) +hook[prepare] logs | + type: string +hook[prepare] logs | + type: +hook[prepare] logs | + description: type describes the current condition +hook[prepare] logs | + type: string +hook[prepare] logs | + required: +hook[prepare] logs | + - status +hook[prepare] logs | + - type +hook[prepare] logs | + type: object +hook[prepare] logs | + type: array +hook[prepare] logs | + recommendation: +hook[prepare] logs | + description: The most recently computed amount of resources recommended +hook[prepare] logs | + by the autoscaler for the controlled pods. +hook[prepare] logs | + properties: +hook[prepare] logs | + containerRecommendations: +hook[prepare] logs | + description: Resources recommended by the autoscaler for each +hook[prepare] logs | + container. +hook[prepare] logs | + items: +hook[prepare] logs | + description: RecommendedContainerResources is the recommendation +hook[prepare] logs | + of resources computed by autoscaler for a specific container. +hook[prepare] logs | + Respects the container resource policy if present in the spec. +hook[prepare] logs | + In particular the recommendation is not produced for containers +hook[prepare] logs | + with `ContainerScalingMode` set to 'Off'. +hook[prepare] logs | + properties: +hook[prepare] logs | + containerName: +hook[prepare] logs | + description: Name of the container. +hook[prepare] logs | + type: string +hook[prepare] logs | + lowerBound: +hook[prepare] logs | + additionalProperties: +hook[prepare] logs | + anyOf: +hook[prepare] logs | + - type: integer +hook[prepare] logs | + - type: string +hook[prepare] logs | + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ +hook[prepare] logs | + x-kubernetes-int-or-string: true +hook[prepare] logs | + description: Minimum recommended amount of resources. Observes +hook[prepare] logs | + ContainerResourcePolicy. This amount is not guaranteed +hook[prepare] logs | + to be sufficient for the application to operate in a stable +hook[prepare] logs | + way, however running with less resources is likely to +hook[prepare] logs | + have significant impact on performance/availability. +hook[prepare] logs | + type: object +hook[prepare] logs | + target: +hook[prepare] logs | + additionalProperties: +hook[prepare] logs | + anyOf: +hook[prepare] logs | + - type: integer +hook[prepare] logs | + - type: string +hook[prepare] logs | + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ +hook[prepare] logs | + x-kubernetes-int-or-string: true +hook[prepare] logs | + description: Recommended amount of resources. Observes ContainerResourcePolicy. 
+hook[prepare] logs | + type: object +hook[prepare] logs | + uncappedTarget: +hook[prepare] logs | + additionalProperties: +hook[prepare] logs | + anyOf: +hook[prepare] logs | + - type: integer +hook[prepare] logs | + - type: string +hook[prepare] logs | + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ +hook[prepare] logs | + x-kubernetes-int-or-string: true +hook[prepare] logs | + description: The most recent recommended resources target +hook[prepare] logs | + computed by the autoscaler for the controlled pods, based +hook[prepare] logs | + only on actual resource usage, not taking into account +hook[prepare] logs | + the ContainerResourcePolicy. May differ from the Recommendation +hook[prepare] logs | + if the actual resource usage causes the target to violate +hook[prepare] logs | + the ContainerResourcePolicy (lower than MinAllowed or +hook[prepare] logs | + higher that MaxAllowed). Used only as status indication, +hook[prepare] logs | + will not affect actual resource assignment. +hook[prepare] logs | + type: object +hook[prepare] logs | + upperBound: +hook[prepare] logs | + additionalProperties: +hook[prepare] logs | + anyOf: +hook[prepare] logs | + - type: integer +hook[prepare] logs | + - type: string +hook[prepare] logs | + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ +hook[prepare] logs | + x-kubernetes-int-or-string: true +hook[prepare] logs | + description: Maximum recommended amount of resources. Observes +hook[prepare] logs | + ContainerResourcePolicy. Any resources allocated beyond +hook[prepare] logs | + this value are likely wasted. This value may be larger +hook[prepare] logs | + than the maximum amount of application is actually capable +hook[prepare] logs | + of consuming. +hook[prepare] logs | + type: object +hook[prepare] logs | + required: +hook[prepare] logs | + - target +hook[prepare] logs | + type: object +hook[prepare] logs | + type: array +hook[prepare] logs | + type: object +hook[prepare] logs | + type: object +hook[prepare] logs | + required: +hook[prepare] logs | + - spec +hook[prepare] logs | + type: object +hook[prepare] logs | + served: true +hook[prepare] logs | + storage: false +hook[prepare] logs | +status: +hook[prepare] logs | + acceptedNames: +hook[prepare] logs | + kind: "" +hook[prepare] logs | + plural: "" +hook[prepare] logs | + conditions: null +hook[prepare] logs | + storedVersions: +hook[prepare] logs | + - v1 +hook[prepare] logs | +Comparing release=vpa, chart=fairwinds-stable/vpa +******************** + + Release was not present in Helm. Diff will show entire contents as new. 
+ +******************** +vpa-system, vpa-actor, ClusterRole (rbac.authorization.k8s.io) has been added: +- ++ # Source: vpa/templates/clusterroles.yaml ++ apiVersion: rbac.authorization.k8s.io/v1 ++ kind: ClusterRole ++ metadata: ++ name: vpa-actor ++ rules: ++ - apiGroups: ++ - "" ++ resources: ++ - pods ++ - nodes ++ - limitranges ++ verbs: ++ - get ++ - list ++ - watch ++ - apiGroups: ++ - "" ++ resources: ++ - events ++ verbs: ++ - get ++ - list ++ - watch ++ - create ++ - apiGroups: ++ - "poc.autoscaling.k8s.io" ++ resources: ++ - verticalpodautoscalers ++ verbs: ++ - get ++ - list ++ - watch ++ - patch ++ - apiGroups: ++ - "autoscaling.k8s.io" ++ resources: ++ - verticalpodautoscalers ++ verbs: ++ - get ++ - list ++ - watch ++ - patch +vpa-system, vpa-actor, ClusterRoleBinding (rbac.authorization.k8s.io) has been added: +- ++ # Source: vpa/templates/clusterrolebindings.yaml ++ apiVersion: rbac.authorization.k8s.io/v1 ++ kind: ClusterRoleBinding ++ metadata: ++ name: vpa-actor ++ roleRef: ++ apiGroup: rbac.authorization.k8s.io ++ kind: ClusterRole ++ name: vpa-actor ++ subjects: ++ - kind: ServiceAccount ++ name: vpa-recommender ++ namespace: vpa-system +vpa-system, vpa-checkpoint-actor, ClusterRole (rbac.authorization.k8s.io) has been added: +- ++ # Source: vpa/templates/clusterroles.yaml ++ apiVersion: rbac.authorization.k8s.io/v1 ++ kind: ClusterRole ++ metadata: ++ name: vpa-checkpoint-actor ++ rules: ++ - apiGroups: ++ - "poc.autoscaling.k8s.io" ++ resources: ++ - verticalpodautoscalercheckpoints ++ verbs: ++ - get ++ - list ++ - watch ++ - create ++ - patch ++ - delete ++ - apiGroups: ++ - "autoscaling.k8s.io" ++ resources: ++ - verticalpodautoscalercheckpoints ++ verbs: ++ - get ++ - list ++ - watch ++ - create ++ - patch ++ - delete ++ - apiGroups: ++ - "" ++ resources: ++ - namespaces ++ verbs: ++ - get ++ - list +vpa-system, vpa-checkpoint-actor, ClusterRoleBinding (rbac.authorization.k8s.io) has been added: +- ++ # Source: vpa/templates/clusterrolebindings.yaml ++ apiVersion: rbac.authorization.k8s.io/v1 ++ kind: ClusterRoleBinding ++ metadata: ++ name: vpa-checkpoint-actor ++ roleRef: ++ apiGroup: rbac.authorization.k8s.io ++ kind: ClusterRole ++ name: vpa-checkpoint-actor ++ subjects: ++ - kind: ServiceAccount ++ name: vpa-recommender ++ namespace: vpa-system +vpa-system, vpa-evictioner, ClusterRole (rbac.authorization.k8s.io) has been added: +- ++ # Source: vpa/templates/clusterroles.yaml ++ apiVersion: rbac.authorization.k8s.io/v1 ++ kind: ClusterRole ++ metadata: ++ name: vpa-evictioner ++ rules: ++ - apiGroups: ++ - "apps" ++ - "extensions" ++ resources: ++ - replicasets ++ verbs: ++ - get ++ - apiGroups: ++ - "" ++ resources: ++ - pods/eviction ++ verbs: ++ - create +vpa-system, vpa-metrics-reader, ClusterRole (rbac.authorization.k8s.io) has been added: +- ++ # Source: vpa/templates/clusterroles.yaml ++ apiVersion: rbac.authorization.k8s.io/v1 ++ kind: ClusterRole ++ metadata: ++ name: vpa-metrics-reader ++ rules: ++ - apiGroups: ++ - "metrics.k8s.io" ++ resources: ++ - pods ++ verbs: ++ - get ++ - list +vpa-system, vpa-metrics-reader, ClusterRoleBinding (rbac.authorization.k8s.io) has been added: +- ++ # Source: vpa/templates/clusterrolebindings.yaml ++ apiVersion: rbac.authorization.k8s.io/v1 ++ kind: ClusterRoleBinding ++ metadata: ++ name: vpa-metrics-reader ++ roleRef: ++ apiGroup: rbac.authorization.k8s.io ++ kind: ClusterRole ++ name: vpa-metrics-reader ++ subjects: ++ - kind: ServiceAccount ++ name: vpa-recommender ++ namespace: vpa-system +vpa-system, 
vpa-recommender, Deployment (apps) has been added: +- ++ # Source: vpa/templates/recommender-deployment.yaml ++ apiVersion: apps/v1 ++ kind: Deployment ++ metadata: ++ name: vpa-recommender ++ labels: ++ app.kubernetes.io/component: recommender ++ helm.sh/chart: vpa-1.6.0 ++ app.kubernetes.io/name: vpa ++ app.kubernetes.io/instance: vpa ++ app.kubernetes.io/version: "0.11.0" ++ app.kubernetes.io/managed-by: Helm ++ spec: ++ replicas: 1 ++ selector: ++ matchLabels: ++ app.kubernetes.io/component: recommender ++ app.kubernetes.io/name: vpa ++ app.kubernetes.io/instance: vpa ++ template: ++ metadata: ++ labels: ++ app.kubernetes.io/component: recommender ++ app.kubernetes.io/name: vpa ++ app.kubernetes.io/instance: vpa ++ spec: ++ serviceAccountName: vpa-recommender ++ securityContext: ++ runAsNonRoot: true ++ runAsUser: 65534 ++ containers: ++ - name: vpa ++ securityContext: ++ {} ++ image: "k8s.gcr.io/autoscaling/vpa-recommender:0.11.0" ++ imagePullPolicy: Always ++ args: ++ - --pod-recommendation-min-cpu-millicores=15 ++ - --pod-recommendation-min-memory-mb=100 ++ - --v=4 ++ livenessProbe: ++ failureThreshold: 6 ++ httpGet: ++ path: /health-check ++ port: metrics ++ scheme: HTTP ++ periodSeconds: 5 ++ successThreshold: 1 ++ timeoutSeconds: 3 ++ readinessProbe: ++ failureThreshold: 120 ++ httpGet: ++ path: /health-check ++ port: metrics ++ scheme: HTTP ++ periodSeconds: 5 ++ successThreshold: 1 ++ timeoutSeconds: 3 ++ ports: ++ - name: metrics ++ containerPort: 8942 ++ protocol: TCP ++ resources: ++ limits: ++ cpu: 200m ++ memory: 1000Mi ++ requests: ++ cpu: 50m ++ memory: 500Mi +vpa-system, vpa-recommender, ServiceAccount (v1) has been added: +- ++ # Source: vpa/templates/recommender-service-account.yaml ++ apiVersion: v1 ++ kind: ServiceAccount ++ automountServiceAccountToken: true ++ metadata: ++ name: vpa-recommender ++ labels: ++ helm.sh/chart: vpa-1.6.0 ++ app.kubernetes.io/name: vpa ++ app.kubernetes.io/instance: vpa ++ app.kubernetes.io/version: "0.11.0" ++ app.kubernetes.io/managed-by: Helm ++ app.kubernetes.io/component: recommender +vpa-system, vpa-status-reader, ClusterRole (rbac.authorization.k8s.io) has been added: +- ++ # Source: vpa/templates/clusterroles.yaml ++ apiVersion: rbac.authorization.k8s.io/v1 ++ kind: ClusterRole ++ metadata: ++ name: vpa-status-reader ++ rules: ++ - apiGroups: ++ - "coordination.k8s.io" ++ resources: ++ - leases ++ verbs: ++ - get ++ - list ++ - watch +vpa-system, vpa-target-reader, ClusterRole (rbac.authorization.k8s.io) has been added: +- ++ # Source: vpa/templates/clusterroles.yaml ++ apiVersion: rbac.authorization.k8s.io/v1 ++ kind: ClusterRole ++ metadata: ++ name: vpa-target-reader ++ rules: ++ - apiGroups: ++ - '*' ++ resources: ++ - '*/scale' ++ verbs: ++ - get ++ - watch ++ - apiGroups: ++ - "" ++ resources: ++ - replicationcontrollers ++ verbs: ++ - get ++ - list ++ - watch ++ - apiGroups: ++ - apps ++ resources: ++ - daemonsets ++ - deployments ++ - replicasets ++ - statefulsets ++ verbs: ++ - get ++ - list ++ - watch ++ - apiGroups: ++ - batch ++ resources: ++ - jobs ++ - cronjobs ++ verbs: ++ - get ++ - list ++ - watch +vpa-system, vpa-target-reader-binding, ClusterRoleBinding (rbac.authorization.k8s.io) has been added: +- ++ # Source: vpa/templates/clusterrolebindings.yaml ++ apiVersion: rbac.authorization.k8s.io/v1 ++ kind: ClusterRoleBinding ++ metadata: ++ name: vpa-target-reader-binding ++ roleRef: ++ apiGroup: rbac.authorization.k8s.io ++ kind: ClusterRole ++ name: vpa-target-reader ++ subjects: ++ - kind: ServiceAccount ++ 
name: vpa-recommender ++ namespace: vpa-system +vpa-system, vpa-updater, ServiceAccount (v1) has been added: +- ++ # Source: vpa/templates/updater-service-account.yaml ++ apiVersion: v1 ++ kind: ServiceAccount ++ automountServiceAccountToken: true ++ metadata: ++ name: vpa-updater ++ labels: ++ helm.sh/chart: vpa-1.6.0 ++ app.kubernetes.io/name: vpa ++ app.kubernetes.io/instance: vpa ++ app.kubernetes.io/version: "0.11.0" ++ app.kubernetes.io/managed-by: Helm ++ app.kubernetes.io/component: updater +``` +{{< /details >}} + +Yeah, it's huge, but you can see everything that's going to happen. So I'd say it's good. + +## Conclusion +It's a short article, because I think the whole setup is super easy, and the CI is easy too. You still have full `GitOps` (or almost full), but you also have control. I love this setup and would like to use it for my infrastructure. + +Why do I think it's better than `ArgoCD`? +With `ArgoCD` I either have a lot of `yaml` to install things, or I have complicated setups with `ApplicationSets` that are most probably very special and won't be reused in other companies. I need to care about how `ArgoCD` will handle a lot of applications that are added there only for diffing. I need additional applications installed in my clusters not only as a part of the infrastructure itself, but also as a service that I'm providing other teams with. Because I want to manage applications that are being developed by other teams with `Argo`, so I'm mixing a lot of different kinds of applications here. + +Helmfile lets me separate infra from applications. `ArgoCD` can be provided purely as a service that other teams can use, because it makes k8s easier for those who don't need to understand it so deeply. Also, helmfile lets me use helm-secrets to encrypt values. I can do it with Argo too, but then I need to either have a custom `ArgoCD` image, or support a CMP plugin that will handle SOPS. + +You can find an example of PR here: +> When is helmfile not GitOps? +> To uninstall a helm release, you need to add `installed: false` to it. If you just remove a release from helmfile.yaml, it isn't going to be removed. So in such cases it's not GitOps. You can write a hook that compares the previous state of your helmfile to the current one and does a cleanup; then it's fully GitOps again. But I prefer removing things manually, so to me, it's not a problem. Removing stuff is something that I think should mostly be done by a human being, if it's not a part of your daily work. + diff --git a/content/posts/dont-use-argocd-for-infrastructure/cover.png b/content/posts/dont-use-argocd-for-infrastructure/cover.png new file mode 100644 index 0000000..1154626 Binary files /dev/null and b/content/posts/dont-use-argocd-for-infrastructure/cover.png differ diff --git a/content/posts/dont-use-argocd-for-infrastructure/index.md b/content/posts/dont-use-argocd-for-infrastructure/index.md new file mode 100644 index 0000000..31400ef --- /dev/null +++ b/content/posts/dont-use-argocd-for-infrastructure/index.md @@ -0,0 +1,323 @@ +--- +title: "Don't use ArgoCD for your infrastructure" +date: 2023-02-09T12:47:32+01:00 +draft: false +ShowToc: true +cover: + image: "cover.png" + caption: "ArgoCD" + relative: false + responsiveImages: false +--- +> Of course, it's just a clickbait title. Use whatever works for you.
I will just describe why I wouldn't use `ArgoCD` for the infrastructure + +## Prelude +`ArgoCD` is an incredibly popular tool and I see that many DevOps guys *(I know that it's not a job definition, but I feel like it's the best description that everybody can understand)* want to use it everywhere. I wasn't an exception, but I've just changed my mind. I still think that `ArgoCD` is cool, and you need to use it, but not for the infrastructure. + +## But why? + +### One more prelude +Let's assume you are a team that is providing something as a service to other teams. Even if you're the only member of it, it doesn't matter. And let's assume you're working with `Kubernetes` or you plan to work with it, otherwise I'm not sure why you would even read the post. + +> It's very common to use separate clusters for different teams, customers, applications, etc. Let's say you have 3 clusters + +![3 clusters and you](/dont-use-argocd-for-infrastructure/3-clusters.png) + +Setups may be different, you can use different clusters for different products, environments, teams, or you can have your own opinion on how to split workload between clusters. But these (in our case) 3 clusters are used directly by other teams. Also, you may want to have a cluster for providing services, let's assume your company decided to use [Gitea](https://gitea.io/en-us/) as a `git` provider, and you deployed it to Kubernetes. *It may be a very controversial example, but I'm not talking about what should run in K8s and what shouldn't, so if it bothers you, think of any other thing that is supposed to be used across the whole company (GitLab Runners, Bitwarden, ElasticSearch, etc...)*. So it's already 4 clusters. Let's call the fourth cluster a `DevOps Cluster` + +![3 Clusters and gitea](/dont-use-argocd-for-infrastructure/3-clusters-and-gitea.png) + +I assume you need to have some common stuff deployed to each cluster, let's think of Prometheus, Grafana, and Loki. + +And now you need to decide how to deploy it. You may have already known about `ArgoCD`, or you decided to look for **Best Practices** and found a lot about `ArgoCD`. And it sounds perfect. Everybody tends to use it. You can find a lot of information everywhere. People are helpful. The GitHub repo is well-maintained. + +>Why Argo CD? +> +>Application definitions, configurations, and environments should be declarative and version controlled. Application deployment and lifecycle management should be automated, >auditable, and easy to understand. + +And now you first need to deliver `ArgoCD` itself and later start delivering everything else with `ArgoCD`. + +Let's first talk about how to deliver Argo. There are different options. For example, you can have one main installation in the `Devops Cluster` and use it to manage other clusters. That sounded good to me when I first heard about it. But I wanted to have all configuration as code, and to add other clusters to the main `Argo` you need to use the `argocd cli`, so it's either an additional step in the CI/CD, or manual work. I didn't like either option, because I wanted to avoid adding scripts to pipelines, and manual work just wasn't an option. And also it's not very transparent anymore where all the applications in target clusters are coming from (or maybe I just couldn't find it, I'd rather think that I was dumb). One more thing is that you obviously can't have several `K8s` resources with the same name in one namespace, so every `Application` must have a different name. I don't like long names, so it looks ugly to me.
Especially when your clusters have long names, like "the-first-product-production", and your application looks like "the-first-product-production-grafana". And you don't have to use the cluster name for the application, for sure, but you would like to have some logic there. And this logic must be as obvious as possible. But anyway, these are the three main issues that I've faced, and that I can't live with, so here comes the second way to deliver `Argo`: install it into each cluster. + +So I would go with 4 `ArgoCD` installations. So the first step is to install it, which is not a problem at all, there are many ways to do it. And after it's installed, we need to start delivering other applications. I'm aware of 3 ways of doing it: + +1. Use `Application` manifests for applications +2. Use `Application` manifests to manage `Application` manifests from repo (the App of Apps pattern, or something like that) +3. Use `ApplicationSet` manifests to make `ArgoCD` render `Application` manifests and apply them + +### Application + +The first option is really straightforward, isn't it? All we need to do is to create manifests. `ArgoCD` devs have just published version 2.6 with `multi-source` applications support. *But currently I can't say it's usable. The main issue for me is that the `argocd` cli doesn't work with them, which makes the whole thing pointless to me. Without the cli I can't implement CD, so I see no reason to use them at all. I could use the `AutoSync` option, but I won't do that, and later I'll come back to this point and describe why, maybe in the next post, or later*. So I can't use multi-source applications right now. Let's look at the list of applications that I need to install one more time: + +To all clusters: +- Prometheus +- Grafana +- Loki +To the DevOps cluster only: +- Gitea + +There are many ways to install applications to `K8s`. But actually, I think, that there is only one real way: [helm](https://helm.sh/). Why? Because each of those applications is a huge amount of manifests that you need to combine, install and maintain. You probably won't write those manifests yourself. There are other options to install apps, but all of them seem super complicated. And I doubt that you want to spend 8 hours per day editing `yaml` files. At least I don't, so I'm choosing helm. + +>I need to say that I'm not 100% happy with helm. There are some issues that seem very important to me, but it's good enough to use it. But maybe we can talk about them later. + +Let's try the first approach (an `Application` per application). First, the package: +```YAML +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: prometheus + namespace: argocd +spec: + destination: + namespace: monitoring # Let's not touch namespace management this time. Let's assume we already solved this issue + server: https://kubernetes.default.svc + project: monitoring + source: + chart: kube-prometheus-stack + helm: + valueFiles: + - values.yaml + path: . + repoURL: https://prometheus-community.github.io/helm-charts + targetRevision: 45.0.0 +``` + +But what about values? A single-source application will not be able to find values files in your repo if you use a remote chart, so you have two options (that I'm aware of) + +1. Add values directly to your source like this: +```YAML +spec.source.helm.values: | + your-values: here +``` +2. Create a CMP for handling helm packages and values + +The second way is good, but complicated.
Because it's a self-written tool that you should implement, that should work with Argo, and that you should maintain, without any guarantees that it will keep working after `ArgoCD` is updated. I was using Argo with a custom CMP, and it's no fun. + +But anyway, the `Application` way is not scalable, because you will have to create a manifest for each cluster, and it is not secure, because you can't easily encrypt the data. Also, if you've seen values for `kube-prometheus-stack`, you know that they are huge. So now you have 4 huge manifests with unencrypted secrets. And it's only for one app, so it probably looks like this: +``` +manifests/ + cluster1/ + prometheus.yaml + loki.yaml + grafana.yaml + cluster2/ + prometheus.yaml + loki.yaml + grafana.yaml + cluster3/ + prometheus.yaml + loki.yaml + grafana.yaml + cluster-devops/ + prometheus.yaml + loki.yaml + grafana.yaml + gitea.yaml +``` + +In my experience, each `Application` like this with a proper configuration will contain about 150 - 200 lines of code, so you have about 1950 - 2600 lines of code just to install these 4 applications. One of them is really special, and the others will most probably have only several lines that are not duplicated, e.g. for ingress and passwords. + +I think it's not the way to go. To solve this problem, many guys save charts to the same git repo where they store values, using helm-freeze for example. So it looks like +``` +helm-freeze.yaml +vendored_charts/ + prometheus/... + grafana/... + loki/... + gitea/... +manifests/ + cluster1/ + prometheus.yaml + loki.yaml + grafana.yaml + cluster2/ + prometheus.yaml + loki.yaml + grafana.yaml + cluster3/ + prometheus.yaml + loki.yaml + grafana.yaml + cluster-devops/ + prometheus.yaml + loki.yaml + grafana.yaml + gitea.yaml +values/ + prometheus/... + grafana/... + loki/... + gitea/... + +``` + +Yes, now you can use values from files, you can encrypt secrets, and your `Applications` are not that huge anymore. But I'm strongly against vendoring external charts. Why? First, it's my ideology: briefly, if you don't trust packagers, you shouldn't use their packages. Vendoring charts into a git repo also means that you need to add a manual step to download them. With helm-freeze, for example, you need to execute `helm-freeze sync`. It's either a pre-commit hook, or a manual execution, or a step in CI/CD. I don't like any of the options for different reasons, but if I stop on every little point, this article will never be finished. So if it's interesting, feel free to ask. + +> I would give up already here. I don't understand why you need to suffer that much just to use such a great tool + +### App of Apps + +It's actually pretty much the same thing. But instead of applying `Application` manifests one by one, you will create an additional `Application` manifest that `ArgoCD` will use to generate the others. + +```YAML +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: applications + namespace: argocd +spec: + destination: + namespace: argo + server: https://kubernetes.default.svc + project: system + source: + path: ./manifests/cluster1 + repoURL: $YOUR_GIT_REPO + targetRevision: main +``` + +You will create 4 manifests, one for each cluster, and apply them. And when you push your (not App of Apps) `Application` manifests to the main branch, `ArgoCD` will do something about it. It doesn't solve anything, as far as I can see. You still have an ugly amount of `yaml` files, but now you also have 4 additional ones that are not so huge.
This concept might simplify the deployment process, but it will also take a certain amount of control away from you, because now it's not you who is responsible for deploying applications, but Argo. + +> I think that GitOps and other automations are important, and it's the only way to do development right now, but you're probably hired as a DevOps Engineer or SRE, or whoever. You're supposed to be able to do something apart from pushing to git. You can't hand over all the responsibility to git and pipelines and live a happy life. One day you will have to execute `kubectl edit deployment`, and then you won't be happy if `ArgoCD` decides to rewrite your changes right after they are applied, because you're not following the Git Flow. You need to have control, and that's why you're paid. Not because you can edit `yaml` files + +### ApplicationSets + +It's a nice concept. *In theory*. You create one manifest for all applications in a cluster, or even one manifest for all applications across your clusters. The unique one that will work everywhere. I won't provide an example, sorry, but you can do a lot of templating there, so one manifest will work for four clusters and will decrease the amount of code. I'm using `ApplicationSets` myself, for my personal stuff, where I don't have any kind of obligations, and no one will sue me for breaking everything down. And actually I've done the breaking thing not so long ago. I'm not blaming `ArgoCD` for that, it was entirely my fault. But let's see what I've done. And let me know (anyhow) if you were able to spot the problem before the `kubectl apply` happened. + +#### My file structure + +I have one `ApplicationSet` for helm releases, that looks like this: + +./helm-releases.yaml + +```YAML +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: helm-releases + namespace: argo-system +spec: + generators: + - git: + repoURL: git@github.com:allanger/my-repo.git + revision: HEAD + files: + - path: "releases/*" + template: + metadata: + name: "{{ argo.application }}" + namespace: argo-system + spec: + project: "{{ argo.project }}" + source: + path: "{{ argo.path }}" + helm: + valueFiles: + - values.yaml + values: |- + {{ values }} + repoURL: "{{ chart.repo }}" + targetRevision: "{{ chart.version }}" + chart: "{{ chart.name }}" + destination: + server: "{{ argo.cluster }}" + namespace: "{{ argo.namespace }}" + ignoreDifferences: + - group: admissionregistration.k8s.io + kind: ValidatingWebhookConfiguration + jqPathExpressions: + - .webhooks[]?.clientConfig.caBundle + - .webhooks[]?.failurePolicy +``` + +And a certain amount of generator files in the `./releases` folder. I'm using the first approach (values directly in the source), like this: +```YAML +argo: + cluster: https://kubernetes.default.svc + application: cert-manager + project: system + namespace: cert-manager + path: . +chart: + version: 1.10.1 + name: cert-manager + repo: https://charts.jetstack.io +values: | +...
+``` + +I don't like having values here, and when `ArgoCD` 2.6 was released, I decided to try multi-source applications, so I created a new directory, `./releases_v2`, and a new `ApplicationSet` manifest + +./helm-releases-v2.yaml: +```YAML +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: helm-releases + namespace: argo-system +spec: + generators: + - git: + repoURL: git@github.com:allanger/argo-deployment.git + revision: HEAD + files: + - path: "releases_v2/*" + template: + metadata: + name: "{{ argo.application }}" + namespace: argo-system + spec: + project: "{{ argo.project }}" + sources: + - path: "./values" + repoURL: git@github.com:allanger/argo-deployment.git + ref: values + - path: "{{ argo.path }}" + helm: + valueFiles: + - "$values/values/{{ chart.name }}/values.yaml" + repoURL: "{{ chart.repo }}" + targetRevision: "{{ chart.version }}" + chart: "{{ chart.name }}" + destination: + server: "{{ argo.cluster }}" + namespace: "{{ argo.namespace }}" + ignoreDifferences: + - group: admissionregistration.k8s.io + kind: ValidatingWebhookConfiguration + jqPathExpressions: + - .webhooks[]?.clientConfig.caBundle + - .webhooks[]?.failurePolicy +``` + +And executed `kubectl apply -f helm-releases-v2.yaml` + +And for some reason `ArgoCD` stopped responding. And, actually, everything was gone. Nothing was left in my cluster. (In case you didn't spot it: the new `ApplicationSet` has the same `metadata.name` as the old one, so applying it replaced the old generators, and every `Application` generated from `./releases` was removed together with its resources.) And then I realized what I'd done: "How am I a DevOps engineer after all?". *In case you wonder, I was able to save 100% of the important persistent data that was there, and all the workload was back in 15 minutes, but still...* + +One of the most important things about your infrastructure is its sustainability. And if you happen to have a setup like this in your company, you hire a junior engineer, and he/she/they make the same mistake, you have no right to punish him/her/them (I'm sorry if I'm not writing it right, I just don't know how to); on the contrary, you need to punish yourself for building something that is so easy to destroy. And I know that there are options to avoid resource destruction when `Applications` or `ApplicationSets` are gone, or that you need to use `argocd` and not `kubectl` to manage these resources *(and I don't agree with that at all)*. But I think that having to add additional fields to manifests to preserve resources that are eventually created by an operator after applying a CR manifest is rather non-obvious, and the out-of-the-box behaviour is dangerous. When I need something to be reliable, I'd rather have a more complicated and less obvious, or maybe not automated at all, process for removing it. + +> You'd rather think twice before executing `rm -rf ./something`, than do `git push` and wait until it's executed automatically, wouldn't you? + +But `ApplicationSets` are not bad. I'm still using them, but now with additional fields, so I'm not afraid to remove everything accidentally. And yet it's not perfect. Because without multi-source applications they don't make any sense for projects bigger than a Minecraft server that is used by 4 guys, *unless you're vendoring helm charts, of course* + +Even when multi-source apps are fully supported, and I can move values to real values files, there is still no way to do `argocd appset diff`, and I'm aware of [this github issue](https://github.com/argoproj/argo-cd/issues/10895#issuecomment-1423566000). And in that issue you can also read my concerns about the server-side rendering implementation that they're planning.
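+
+Just for reference, the "additional fields" that I've mentioned above look roughly like this. It's only a sketch based on how I remember the `ApplicationSet` sync policy from the upstream docs, so double-check the field names before relying on it:
+
+```YAML
+apiVersion: argoproj.io/v1alpha1
+kind: ApplicationSet
+metadata:
+  name: helm-releases
+  namespace: argo-system
+spec:
+  # The important part: don't cascade-delete the generated Applications
+  # (and the resources they manage) if this ApplicationSet itself is deleted
+  syncPolicy:
+    preserveResourcesOnDeletion: true
+  generators:
+    - git:
+        repoURL: git@github.com:allanger/my-repo.git
+        revision: HEAD
+        files:
+          - path: "releases/*"
+  template:
+    metadata:
+      name: "{{ argo.application }}"
+      namespace: argo-system
+    spec:
+      project: "{{ argo.project }}"
+      source:
+        repoURL: "{{ chart.repo }}"
+        targetRevision: "{{ chart.version }}"
+        chart: "{{ chart.name }}"
+      destination:
+        server: "{{ argo.cluster }}"
+        namespace: "{{ argo.namespace }}"
+```
+
+It covers the case when the `ApplicationSet` itself is deleted, but it wouldn't necessarily have saved me from the generator mix-up above, so I still prefer treating removals as a manual job.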
+ +So let's assume that the cli supports multi-source apps and application sets can be diffed, and your server is not overloaded when 1000 manifests are being rendered on each pipeline run just for diffing, and [helm repos are not DDoSed](https://todoist.com/app/project/2232733866/task/6521274379) *(because it's not nice to DDoS something that is used by a huge amount of users across the world)*. And you've added all the fields to manifests to make your infra reliable. Sounds nice! + +But there is one more problem that I see. What many teams don't think about is that they, as a team, provide services to other teams. So, if you have the clusters `cluster-production`, `cluster-development`, `cluster-demo`, and `cluster-devops`, where should you deploy infra changes first? I think a lot of you would say: to `cluster-development`, because at least it's not facing real customers. And... I totally don't agree. You're the team that provides other teams with services, and your real customers are those teams. Of course, you won't treat the production environment the same way you treat the development environment, but it's still not a playground for you. It's a playground for developers, and it should be stable and reliable for them. I'm sure that there are many ways to handle it. But I think that you should have one more cluster, a `cluster-infra-test`, where you will deliver your changes first and where you can test them before they affect other teams. So, it's a 5th `ArgoCD` with a very similar setup *(actually, the setup must repeat all the other setups, so you're sure you're testing what's going to be delivered later)*. And with the `ApplicationSet` and, for example, git generators that are pointed to the main branch on "production" environments (`cluster-production`, `cluster-development`, `cluster-demo`, and `cluster-devops`), but here changes must come not only from main, but also from other branches *(assuming that your workflow is something like this: cut a branch, update the infra code, create a pull request, and merge)*, because you need to test everything before it's in the main branch. So you either have a very complicated `ApplicationSet` *(I'm not even sure that it's possible to do with templates)*, or you have different manifests for the test cluster and the rest, so you have to remember to update both every time one is updated, or you have an additional step in a pipeline that will get the `ApplicationSet` from the `cluster-infra-test` and add a new branch to the generators *(because you must not overwrite and break test environments that are created by other members of your team)* + + +### Really? + +Are you ready to go through all of this just to use Argo? Is there really nothing that can stop you from doing that? I even got tired of writing this post. I was stubborn, I wanted to use the best `GitOps Kubernetes` tool, I went through all of this, and I was trying to convince others that it's cool. Just a little bit of work, and we're happy `Argo` users. But looking back, all I can say is: just use [Helmfile](https://github.com/helmfile/helmfile)! `ArgoCD` literally doesn't solve any issue that `Helmfile` can't solve (when it comes to infrastructure deployment). And with a huge amount of work and compromises you can achieve a result that will be close to what you would have with a proper `helmfile` configuration (that is extremely easy and reliable). 
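+
+Just to show what I mean by a proper `helmfile` configuration being easy, here is a minimal sketch of a `helmfile.yaml` for the same set of releases. The chart versions and value paths are made up for the example, so treat it as an illustration and not as a copy-paste config:
+
+```YAML
+repositories:
+  - name: prometheus-community
+    url: https://prometheus-community.github.io/helm-charts
+  - name: grafana
+    url: https://grafana.github.io/helm-charts
+
+releases:
+  - name: prometheus
+    namespace: monitoring
+    chart: prometheus-community/kube-prometheus-stack
+    version: 45.0.0
+    values:
+      - values/prometheus/values.yaml
+    secrets:
+      - values/prometheus/secrets.yaml # helm-secrets/SOPS, no custom image or CMP needed
+  - name: loki
+    namespace: monitoring
+    chart: grafana/loki
+    version: 4.6.2
+    values:
+      - values/loki/values.yaml
+  - name: grafana
+    namespace: monitoring
+    chart: grafana/grafana
+    version: 6.50.7
+    values:
+      - values/grafana/values.yaml
+    # switching this to false makes helmfile uninstall the release;
+    # just removing the block doesn't remove anything from the cluster
+    installed: true
+```
+
+Then `helmfile diff` in a pull request and `helmfile apply` after the merge is pretty much the whole CD part.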
+ +Later I will create a repo where I show all the examples with configuration and CI/CD for different `ArgoCD` approaches and a `helmfile`. So if you don't trust me now, you'll be able to see the difference, or try to convince me that I'm wrong. + +> And using `helmfile`, I will install `ArgoCD` to my clusters, of course, because it's an awesome tool, without any doubts. But don't manage your infrastructure with it, because it's a part of your infrastructure, and it's a service that you provide to other teams. And I'll talk about it in one of the next posts. + +Thanks, + +Oi! + +--- \ No newline at end of file diff --git a/content/posts/vst-on-linux-1/cover.png b/content/posts/vst-on-linux-1/cover.png new file mode 100644 index 0000000..6487c55 Binary files /dev/null and b/content/posts/vst-on-linux-1/cover.png differ diff --git a/content/posts/vst-on-linux-1/index.md b/content/posts/vst-on-linux-1/index.md new file mode 100644 index 0000000..bf9521f --- /dev/null +++ b/content/posts/vst-on-linux-1/index.md @@ -0,0 +1,287 @@ +--- +title: "Vst on Linux 1" +date: 2023-01-24T15:47:50+01:00 +draft: false +ShowToc: true +cover: + image: "cover.png" + caption: "Vst on Linux" + relative: false + responsiveImages: false +--- + +>Best, but according to Output. Their article: *[https://output.com/blog/output-favorites-freebies](https://output.com/blog/output-favorites-freebies)* + +This is the kind of article where I'm looking for "BEST FREE VST" articles or videos, or whatever, trying to run them on Linux and checking how they perform. The first article I've found is one by **Output**, so be it. + +--- + +## 1. Arcade by Output 👎 + +Freaking unexpected, huh? But what choice do I have? **Walk the walk and talk the talk.** So let's start by pressing the "TRY IT FREE" button. + +First I need to enter my email, then I need to enter a bunch of information about myself, and then: what a bummer, they want me to add a payment method. And even though they won't charge me for the first month, I'm not walking that walk. Sorry, let's go to the next one. + +--- + +## 2. OTT by Xfer 👍 + +This one you will find in any top, I believe. It can mean only one thing: it's really one of the best. So let's try. + +There is no Linux version of this plugin, so we will have to use the Windows one. How, you might ask? I will have to install a couple of packages to my system before I'm ready. I'm starting by installing **wine**. + +I am not going to describe the process of installing it, google `"$YOUR_LINUX_DISTRO_NAME install wine"`. After it's done, you may want to create a new wine prefix in your system. + +What is a wine prefix? Let's think of it as a directory that contains Windows-related stuff. All plugins will be installed there alongside the libraries that are required to make them work. + +Let's give this prefix a recognizable name, like `.wine_vst_plugins`. I'm opening the terminal, yes, I'll have to use it, but you shouldn't be scared of it, because the terminal is our friend. Opening it and executing: + + $ WINEPREFIX="$HOME/.wine_vst_plugins/" winecfg + + +It will open a window where you can configure your wine prefix, but the main stuff is already done, so I just close it. + +To check if we're happy, I'm executing the following + + $ ls -la $HOME/.wine_vst_plugins + + total 3332 + drwxr-xr-x 1 allanger allanger 126 Oct 27 18:13 . + drwx------ 1 allanger root 1922 Oct 27 18:15 ..
+ drwxr-xr-x 1 allanger allanger 8 Oct 27 18:13 dosdevices + drwxr-xr-x 1 allanger allanger 110 Oct 27 18:13 drive_c + -rw-r--r-- 1 allanger allanger 3282847 Oct 27 18:13 system.reg + -rw-r--r-- 1 allanger allanger 12 Oct 27 18:13 .update-timestamp + -rw-r--r-- 1 allanger allanger 4130 Oct 27 18:13 userdef.reg + -rw-r--r-- 1 allanger allanger 113309 Oct 27 18:13 user.reg + + +If your output looks like mine, we're good to go. Let's install the second tool: [https://github.com/robbert-vdh/yabridge](https://github.com/robbert-vdh/yabridge). You will find all the instructions if you just scroll down a wee bit. After installing it, you should also have a tool called **yabridgectl**. To check that it's there, just execute the following + + $ yabridgectl + + yabridgectl 4.0.2 + Robbert van der Helm + Optional utility to help set up yabridge + + USAGE: + yabridgectl + + OPTIONS: + -h, --help Print help information + -V, --version Print version information + + SUBCOMMANDS: + add Add a plugin install location + rm Remove a plugin install location + list List the plugin install locations + status Show the installation status for all plugins + sync Set up or update yabridge for all plugins + set Change the yabridge path (advanced) + blacklist Manage the indexing blacklist (advanced) + help Print this message or the help of the given subcommand(s) + + + +I hope you're seeing pretty much the same picture as I am. And it only means that we can go further. + +Now it's time to install the plugin itself. I'm downloading the **Windows** version and opening my terminal again + +Let's assume that you've downloaded it to the `~/Downloads` folder, and the file name is `Install_Xfer_OTT_135.exe` + + $ cd ~/Downloads + $ WINEPREFIX="$HOME/.wine_vst_plugins/" wine ./Install_Xfer_OTT_135.exe + + +Why are we adding this `WINEPREFIX` thing every time we run `wine`? Because we're telling wine which prefix should be used, since it's not the default path for the prefix. + +After installing this plugin, I will need to add it to **yabridge**. To do that, use **yabridgectl** + +Instead of what I'm putting after `/drive_c/`, provide the path that you've chosen during the installation + + $ yabridgectl add ~/.wine_vst_plugins/drive_c/Program\ Files/Common\ Files/VST + $ yabridgectl sync + + +And I'm opening a DAW, *I assume you already have one too, since you're here. But if you don't, and you don't know which to install, just install **Ardour.*** + +I'm opening it, adding a track and adding the **OTT** plugin to that track. + +![OTT Xfer](/vst-on-linux-1/ott-xfer.png) + +It is working, and I would even say it's running well +The UI part is a wee bit buggy, but I don't think that it's a problem. + +--- + +## 3. Wider by Polyverse 👍 + +I'm entering my email again and receiving a download link. Downloading, unpacking, and installing + + $ WINEPREFIX="$HOME/.wine_vst_plugins/" wine ./InfectedMushroom-Wider-V1.1.3.exe + $ yabridgectl sync + + +![Wider](/vst-on-linux-1/wider.png)Again, it's working flawlessly +So far so good, even the UI part is perfect. + +--- + +## 4. CamelCrusher by Camel Audio 👍 + +It can be downloaded even without the email thingy.
+ + $ WINEPREFIX="$HOME/.wine_vst_plugins/" wine ./camelcrusher-win_mac\ \(www.mpcindia.co\)/CamelCrusherWin-1-0-1-x64.exe + $ yabridgectl sync + + +And when I'm trying to add it to Ardour, I'm getting an error + + [Info]: Scanning: /home/allanger/.vst/yabridge/CamelCrusher.so + 09:23:38 [error] + 09:23:38 [error] Error during initialization: + 09:23:38 [error] '/home/allanger/.vst/yabridge/CamelCrusher.dll' does not exist, make sure to rename 'libyabridge-vst2.so' to match a VST plugin .dll file. + 09:23:38 [error] + [ERROR]: ** ERROR** VSTFX : CamelCrusher could not be instantiated :( + + [WARNING]: Cannot get VST information from '/home/allanger/.vst/yabridge/CamelCrusher.so': instantiation failed. + Scan Failed. + + +And I can't actually understand what the problem is here. + +I don't give up so quickly. Let's try running it via [Carla](https://github.com/falkTX/Carla). I won't describe how to install it, you can google it. + + +So after it's installed, I'm opening it as a standalone app first and trying to add my **CamelCrusher** there. And it's working. Then the next step is to add **Carla** as an **FX** plugin in **Ardour** and then add CamelCrusher there. +![CamelCrush](/vst-on-linux-1/camel-crush.png)Working again, but not without Carla + +--- + +## 5. Fracture by Glitchmachines 👍 + +I love this plugin, and I'm using it a lot on my MacBook, so it would be nice to run it on **Linux** too. So let's go. But Glitchmachines gives us another great plugin for free as well, so I will try running both of them here. The other one is **Hysteresis**. So I'm downloading both of them. + +After receiving two links, I'm installing them + + $ WINEPREFIX="$HOME/.wine_vst_plugins/" wine Fracture_setup.exe + $ WINEPREFIX="$HOME/.wine_vst_plugins/" wine Hysteresis_setup.exe + $ yabridgectl sync + + +![Glitchmachines](/vst-on-linux-1/glitchmachines.png) + +They are working, but there is one UI glitch + +![Glitchmachine's making glitch](/vst-on-linux-1/glitchmakesrs-glitch.gif) + +#### Maybe you won't notice it on your system + +Because we probably have different system configs, so maybe it's only possible to reproduce this bug with the set of configs and packages I'm using in my Linux. So if you don't face this issue, lucky you! + +It's not very annoying to me, but to avoid this kind of behavior, I can wrap these plugins with **Carla**. +![Glitchmachines with Carla](/vst-on-linux-1/glitchmaker-carla.gif) +It's working perfectly with Carla *(it's not that buggy in real life, only in the recording)* + +--- + +## 6. FreqEcho by Valhalla DSP 👍 + +Valhalla's plugins, I think, are some of the most popular in the music world. I don't know a man who doesn't have all of their free plugins installed. And I do have them installed already in my system, but I will go through the installation process again, just to describe it here. So let's download all of their free plugins + +- Valhalla Freq Echo +- Valhalla Space Modulator +- Valhalla Supermassive +```BASH +$ WINEPREFIX="$HOME/.wine_vst_plugins/" wine ValhallaFreqEchoWin_V1_2_0.exe +$ WINEPREFIX="$HOME/.wine_vst_plugins/" wine ValhallaSpaceModulatorWin_1_1_6v3.exe +$ WINEPREFIX="$HOME/.wine_vst_plugins/" wine ValhallaSupermassiveWin_V2_0_0.exe +$ yabridgectl sync +``` + +![Valhalla plugins](/vst-on-linux-1/valhalla.png) +All of them run flawlessly + +--- + +## 7.
Audio Treasure by Max For Cats 👎 + +As far as I could understand, it can only be used in **Ableton Live**, and since I'm not an Ableton user, and I don't want to run it with **Wine** *(cause there is no native Linux version)*, it's becoming the second plugin in the list that is not working on Linux. + +## 8. Saturation Knob by Softube 👎 + +To get it, I must have a Softube account *(but I already have one, because I used to download all them free plugins like nuts and create accounts everywhere 🤦)* + +So I'm downloading their App Center, which I'm going to use to install the plugin. *I do hate this approach to managing software, but I will cover it in another post one day.* + +So the process is very similar to a direct plugin installation. Install **Softube Central** to the same **Wine** prefix + + WINEPREFIX="$HOME/.wine_vst_plugins/" wine Softube\ Central\ Setup\ 1.7.1.exe + + +But after that, I've found out that their **Software Center** is an **Electron-based** application, and I wasn't yet able to run stuff like this in **Wine**. Maybe later I will put more effort into that, but for now, I'm saying that it's not working. 😥 + +> #### Some kind of rhetorical question +> +> I don't understand why they and, for example, **Splice** can't release a Linux version of their application, if they already use Electron, which, in my understanding, is supposed to be for cross-platform development. + +--- + +## 9 and 10. Plugins by iZotope 👎 + +**iZotope** is like **Valhalla**, everybody knows it. But I remember having trouble with their plugins on **Linux**. But now I'm trying again, and I'm full of hope. + +Plugins: + +- iZotope Vinyl +- iZotope Ozone Imager V2 *(it's not in the Output list, but why not?)* +- iZotope Vocal Doubler + +One more plugin vendor that requires an account creation. But I do have one from those good old times. + +Well, since I remember that there was a problem with them, I'll start by trying only one. Let it be **Ozone Imager V2**. + + $ WINEPREFIX="$HOME/.wine_vst_plugins/" wine iZotope_Ozone_Imager_v2_1_0.exe + + +But unfortunately, when I open the plugin, it asks for authorization, and it doesn't work. It's described in the **yabridge README.md**, but I was too dumb to read it there. + +#### There is a workaround + +I've heard that you can download cracked versions of these plugins and they won't require auth, so they might work. But I'm not trying that, and you shouldn't either. + +--- + +## 11. TAL-Chorus-LX 👍👍👍 + +It's a piece of cake. This plugin has a native build for Linux, so I'm (since I'm using **Arch Linux**) just installing it using **yay**. And you may want to use another package manager, or download it from the official web page; it's available there. +![NATIVE, YOU KNOW!](/vst-on-linux-1/tal-chorus.png) + + +--- + +## 12. Snap Heap by Kilo Hearts 👎 + +I was trying to get it, but it didn't seem free. + +![Snap Heap](/vst-on-linux-1/snap-heap.png) + +--- + +## 13. Signal Free VST by Output 👎 + +I'm sorry, but I'm not even trying. It still hurts since the first place in this top. + +--- + +## Some kind of conclusion + +First, you have seen only pictures here but haven't heard anything I've done with these plugins. And I know that it sucks, because we're talking about music. I'll record a track using these plugins later and show you how it's working. + +About those plugins that didn't work: I know that some of them are great, and it's sad that **iZotope** plugins are not running on Linux, but there are alternatives that are Linux native.
So for each failed one I will try to find an alternative, and later I will share my findings here. + +Thanks for reading + +Oi! + + diff --git a/content/posts/vst-on-linux-2/cover.png b/content/posts/vst-on-linux-2/cover.png new file mode 100644 index 0000000..71116c3 Binary files /dev/null and b/content/posts/vst-on-linux-2/cover.png differ diff --git a/content/posts/vst-on-linux-2/index.md b/content/posts/vst-on-linux-2/index.md new file mode 100644 index 0000000..14e43c8 --- /dev/null +++ b/content/posts/vst-on-linux-2/index.md @@ -0,0 +1,138 @@ +--- +title: "Vst on Linux 2" +date: 2023-01-31T19:32:34+01:00 +ShowToc: true +cover: + image: "cover.png" + caption: "VST on Linux" + relative: false + responsiveImages: false +--- + +Big ups, **Venus Theory!** +{{< youtube OCzf38fCqB4 >}} + +## Prerequisite +All of them are covered in [the first post]({{< ref "vst-on-linux-1" >}} ) + +- You have Wine and Yabridge installed +- You have a Wine prefix configured (in my case, `$HOME/.wine_vst_plugins/`, so every time you see it, use your own wine prefix) + +## Deelay ๐Ÿ‘ + +You can find it here: [https://sixthsample.com/deelay/](https://sixthsample.com/deelay/) + + +After receiving a link, I'm downloading a **Windows 64-bit** version and running +```BASH + +$ WINEPREFIX="$HOME/.wine_vst_plugins/" wine Deelay-1.0.3-Installer-64bit.exe +$ yabridgectl sync +``` + +![A running deelay](/vst-on-linux-2/deelay.png) + +It's running, but the context menus UI glitch is present +Audio qualities of this plugin on Linux are prefect, but when you open any drop-down menu, plugin's window is getting hidden. + +![UI glitch](vst-on-linux-2/deelay-glitch.gif) + +But it can be fixed with **Carla**. If I'm putting Carla to my FX chain and then adding **Deelay** to **Carla**, this problem is gone. + +## Surge XT ๐Ÿ‘๐Ÿ‘๐Ÿ‘ + +This plugin has a native **Linux** version, and it's open-source, so just install it with your package manager or download from the official site. You won't even have to touch **yabridge** and **wine** this time + +[**Source code**](https://github.com/surge-synthesizer/surge) + +![Surge XT Linux native](/vst-on-linux-2/surge.png) + +## Cardinal ๐Ÿ‘๐Ÿ‘๐Ÿ‘ + +**You won't believe me!** + +But it's open-source too and has supports **Linux** out of the box. + +[**Source code**](https://github.com/DISTRHO/Cardinal) + +![Again native and open-source](/vst-on-linux-2/cardinal.png) + +## Fire ๐Ÿ‘๐Ÿ‘๐Ÿ‘ + +*It ... is ... open ... source ... too ... and ... supports ... **Linux*** + +[**Source code**](https://github.com/jerryuhoo/Fire) + +So I'm just installing it with a package manager and trying. +![A real fire](/static/vst-on-linux-2/fire.png) + +## Ruina ๐Ÿ‘ + +*And a couple of others* + +It would be too good, if this one had a Linux version. To get this plugin, you will have to create an account, and download their installer ๐Ÿ‘ฟ + +[https://noiseengineering.us/products/the-freequel-bundle-sinc-vereor-virt-vereor-ruina](https://noiseengineering.us/products/the-freequel-bundle-sinc-vereor-virt-vereor-ruina) + +After downloading an Installer, I'm installing it with **wine **to the same prefix I install all their other plugins + + $ WINEPREFIX="$HOME/.wine_vst_plugins/" wine Noise\ Engineering\ Products\ 09358.exe + +![The installer](/static/vst-on-linux-2/noise-engineering.png) + +It will install plugins, we've only left to sync yabridge config. 
+ + $ yabridgectl sync + +![Running Ruina ](/vst-on-linux-2/runia-plugin.png) + +![Sinc Vereor](/vst-on-linux-2/sirt-plugin.png) + +![Virt Vereor](/vst-on-linux-2/virt-vereor.png) + +All of them are working flawlessly. One wouldn't even notice that they're not native + +--- + +## Gatelab 👍 / Filterstep 👎 / Panflow 👍 + +No **Linux** version again, so I'm gonna use **wine** again. And they want me to enter my email again. That's a shame, but let's do it anyway. If Venus Theory says they're good, I'm sure they really are. + + $ export WINEPREFIX="$HOME/.wine_vst_plugins/" + $ wine Audiomodern_Filterstep_1.1.2/WIN/Filterstep_64\ 1.1.2\ \(Win64\).exe + $ wine Audiomodern_Gatelab_1.1.3/WIN/Gatelab\ 1.1.3\ \(Win64\).exe + $ wine Audiomodern_Panflow_1.0.1/WIN/Panflow\ 1.0.1\ \(Win64\).exe + +I had to rerun Ardour multiple times in order to make these plugins work. **Panflow** is running fine + +![Panflow](/vst-on-linux-2/panflow.png) + +Gatelab is working fine too + +![Gatelab](/vst-on-linux-2/gatelab.png) + +But when I add **Filterstep**, Ardour stops responding. I'm sure it's possible to make it work too, so I will try doing that in one of the next articles. + +## PaulXStretch 👍👍👎 + +I was tired after the **Audiomodern** plugins, because they were freezing my Ardour and I had to log out and log in again to my system, because Ardour wouldn't start again otherwise. + +But **PaulXStretch** has a native Linux version too, and it has given me the strength to finish this top. + +So I'm just installing it with a package manager. + +But my expectations were too high. I couldn't add PaulXStretch as a plugin in my **DAW**, even **Carla** couldn't handle it. Only [Kushview Element](https://kushview.net/) could handle it outside a **DAW**, but I couldn't connect Ardour with Element using JACK yet (probably because I'm too dumb). And when running **Element** as a plugin inside **Ardour**, Ardour crashes as soon as I add PaulXStretch. + +But on the official site of this plugin, there is nothing said about a plugin version of PaulXStretch for Linux. So you can use it as a standalone application. Just record whatever you want, stretch it, and render an audio file to import into your **DAW**. +![PaulXStretch as a standalone application](/vst-on-linux-2/paulxstretch.png) +--- + +Actually, I'm very happy to see that 4 of 9 plugins have native Linux support. It means that developers see now that Linux can be used for music production. And it makes me feel that Linux is becoming more adopted by more and more guys who just want to make music without struggling with their systems. + +--- + +Thanks for reading + +Oi! + + diff --git a/content/posts/vst-on-linux-3/cover.png b/content/posts/vst-on-linux-3/cover.png new file mode 100644 index 0000000..d2524d9 Binary files /dev/null and b/content/posts/vst-on-linux-3/cover.png differ diff --git a/content/posts/vst-on-linux-3/index.md b/content/posts/vst-on-linux-3/index.md new file mode 100644 index 0000000..e49a1e0 --- /dev/null +++ b/content/posts/vst-on-linux-3/index.md @@ -0,0 +1,198 @@ +--- +title: "Vst on Linux 3" +date: 2023-02-03T18:26:44+01:00 +draft: false +ShowToc: true +cover: + image: "cover.png" + caption: "Vst on Linux" + relative: false + responsiveImages: false +tags: ['music'] +--- + +I've been told that I should add descriptions to the plugins I'm writing about here, and it sounds very reasonable.
+ +## Prerequisite + +All of them are covered in [the first post]({{< ref "vst-on-linux-1" >}}) + +- You have Wine and Yabridge installed +- You have a Wine prefix configured (in my case, `$HOME/.wine_vst_plugins/`, so every time you see it, use your own wine prefix) + +## Before we begin + +In the previous post, I was trying to run paulxstretch on Linux, and using it as a plugin in a DAW didn't work out. I've tried to update the JUCE library in the source code, and now it's working. You can find the code here: [https://git.badhouseplants.net/badhouseplants/paulxstretch](https://git.badhouseplants.net/badhouseplants/paulxstretch) + +To build, refer to the official build doc or use the `/build_docker.sh` script + +## Melda Free Bundle ๐Ÿ‘ + +You can find it here: + +It's not a one plugin, but a whole bunch of them. I used to have it on my Mac a long ago, but I hate this way of managing plugins, so I don't install them if I'm not sure, I need them. And I never felt this way about **Melda Free Bundle.** + +But now I'll try running it on **Linux**. I don't think I'll ever use it, even if it runs smoothly as hell. So I will create an additional temporary wine prefix to install it there. + + $ export WINEPREFIX="$HOME/.wine_vst_plugins_tmp" + $ wine maudioplugins_16_01_setup.exe + +Install Melda Free bundle +### MDrummer + +[MDrummer](https://www.meldaproduction.com/MDrummer) + +Let's start with the MDrummer plugin. I've tried to run it as a plugin in **Ardour**,but it killed it. So I've added it with a **Carla** as a wrapper + +I'm downloading several GBs of samples and other data to make it work. + +![MDrummer](/vst-on-linux-3/mdrummer.png) +I'd say it's over-bloated. And UI is a wee bit laggy on Linux +Let's see what we can do with it + +{{< video "/vst-on-linux-3/mdrummer-example.mp4" "video-1" >}} + +### MDrumReplacer +[MDrumReplacer](https://www.meldaproduction.com/MDrumReplacer) + +As I understand, this plugin is catching drum signals and replacing them with another sound. So I'll add it next to the **MDrummer** in the **Carla Rack**. +![Replacer](/vst-on-linux-3/mdrummer-lfo-carla.png) + +I've also added LFO in the chain, to modify the pitch value of the **MDrumReplacer** with it, so it doesn't sound so boring (at least, to me). + +{{< video "/vst-on-linux-3/mdrummer-example-lfo.mp4" "video-2" >}} + +### MPowerSynth + +It's just a synth. I don't like the interface, and I'm not gonna use it in the future, so I'm simply using a preset. + +{{< video "/vst-on-linux-3/mpower-synth.mp4" "video-3" >}} + +It sounds a wee bet laggy, but it's just on a video + +--- + +*I'm sorry, I'm not trying all of them, because it's too much. Let's try another one and go further.* + +### MGuitarArchitect + +It's like a pedal board, as I see. I can add many effects here. Let's try adding it to the same **Carla Rack** that is used for the bass. + +{{< video "/vst-on-linux-3/mguitar-architect.mp4" "video-4" >}} + +--- + +## u-he TyrellN6 ๐Ÿ‘ +[u-he TyrellN6 ](https://u-he.com/products/tyrelln6/) + +I think that almost every **u-he** plugins has a native **Linux** support, and **Tyrell** is not an exception. But you most probably won't be able to install it with a package manager, because it's available only on Amazona.de. So just follow the link on the official web-page and download the Linux version. + +This is a pretty minimalistic synth, that can help you produce a lot of cool sounds without spending tons of hours learning it. 
I definitely can recommend it, as I can recommend anything developed by **u-he** + +Unpack the archive and run the **./install.sh** script + + $ ./TyrellN6-3898/install.sh + +{{< video "/vst-on-linux-3/tyrell.mp4" "video-5" >}} + +I can't tell anything bad about **u-he**, I love all their plugins, I love that they support Linux and I love that they together with **Bitwig** are working on the new plugin open-source format ([CLAP](https://github.com/free-audio/clap)). + +Tyrell is just a very cool free synth. What else can I say? + +--- + +## Valhalla Supermassive ๐Ÿ‘ + +I've already covered Valhalla plugins in the [first post]({{< ref "vst-on-linux-1" >}}), so it doesn't make sense to repeat myself, instead of that, I'll add every free **Valhalla** plugin to the project and show you how it's working. + +{{< video "/vst-on-linux-3/vallhalla.mp4" "video-6" >}} + + +## Spitfire Labs ๐Ÿ‘ +[Spitfire Labs](https://labs.spitfireaudio.com/?sortBy=prod_products_labs_latest) + +I think, everybody knows what's that. But if you don't and you don't mind creating a Spitfire account, just try. + +I need to create an account for downloading that. *But I already have one, because I'm an active LABS user.* + + $ wine SpitfireAudio-Win-3.4.0.exe + $ yabridgectl sync + +You will have to install presets +![Spitfire App](/vst-on-linux-3/spitfire-labs-app.png) + +{{< video "/vst-on-linux-3/labs-example.mp4" "video-7" >}} + +## Infinite Space Piano ๐Ÿ‘ + +[Space Piano](https://zaksound.com/infinite-space-piano/) + +This is a piano synth. I already have a lot of them, and as for me, this is yet another one. But is sounds pretty good, so you may like it. + +This time I really need to create an account, because it's the first time I hear about this one. + +There is no **Linux** version, I'm getting the **Windows** one. I've got a feeling that I'm not gonna use it in the future, so I'm installing it in the temporary prefix. + + $ export WINEPREFIX="$HOME/.wine_vst_plugins_tmp/" + $ wine Infinite\ Space\ Piano\ 2.exe + $ yabridgectl sync + +{{< video "/vst-on-linux-3/space-piano.mp4" "video-8" >}} + +## ProjectSAM Free Orchestra ๐Ÿ‘Ž +[Free Orchestra](https://projectsam.com/libraries/the-free-orchestra/) +![Free Orchestra](/vst-on-linux-3/native-access.png) +Ok, let's try +I'm downloading **Native Access**, and it's not working. And this is the moment, I'm giving up on this plugin. + +## Eventide Pendulate๐Ÿ‘ +[Eventide Pendulate](https://www.eventideaudio.com/plug-ins/pendulate/) + +Downloading a Windows version again. + + $ wine Pendulate-1.3.6-windows-installer.exe + $ yabridgectl sync + +{{< video "/vst-on-linux-3/eventide-pendulate.mp4" "video-9" >}} + +Runnin just fine +As you see, this is a pretty interesting Synth, I have enough of synths for everything, but this one may join the ranks too. + +## VCV Rack ๐Ÿ‘ +[VCV Rack](https://vcvrack.com/) + +VCV Rack is an open-source Eurorack modular synthesizer simulator + +I've already covered the Cardinal plugin [here]({{< ref "vst-on-linux-2" >}}). And this is basically the same thing. And so I will just show **Cardinal** instead of **VCV Rack**. But if you want VCV Rack, it's working on Linux just fine, but you can't use the free version as a plugin, that's why I'm showing Cardinal + +{{< video "/vst-on-linux-3/vcv-rack.mp4" "video-10" >}} + +I didn't have enough time to learn it yet, so that's what I could do with it + +## U-He Protoverb ๐Ÿ‘ +[U-He Protoverb](https://u-he.com/products/protoverb/) + +Protoverb is a reverb created by u-he. 

## Paulstretch 👍

It's already covered in the previous article, but since then one thing has changed. As mentioned at the very beginning of this post, I've updated the JUCE library in the source code, and now it's running as a VST plugin. If you missed it, try reading the beginning one more time.

I'm currently testing it, but you can help me with that.

## Bonus: Vital 👍
[Vital](https://vital.audio/)

I think this is the best synth ever made. It's open source and has Linux support. I use it in every project, and if you don't, I recommend you start doing the same.

{{< video "/vst-on-linux-3/vital.mp4" "video-11" >}}

The video is laggy again, but in reality it's fine.

---
If you like what I'm doing, you can follow my [twitter](https://twitter.com/_allanger) or [mastodon](https://mastodon.social/@allanger), since I'll be posting all the updates there.

Thanks
Oi! diff --git a/content/search.md b/content/search.md new file mode 100644 index 0000000..39ef5f9 --- /dev/null +++ b/content/search.md @@ -0,0 +1,8 @@ +--- +title: "Search" # in any language you want +layout: "search" # is necessary +# url: "/archive" +# description: "Description for Search" +summary: "search" +placeholder: "placeholder text in search input box" +--- \ No newline at end of file diff --git a/layouts/shortcodes/details.html b/layouts/shortcodes/details.html new file mode 100644 index 0000000..b6054f9 --- /dev/null +++ b/layouts/shortcodes/details.html @@ -0,0 +1,4 @@ +
+ {{ (.Get 0) | markdownify }} + {{ .Inner | markdownify }} +
\ No newline at end of file diff --git a/layouts/shortcodes/rawhtml.html b/layouts/shortcodes/rawhtml.html new file mode 100644 index 0000000..b90bea2 --- /dev/null +++ b/layouts/shortcodes/rawhtml.html @@ -0,0 +1,2 @@ + +{{.Inner}} diff --git a/layouts/shortcodes/video.html b/layouts/shortcodes/video.html new file mode 100644 index 0000000..6a7a985 --- /dev/null +++ b/layouts/shortcodes/video.html @@ -0,0 +1,23 @@ +
+
+
+ + + + + diff --git a/layouts/taxonomy/tag.html b/layouts/taxonomy/tag.html new file mode 100644 index 0000000..db85c20 --- /dev/null +++ b/layouts/taxonomy/tag.html @@ -0,0 +1,8 @@ + + diff --git a/resources/_gen/assets/css/ananke/css/main.css_83735de7ca999e9c17f3419b41b93fdb.content b/resources/_gen/assets/css/ananke/css/main.css_83735de7ca999e9c17f3419b41b93fdb.content new file mode 100644 index 0000000..e19d307 --- /dev/null +++ b/resources/_gen/assets/css/ananke/css/main.css_83735de7ca999e9c17f3419b41b93fdb.content @@ -0,0 +1 @@ +/*!TACHYONS v4.9.1 | http://tachyons.io*//*!TACHYONS v4.12.0 | http://tachyons.io*//*!normalize.css v8.0.0 | MIT License | github.com/necolas/normalize.css*/html{line-height:1.15;-webkit-text-size-adjust:100%}body{margin:0}h1{font-size:2em;margin:.67em 0}hr{box-sizing:content-box;height:0;overflow:visible}pre{font-family:monospace,monospace;font-size:1em}a{background-color:transparent}abbr[title]{border-bottom:none;text-decoration:underline;-webkit-text-decoration:underline dotted;text-decoration:underline dotted}b,strong{font-weight:bolder}code,kbd,samp{font-family:monospace,monospace;font-size:1em}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}img{border-style:none}button,input,optgroup,select,textarea{font-family:inherit;font-size:100%;line-height:1.15;margin:0}button,input{overflow:visible}button,select{text-transform:none}[type=button],[type=reset],[type=submit],button{-webkit-appearance:button}[type=button]::-moz-focus-inner,[type=reset]::-moz-focus-inner,[type=submit]::-moz-focus-inner,button::-moz-focus-inner{border-style:none;padding:0}[type=button]:-moz-focusring,[type=reset]:-moz-focusring,[type=submit]:-moz-focusring,button:-moz-focusring{outline:1px dotted ButtonText}fieldset{padding:.35em .75em 
.625em}legend{box-sizing:border-box;color:inherit;display:table;max-width:100%;padding:0;white-space:normal}progress{vertical-align:baseline}textarea{overflow:auto}[type=checkbox],[type=radio]{box-sizing:border-box;padding:0}[type=number]::-webkit-inner-spin-button,[type=number]::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}[type=search]::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}details{display:block}summary{display:list-item}[hidden],template{display:none}.border-box,a,article,aside,blockquote,body,code,dd,div,dl,dt,fieldset,figcaption,figure,footer,form,h1,h2,h3,h4,h5,h6,header,html,input[type=email],input[type=number],input[type=password],input[type=tel],input[type=text],input[type=url],legend,li,main,nav,ol,p,pre,section,table,td,textarea,th,tr,ul{box-sizing:border-box}.aspect-ratio{height:0;position:relative}.aspect-ratio--16x9{padding-bottom:56.25%}.aspect-ratio--9x16{padding-bottom:177.77%}.aspect-ratio--4x3{padding-bottom:75%}.aspect-ratio--3x4{padding-bottom:133.33%}.aspect-ratio--6x4{padding-bottom:66.6%}.aspect-ratio--4x6{padding-bottom:150%}.aspect-ratio--8x5{padding-bottom:62.5%}.aspect-ratio--5x8{padding-bottom:160%}.aspect-ratio--7x5{padding-bottom:71.42%}.aspect-ratio--5x7{padding-bottom:140%}.aspect-ratio--1x1{padding-bottom:100%}.aspect-ratio--object{position:absolute;top:0;right:0;bottom:0;left:0;width:100%;height:100%;z-index:100}img{max-width:100%}.cover{background-size:cover!important}.contain{background-size:contain!important}.bg-center{background-position:50%}.bg-center,.bg-top{background-repeat:no-repeat}.bg-top{background-position:top}.bg-right{background-position:100%}.bg-bottom,.bg-right{background-repeat:no-repeat}.bg-bottom{background-position:bottom}.bg-left{background-repeat:no-repeat;background-position:0}.outline{outline:1px solid}.outline-transparent{outline:1px solid 
transparent}.outline-0{outline:0}.ba{border-style:solid;border-width:1px}.bt{border-top-style:solid;border-top-width:1px}.br{border-right-style:solid;border-right-width:1px}.bb{border-bottom-style:solid;border-bottom-width:1px}.bl{border-left-style:solid;border-left-width:1px}.bn{border-style:none;border-width:0}.b--black{border-color:#000}.b--near-black{border-color:#111}.b--dark-gray{border-color:#333}.b--mid-gray{border-color:#555}.b--gray{border-color:#777}.b--silver{border-color:#999}.b--light-silver{border-color:#aaa}.b--moon-gray{border-color:#ccc}.b--light-gray{border-color:#eee}.b--near-white{border-color:#f4f4f4}.b--white{border-color:#fff}.b--white-90{border-color:rgba(255,255,255,.9)}.b--white-80{border-color:rgba(255,255,255,.8)}.b--white-70{border-color:rgba(255,255,255,.7)}.b--white-60{border-color:rgba(255,255,255,.6)}.b--white-50{border-color:rgba(255,255,255,.5)}.b--white-40{border-color:rgba(255,255,255,.4)}.b--white-30{border-color:rgba(255,255,255,.3)}.b--white-20{border-color:rgba(255,255,255,.2)}.b--white-10{border-color:rgba(255,255,255,.1)}.b--white-05{border-color:rgba(255,255,255,5%)}.b--white-025{border-color:rgba(255,255,255,.025)}.b--white-0125{border-color:rgba(255,255,255,.0125)}.b--black-90{border-color:rgba(0,0,0,.9)}.b--black-80{border-color:rgba(0,0,0,.8)}.b--black-70{border-color:rgba(0,0,0,.7)}.b--black-60{border-color:rgba(0,0,0,.6)}.b--black-50{border-color:rgba(0,0,0,.5)}.b--black-40{border-color:rgba(0,0,0,.4)}.b--black-30{border-color:rgba(0,0,0,.3)}.b--black-20{border-color:rgba(0,0,0,.2)}.b--black-10{border-color:rgba(0,0,0,.1)}.b--black-05{border-color:rgba(0,0,0,5%)}.b--black-025{border-color:rgba(0,0,0,.025)}.b--black-0125{border-color:rgba(0,0,0,.0125)}.b--dark-red{border-color:#e7040f}.b--red{border-color:#ff4136}.b--light-red{border-color:#ff725c}.b--orange{border-color:#ff6300}.b--gold{border-color:#ffb700}.b--yellow{border-color:gold}.b--light-yellow{border-color:#fbf1a9}.b--purple{border-color:#5e2ca5}.b--light-purple{border-color:#a463f2}.b--dark-pink{border-color:#d5008f}.b--hot-pink{border-color:#ff41b4}.b--pink{border-color:#ff80cc}.b--light-pink{border-color:#ffa3d7}.b--dark-green{border-color:#137752}.b--green{border-color:#19a974}.b--light-green{border-color:#9eebcf}.b--navy{border-color:#001b44}.b--dark-blue{border-color:#00449e}.b--blue{border-color:#357edd}.b--light-blue{border-color:#96ccff}.b--lightest-blue{border-color:#cdecff}.b--washed-blue{border-color:#f6fffe}.b--washed-green{border-color:#e8fdf5}.b--washed-yellow{border-color:#fffceb}.b--washed-red{border-color:#ffdfdf}.b--transparent{border-color:transparent}.b--inherit{border-color:inherit}.b--initial{border-color:initial}.b--unset{border-color:unset}.br0{border-radius:0}.br1{border-radius:.125rem}.br2{border-radius:.25rem}.br3{border-radius:.5rem}.br4{border-radius:1rem}.br-100{border-radius:100%}.br-pill{border-radius:9999px}.br--bottom{border-top-left-radius:0;border-top-right-radius:0}.br--top{border-bottom-right-radius:0}.br--right,.br--top{border-bottom-left-radius:0}.br--right{border-top-left-radius:0}.br--left{border-top-right-radius:0;border-bottom-right-radius:0}.br-inherit{border-radius:inherit}.br-initial{border-radius:initial}.br-unset{border-radius:unset}.b--dotted{border-style:dotted}.b--dashed{border-style:dashed}.b--solid{border-style:solid}.b--none{border-style:none}.bw0{border-width:0}.bw1{border-width:.125rem}.bw2{border-width:.25rem}.bw3{border-width:.5rem}.bw4{border-width:1rem}.bw5{border-width:2rem}.bt-0{border-top-width:0}.br-0{border-right-wi
dth:0}.bb-0{border-bottom-width:0}.bl-0{border-left-width:0}.shadow-1{box-shadow:0 0 4px 2px rgba(0,0,0,.2)}.shadow-2{box-shadow:0 0 8px 2px rgba(0,0,0,.2)}.shadow-3{box-shadow:2px 2px 4px 2px rgba(0,0,0,.2)}.shadow-4{box-shadow:2px 2px 8px rgba(0,0,0,.2)}.shadow-5{box-shadow:4px 4px 8px rgba(0,0,0,.2)}.pre{overflow-x:auto;overflow-y:hidden;overflow:scroll}.top-0{top:0}.right-0{right:0}.bottom-0{bottom:0}.left-0{left:0}.top-1{top:1rem}.right-1{right:1rem}.bottom-1{bottom:1rem}.left-1{left:1rem}.top-2{top:2rem}.right-2{right:2rem}.bottom-2{bottom:2rem}.left-2{left:2rem}.top--1{top:-1rem}.right--1{right:-1rem}.bottom--1{bottom:-1rem}.left--1{left:-1rem}.top--2{top:-2rem}.right--2{right:-2rem}.bottom--2{bottom:-2rem}.left--2{left:-2rem}.absolute--fill{top:0;right:0;bottom:0;left:0}.cf:after,.cf:before{content:" ";display:table}.cf:after{clear:both}.cf{*zoom:1}.cl{clear:left}.cr{clear:right}.cb{clear:both}.cn{clear:none}.dn{display:none}.di{display:inline}.db{display:block}.dib{display:inline-block}.dit{display:inline-table}.dt{display:table}.dtc{display:table-cell}.dt-row{display:table-row}.dt-row-group{display:table-row-group}.dt-column{display:table-column}.dt-column-group{display:table-column-group}.dt--fixed{table-layout:fixed;width:100%}.flex{display:flex}.inline-flex{display:inline-flex}.flex-auto{flex:auto;min-width:0;min-height:0}.flex-none{flex:none}.flex-column{flex-direction:column}.flex-row{flex-direction:row}.flex-wrap{flex-wrap:wrap}.flex-nowrap{flex-wrap:nowrap}.flex-wrap-reverse{flex-wrap:wrap-reverse}.flex-column-reverse{flex-direction:column-reverse}.flex-row-reverse{flex-direction:row-reverse}.items-start{align-items:flex-start}.items-end{align-items:flex-end}.items-center{align-items:center}.items-baseline{align-items:baseline}.items-stretch{align-items:stretch}.self-start{align-self:flex-start}.self-end{align-self:flex-end}.self-center{align-self:center}.self-baseline{align-self:baseline}.self-stretch{align-self:stretch}.justify-start{justify-content:flex-start}.justify-end{justify-content:flex-end}.justify-center{justify-content:center}.justify-between{justify-content:space-between}.justify-around{justify-content:space-around}.content-start{align-content:flex-start}.content-end{align-content:flex-end}.content-center{align-content:center}.content-between{align-content:space-between}.content-around{align-content:space-around}.content-stretch{align-content:stretch}.order-0{order:0}.order-1{order:1}.order-2{order:2}.order-3{order:3}.order-4{order:4}.order-5{order:5}.order-6{order:6}.order-7{order:7}.order-8{order:8}.order-last{order:99999}.flex-grow-0{flex-grow:0}.flex-grow-1{flex-grow:1}.flex-shrink-0{flex-shrink:0}.flex-shrink-1{flex-shrink:1}.fl{float:left}.fl,.fr{_display:inline}.fr{float:right}.fn{float:none}.sans-serif{font-family:-apple-system,BlinkMacSystemFont,avenir next,avenir,helvetica neue,helvetica,ubuntu,roboto,noto,segoe ui,arial,sans-serif}.serif{font-family:georgia,times,serif}.system-sans-serif{font-family:sans-serif}.system-serif{font-family:serif}.code,code{font-family:Consolas,monaco,monospace}.courier{font-family:Courier Next,courier,monospace}.helvetica{font-family:helvetica neue,helvetica,sans-serif}.avenir{font-family:avenir next,avenir,sans-serif}.athelas{font-family:athelas,georgia,serif}.georgia{font-family:georgia,serif}.times{font-family:times,serif}.bodoni{font-family:Bodoni MT,serif}.calisto{font-family:Calisto 
MT,serif}.garamond{font-family:garamond,serif}.baskerville{font-family:baskerville,serif}.i{font-style:italic}.fs-normal{font-style:normal}.normal{font-weight:400}.b{font-weight:700}.fw1{font-weight:100}.fw2{font-weight:200}.fw3{font-weight:300}.fw4{font-weight:400}.fw5{font-weight:500}.fw6{font-weight:600}.fw7{font-weight:700}.fw8{font-weight:800}.fw9{font-weight:900}.input-reset{-webkit-appearance:none;-moz-appearance:none}.button-reset::-moz-focus-inner,.input-reset::-moz-focus-inner{border:0;padding:0}.h1{height:1rem}.h2{height:2rem}.h3{height:4rem}.h4{height:8rem}.h5{height:16rem}.h-25{height:25%}.h-50{height:50%}.h-75{height:75%}.h-100{height:100%}.min-h-100{min-height:100%}.vh-25{height:25vh}.vh-50{height:50vh}.vh-75{height:75vh}.vh-100{height:100vh}.min-vh-100{min-height:100vh}.h-auto{height:auto}.h-inherit{height:inherit}.tracked{letter-spacing:.1em}.tracked-tight{letter-spacing:-.05em}.tracked-mega{letter-spacing:.25em}.lh-solid{line-height:1}.lh-title{line-height:1.25}.lh-copy{line-height:1.5}.link{text-decoration:none}.link,.link:active,.link:focus,.link:hover,.link:link,.link:visited{transition:color .15s ease-in}.link:focus{outline:1px dotted currentColor}.list{list-style-type:none}.mw-100{max-width:100%}.mw1{max-width:1rem}.mw2{max-width:2rem}.mw3{max-width:4rem}.mw4{max-width:8rem}.mw5{max-width:16rem}.mw6{max-width:32rem}.mw7{max-width:48rem}.mw8{max-width:64rem}.mw9{max-width:96rem}.mw-none{max-width:none}.w1{width:1rem}.w2{width:2rem}.w3{width:4rem}.w4{width:8rem}.w5{width:16rem}.w-10{width:10%}.w-20{width:20%}.w-25{width:25%}.w-30{width:30%}.w-33{width:33%}.w-34{width:34%}.w-40{width:40%}.w-50{width:50%}.w-60{width:60%}.w-70{width:70%}.w-75{width:75%}.w-80{width:80%}.w-90{width:90%}.w-100{width:100%}.w-third{width:33.33333%}.w-two-thirds{width:66.66667%}.w-auto{width:auto}.overflow-visible{overflow:visible}.overflow-hidden{overflow:hidden}.overflow-scroll{overflow:scroll}.overflow-auto{overflow:auto}.overflow-x-visible{overflow-x:visible}.overflow-x-hidden{overflow-x:hidden}.overflow-x-scroll{overflow-x:scroll}.overflow-x-auto{overflow-x:auto}.overflow-y-visible{overflow-y:visible}.overflow-y-hidden{overflow-y:hidden}.overflow-y-scroll{overflow-y:scroll}.overflow-y-auto{overflow-y:auto}.static{position:static}.relative{position:relative}.absolute{position:absolute}.fixed{position:fixed}.o-100{opacity:1}.o-90{opacity:.9}.o-80{opacity:.8}.o-70{opacity:.7}.o-60{opacity:.6}.o-50{opacity:.5}.o-40{opacity:.4}.o-30{opacity:.3}.o-20{opacity:.2}.o-10{opacity:.1}.o-05{opacity:.05}.o-025{opacity:.025}.o-0{opacity:0}.rotate-45{-webkit-transform:rotate(45deg);transform:rotate(45deg)}.rotate-90{-webkit-transform:rotate(90deg);transform:rotate(90deg)}.rotate-135{-webkit-transform:rotate(135deg);transform:rotate(135deg)}.rotate-180{-webkit-transform:rotate(180deg);transform:rotate(180deg)}.rotate-225{-webkit-transform:rotate(225deg);transform:rotate(225deg)}.rotate-270{-webkit-transform:rotate(270deg);transform:rotate(270deg)}.rotate-315{-webkit-transform:rotate(315deg);transform:rotate(315deg)}.black-90{color:rgba(0,0,0,.9)}.black-80{color:rgba(0,0,0,.8)}.black-70{color:rgba(0,0,0,.7)}.black-60{color:rgba(0,0,0,.6)}.black-50{color:rgba(0,0,0,.5)}.black-40{color:rgba(0,0,0,.4)}.black-30{color:rgba(0,0,0,.3)}.black-20{color:rgba(0,0,0,.2)}.black-10{color:rgba(0,0,0,.1)}.black-05{color:rgba(0,0,0,5%)}.white-90{color:rgba(255,255,255,.9)}.white-80{color:rgba(255,255,255,.8)}.white-70{color:rgba(255,255,255,.7)}.white-60{color:rgba(255,255,255,.6)}.white-50{color:rgba(255,255,255,.5)}.white
-40{color:rgba(255,255,255,.4)}.white-30{color:rgba(255,255,255,.3)}.white-20{color:rgba(255,255,255,.2)}.white-10{color:rgba(255,255,255,.1)}.black{color:#000}.near-black{color:#111}.dark-gray{color:#333}.mid-gray{color:#555}.gray{color:#777}.silver{color:#999}.light-silver{color:#aaa}.moon-gray{color:#ccc}.light-gray{color:#eee}.near-white{color:#f4f4f4}.white{color:#fff}.dark-red{color:#e7040f}.red{color:#ff4136}.light-red{color:#ff725c}.orange{color:#ff6300}.gold{color:#ffb700}.yellow{color:gold}.light-yellow{color:#fbf1a9}.purple{color:#5e2ca5}.light-purple{color:#a463f2}.dark-pink{color:#d5008f}.hot-pink{color:#ff41b4}.pink{color:#ff80cc}.light-pink{color:#ffa3d7}.dark-green{color:#137752}.green{color:#19a974}.light-green{color:#9eebcf}.navy{color:#001b44}.dark-blue{color:#00449e}.blue{color:#357edd}.light-blue{color:#96ccff}.lightest-blue{color:#cdecff}.washed-blue{color:#f6fffe}.washed-green{color:#e8fdf5}.washed-yellow{color:#fffceb}.washed-red{color:#ffdfdf}.color-inherit{color:inherit}.bg-black-90{background-color:rgba(0,0,0,.9)}.bg-black-80{background-color:rgba(0,0,0,.8)}.bg-black-70{background-color:rgba(0,0,0,.7)}.bg-black-60{background-color:rgba(0,0,0,.6)}.bg-black-50{background-color:rgba(0,0,0,.5)}.bg-black-40{background-color:rgba(0,0,0,.4)}.bg-black-30{background-color:rgba(0,0,0,.3)}.bg-black-20{background-color:rgba(0,0,0,.2)}.bg-black-10{background-color:rgba(0,0,0,.1)}.bg-black-05{background-color:rgba(0,0,0,5%)}.bg-white-90{background-color:rgba(255,255,255,.9)}.bg-white-80{background-color:rgba(255,255,255,.8)}.bg-white-70{background-color:rgba(255,255,255,.7)}.bg-white-60{background-color:rgba(255,255,255,.6)}.bg-white-50{background-color:rgba(255,255,255,.5)}.bg-white-40{background-color:rgba(255,255,255,.4)}.bg-white-30{background-color:rgba(255,255,255,.3)}.bg-white-20{background-color:rgba(255,255,255,.2)}.bg-white-10{background-color:rgba(255,255,255,.1)}.bg-black{background-color:#000}.bg-near-black{background-color:#111}.bg-dark-gray{background-color:#333}.bg-mid-gray{background-color:#555}.bg-gray{background-color:#777}.bg-silver{background-color:#999}.bg-light-silver{background-color:#aaa}.bg-moon-gray{background-color:#ccc}.bg-light-gray{background-color:#eee}.bg-near-white{background-color:#f4f4f4}.bg-white{background-color:#fff}.bg-transparent{background-color:transparent}.bg-dark-red{background-color:#e7040f}.bg-red{background-color:#ff4136}.bg-light-red{background-color:#ff725c}.bg-orange{background-color:#ff6300}.bg-gold{background-color:#ffb700}.bg-yellow{background-color:gold}.bg-light-yellow{background-color:#fbf1a9}.bg-purple{background-color:#5e2ca5}.bg-light-purple{background-color:#a463f2}.bg-dark-pink{background-color:#d5008f}.bg-hot-pink{background-color:#ff41b4}.bg-pink{background-color:#ff80cc}.bg-light-pink{background-color:#ffa3d7}.bg-dark-green{background-color:#137752}.bg-green{background-color:#19a974}.bg-light-green{background-color:#9eebcf}.bg-navy{background-color:#001b44}.bg-dark-blue{background-color:#00449e}.bg-blue{background-color:#357edd}.bg-light-blue{background-color:#96ccff}.bg-lightest-blue{background-color:#cdecff}.bg-washed-blue{background-color:#f6fffe}.bg-washed-green{background-color:#e8fdf5}.bg-washed-yellow{background-color:#fffceb}.bg-washed-red{background-color:#ffdfdf}.bg-inherit{background-color:inherit}.hover-black:focus,.hover-black:hover{color:#000}.hover-near-black:focus,.hover-near-black:hover{color:#111}.hover-dark-gray:focus,.hover-dark-gray:hover{color:#333}.hover-mid-gray:focus,.hover-mid-gray:hover{
color:#555}.hover-gray:focus,.hover-gray:hover{color:#777}.hover-silver:focus,.hover-silver:hover{color:#999}.hover-light-silver:focus,.hover-light-silver:hover{color:#aaa}.hover-moon-gray:focus,.hover-moon-gray:hover{color:#ccc}.hover-light-gray:focus,.hover-light-gray:hover{color:#eee}.hover-near-white:focus,.hover-near-white:hover{color:#f4f4f4}.hover-white:focus,.hover-white:hover{color:#fff}.hover-black-90:focus,.hover-black-90:hover{color:rgba(0,0,0,.9)}.hover-black-80:focus,.hover-black-80:hover{color:rgba(0,0,0,.8)}.hover-black-70:focus,.hover-black-70:hover{color:rgba(0,0,0,.7)}.hover-black-60:focus,.hover-black-60:hover{color:rgba(0,0,0,.6)}.hover-black-50:focus,.hover-black-50:hover{color:rgba(0,0,0,.5)}.hover-black-40:focus,.hover-black-40:hover{color:rgba(0,0,0,.4)}.hover-black-30:focus,.hover-black-30:hover{color:rgba(0,0,0,.3)}.hover-black-20:focus,.hover-black-20:hover{color:rgba(0,0,0,.2)}.hover-black-10:focus,.hover-black-10:hover{color:rgba(0,0,0,.1)}.hover-white-90:focus,.hover-white-90:hover{color:rgba(255,255,255,.9)}.hover-white-80:focus,.hover-white-80:hover{color:rgba(255,255,255,.8)}.hover-white-70:focus,.hover-white-70:hover{color:rgba(255,255,255,.7)}.hover-white-60:focus,.hover-white-60:hover{color:rgba(255,255,255,.6)}.hover-white-50:focus,.hover-white-50:hover{color:rgba(255,255,255,.5)}.hover-white-40:focus,.hover-white-40:hover{color:rgba(255,255,255,.4)}.hover-white-30:focus,.hover-white-30:hover{color:rgba(255,255,255,.3)}.hover-white-20:focus,.hover-white-20:hover{color:rgba(255,255,255,.2)}.hover-white-10:focus,.hover-white-10:hover{color:rgba(255,255,255,.1)}.hover-inherit:focus,.hover-inherit:hover{color:inherit}.hover-bg-black:focus,.hover-bg-black:hover{background-color:#000}.hover-bg-near-black:focus,.hover-bg-near-black:hover{background-color:#111}.hover-bg-dark-gray:focus,.hover-bg-dark-gray:hover{background-color:#333}.hover-bg-mid-gray:focus,.hover-bg-mid-gray:hover{background-color:#555}.hover-bg-gray:focus,.hover-bg-gray:hover{background-color:#777}.hover-bg-silver:focus,.hover-bg-silver:hover{background-color:#999}.hover-bg-light-silver:focus,.hover-bg-light-silver:hover{background-color:#aaa}.hover-bg-moon-gray:focus,.hover-bg-moon-gray:hover{background-color:#ccc}.hover-bg-light-gray:focus,.hover-bg-light-gray:hover{background-color:#eee}.hover-bg-near-white:focus,.hover-bg-near-white:hover{background-color:#f4f4f4}.hover-bg-white:focus,.hover-bg-white:hover{background-color:#fff}.hover-bg-transparent:focus,.hover-bg-transparent:hover{background-color:transparent}.hover-bg-black-90:focus,.hover-bg-black-90:hover{background-color:rgba(0,0,0,.9)}.hover-bg-black-80:focus,.hover-bg-black-80:hover{background-color:rgba(0,0,0,.8)}.hover-bg-black-70:focus,.hover-bg-black-70:hover{background-color:rgba(0,0,0,.7)}.hover-bg-black-60:focus,.hover-bg-black-60:hover{background-color:rgba(0,0,0,.6)}.hover-bg-black-50:focus,.hover-bg-black-50:hover{background-color:rgba(0,0,0,.5)}.hover-bg-black-40:focus,.hover-bg-black-40:hover{background-color:rgba(0,0,0,.4)}.hover-bg-black-30:focus,.hover-bg-black-30:hover{background-color:rgba(0,0,0,.3)}.hover-bg-black-20:focus,.hover-bg-black-20:hover{background-color:rgba(0,0,0,.2)}.hover-bg-black-10:focus,.hover-bg-black-10:hover{background-color:rgba(0,0,0,.1)}.hover-bg-white-90:focus,.hover-bg-white-90:hover{background-color:rgba(255,255,255,.9)}.hover-bg-white-80:focus,.hover-bg-white-80:hover{background-color:rgba(255,255,255,.8)}.hover-bg-white-70:focus,.hover-bg-white-70:hover{background-color:rgba(255,255,25
5,.7)}.hover-bg-white-60:focus,.hover-bg-white-60:hover{background-color:rgba(255,255,255,.6)}.hover-bg-white-50:focus,.hover-bg-white-50:hover{background-color:rgba(255,255,255,.5)}.hover-bg-white-40:focus,.hover-bg-white-40:hover{background-color:rgba(255,255,255,.4)}.hover-bg-white-30:focus,.hover-bg-white-30:hover{background-color:rgba(255,255,255,.3)}.hover-bg-white-20:focus,.hover-bg-white-20:hover{background-color:rgba(255,255,255,.2)}.hover-bg-white-10:focus,.hover-bg-white-10:hover{background-color:rgba(255,255,255,.1)}.hover-dark-red:focus,.hover-dark-red:hover{color:#e7040f}.hover-red:focus,.hover-red:hover{color:#ff4136}.hover-light-red:focus,.hover-light-red:hover{color:#ff725c}.hover-orange:focus,.hover-orange:hover{color:#ff6300}.hover-gold:focus,.hover-gold:hover{color:#ffb700}.hover-yellow:focus,.hover-yellow:hover{color:gold}.hover-light-yellow:focus,.hover-light-yellow:hover{color:#fbf1a9}.hover-purple:focus,.hover-purple:hover{color:#5e2ca5}.hover-light-purple:focus,.hover-light-purple:hover{color:#a463f2}.hover-dark-pink:focus,.hover-dark-pink:hover{color:#d5008f}.hover-hot-pink:focus,.hover-hot-pink:hover{color:#ff41b4}.hover-pink:focus,.hover-pink:hover{color:#ff80cc}.hover-light-pink:focus,.hover-light-pink:hover{color:#ffa3d7}.hover-dark-green:focus,.hover-dark-green:hover{color:#137752}.hover-green:focus,.hover-green:hover{color:#19a974}.hover-light-green:focus,.hover-light-green:hover{color:#9eebcf}.hover-navy:focus,.hover-navy:hover{color:#001b44}.hover-dark-blue:focus,.hover-dark-blue:hover{color:#00449e}.hover-blue:focus,.hover-blue:hover{color:#357edd}.hover-light-blue:focus,.hover-light-blue:hover{color:#96ccff}.hover-lightest-blue:focus,.hover-lightest-blue:hover{color:#cdecff}.hover-washed-blue:focus,.hover-washed-blue:hover{color:#f6fffe}.hover-washed-green:focus,.hover-washed-green:hover{color:#e8fdf5}.hover-washed-yellow:focus,.hover-washed-yellow:hover{color:#fffceb}.hover-washed-red:focus,.hover-washed-red:hover{color:#ffdfdf}.hover-bg-dark-red:focus,.hover-bg-dark-red:hover{background-color:#e7040f}.hover-bg-red:focus,.hover-bg-red:hover{background-color:#ff4136}.hover-bg-light-red:focus,.hover-bg-light-red:hover{background-color:#ff725c}.hover-bg-orange:focus,.hover-bg-orange:hover{background-color:#ff6300}.hover-bg-gold:focus,.hover-bg-gold:hover{background-color:#ffb700}.hover-bg-yellow:focus,.hover-bg-yellow:hover{background-color:gold}.hover-bg-light-yellow:focus,.hover-bg-light-yellow:hover{background-color:#fbf1a9}.hover-bg-purple:focus,.hover-bg-purple:hover{background-color:#5e2ca5}.hover-bg-light-purple:focus,.hover-bg-light-purple:hover{background-color:#a463f2}.hover-bg-dark-pink:focus,.hover-bg-dark-pink:hover{background-color:#d5008f}.hover-bg-hot-pink:focus,.hover-bg-hot-pink:hover{background-color:#ff41b4}.hover-bg-pink:focus,.hover-bg-pink:hover{background-color:#ff80cc}.hover-bg-light-pink:focus,.hover-bg-light-pink:hover{background-color:#ffa3d7}.hover-bg-dark-green:focus,.hover-bg-dark-green:hover{background-color:#137752}.hover-bg-green:focus,.hover-bg-green:hover{background-color:#19a974}.hover-bg-light-green:focus,.hover-bg-light-green:hover{background-color:#9eebcf}.hover-bg-navy:focus,.hover-bg-navy:hover{background-color:#001b44}.hover-bg-dark-blue:focus,.hover-bg-dark-blue:hover{background-color:#00449e}.hover-bg-blue:focus,.hover-bg-blue:hover{background-color:#357edd}.hover-bg-light-blue:focus,.hover-bg-light-blue:hover{background-color:#96ccff}.hover-bg-lightest-blue:focus,.hover-bg-lightest-blue:hover{background-color:#c
decff}.hover-bg-washed-blue:focus,.hover-bg-washed-blue:hover{background-color:#f6fffe}.hover-bg-washed-green:focus,.hover-bg-washed-green:hover{background-color:#e8fdf5}.hover-bg-washed-yellow:focus,.hover-bg-washed-yellow:hover{background-color:#fffceb}.hover-bg-washed-red:focus,.hover-bg-washed-red:hover{background-color:#ffdfdf}.hover-bg-inherit:focus,.hover-bg-inherit:hover{background-color:inherit}.pa0{padding:0}.pa1{padding:.25rem}.pa2{padding:.5rem}.pa3{padding:1rem}.pa4{padding:2rem}.pa5{padding:4rem}.pa6{padding:8rem}.pa7{padding:16rem}.pl0{padding-left:0}.pl1{padding-left:.25rem}.pl2{padding-left:.5rem}.pl3{padding-left:1rem}.pl4{padding-left:2rem}.pl5{padding-left:4rem}.pl6{padding-left:8rem}.pl7{padding-left:16rem}.pr0{padding-right:0}.pr1{padding-right:.25rem}.pr2{padding-right:.5rem}.pr3{padding-right:1rem}.pr4{padding-right:2rem}.pr5{padding-right:4rem}.pr6{padding-right:8rem}.pr7{padding-right:16rem}.pb0{padding-bottom:0}.pb1{padding-bottom:.25rem}.pb2{padding-bottom:.5rem}.pb3{padding-bottom:1rem}.pb4{padding-bottom:2rem}.pb5{padding-bottom:4rem}.pb6{padding-bottom:8rem}.pb7{padding-bottom:16rem}.pt0{padding-top:0}.pt1{padding-top:.25rem}.pt2{padding-top:.5rem}.pt3{padding-top:1rem}.pt4{padding-top:2rem}.pt5{padding-top:4rem}.pt6{padding-top:8rem}.pt7{padding-top:16rem}.pv0{padding-top:0;padding-bottom:0}.pv1{padding-top:.25rem;padding-bottom:.25rem}.pv2{padding-top:.5rem;padding-bottom:.5rem}.pv3{padding-top:1rem;padding-bottom:1rem}.pv4{padding-top:2rem;padding-bottom:2rem}.pv5{padding-top:4rem;padding-bottom:4rem}.pv6{padding-top:8rem;padding-bottom:8rem}.pv7{padding-top:16rem;padding-bottom:16rem}.ph0{padding-left:0;padding-right:0}.ph1{padding-left:.25rem;padding-right:.25rem}.ph2{padding-left:.5rem;padding-right:.5rem}.ph3{padding-left:1rem;padding-right:1rem}.ph4{padding-left:2rem;padding-right:2rem}.ph5{padding-left:4rem;padding-right:4rem}.ph6{padding-left:8rem;padding-right:8rem}.ph7{padding-left:16rem;padding-right:16rem}.ma0{margin:0}.ma1{margin:.25rem}.ma2{margin:.5rem}.ma3{margin:1rem}.ma4{margin:2rem}.ma5{margin:4rem}.ma6{margin:8rem}.ma7{margin:16rem}.ml0{margin-left:0}.ml1{margin-left:.25rem}.ml2{margin-left:.5rem}.ml3{margin-left:1rem}.ml4{margin-left:2rem}.ml5{margin-left:4rem}.ml6{margin-left:8rem}.ml7{margin-left:16rem}.mr0{margin-right:0}.mr1{margin-right:.25rem}.mr2{margin-right:.5rem}.mr3{margin-right:1rem}.mr4{margin-right:2rem}.mr5{margin-right:4rem}.mr6{margin-right:8rem}.mr7{margin-right:16rem}.mb0{margin-bottom:0}.mb1{margin-bottom:.25rem}.mb2{margin-bottom:.5rem}.mb3{margin-bottom:1rem}.mb4{margin-bottom:2rem}.mb5{margin-bottom:4rem}.mb6{margin-bottom:8rem}.mb7{margin-bottom:16rem}.mt0{margin-top:0}.mt1{margin-top:.25rem}.mt2{margin-top:.5rem}.mt3{margin-top:1rem}.mt4{margin-top:2rem}.mt5{margin-top:4rem}.mt6{margin-top:8rem}.mt7{margin-top:16rem}.mv0{margin-top:0;margin-bottom:0}.mv1{margin-top:.25rem;margin-bottom:.25rem}.mv2{margin-top:.5rem;margin-bottom:.5rem}.mv3{margin-top:1rem;margin-bottom:1rem}.mv4{margin-top:2rem;margin-bottom:2rem}.mv5{margin-top:4rem;margin-bottom:4rem}.mv6{margin-top:8rem;margin-bottom:8rem}.mv7{margin-top:16rem;margin-bottom:16rem}.mh0{margin-left:0;margin-right:0}.mh1{margin-left:.25rem;margin-right:.25rem}.mh2{margin-left:.5rem;margin-right:.5rem}.mh3{margin-left:1rem;margin-right:1rem}.mh4{margin-left:2rem;margin-right:2rem}.mh5{margin-left:4rem;margin-right:4rem}.mh6{margin-left:8rem;margin-right:8rem}.mh7{margin-left:16rem;margin-right:16rem}.na1{margin:-.25rem}.na2{margin:-.5rem}.na3{margin:-1rem}.na4{marg
in:-2rem}.na5{margin:-4rem}.na6{margin:-8rem}.na7{margin:-16rem}.nl1{margin-left:-.25rem}.nl2{margin-left:-.5rem}.nl3{margin-left:-1rem}.nl4{margin-left:-2rem}.nl5{margin-left:-4rem}.nl6{margin-left:-8rem}.nl7{margin-left:-16rem}.nr1{margin-right:-.25rem}.nr2{margin-right:-.5rem}.nr3{margin-right:-1rem}.nr4{margin-right:-2rem}.nr5{margin-right:-4rem}.nr6{margin-right:-8rem}.nr7{margin-right:-16rem}.nb1{margin-bottom:-.25rem}.nb2{margin-bottom:-.5rem}.nb3{margin-bottom:-1rem}.nb4{margin-bottom:-2rem}.nb5{margin-bottom:-4rem}.nb6{margin-bottom:-8rem}.nb7{margin-bottom:-16rem}.nt1{margin-top:-.25rem}.nt2{margin-top:-.5rem}.nt3{margin-top:-1rem}.nt4{margin-top:-2rem}.nt5{margin-top:-4rem}.nt6{margin-top:-8rem}.nt7{margin-top:-16rem}.collapse{border-collapse:collapse;border-spacing:0}.striped--light-silver:nth-child(odd){background-color:#aaa}.striped--moon-gray:nth-child(odd){background-color:#ccc}.striped--light-gray:nth-child(odd){background-color:#eee}.striped--near-white:nth-child(odd){background-color:#f4f4f4}.stripe-light:nth-child(odd){background-color:rgba(255,255,255,.1)}.stripe-dark:nth-child(odd){background-color:rgba(0,0,0,.1)}.strike{text-decoration:line-through}.underline{text-decoration:underline}.no-underline{text-decoration:none}.tl{text-align:left}.tr{text-align:right}.tc{text-align:center}.tj{text-align:justify}.ttc{text-transform:capitalize}.ttl{text-transform:lowercase}.ttu{text-transform:uppercase}.ttn{text-transform:none}.f-6,.f-headline{font-size:6rem}.f-5,.f-subheadline{font-size:5rem}.f1{font-size:3rem}.f2{font-size:2.25rem}.f3{font-size:1.5rem}.f4{font-size:1.25rem}.f5{font-size:1rem}.f6{font-size:.875rem}.f7{font-size:.75rem}.measure{max-width:30em}.measure-wide{max-width:34em}.measure-narrow{max-width:20em}.indent{text-indent:1em;margin-top:0;margin-bottom:0}.small-caps{font-variant:small-caps}.truncate{white-space:nowrap;overflow:hidden;text-overflow:ellipsis}.overflow-container{overflow-y:scroll}.center{margin-left:auto}.center,.mr-auto{margin-right:auto}.ml-auto{margin-left:auto}.clip{position:fixed!important;_position:absolute!important;clip:rect(1px 1px 1px 1px);clip:rect(1px,1px,1px,1px)}.ws-normal{white-space:normal}.nowrap{white-space:nowrap}.pre{white-space:pre}.v-base{vertical-align:baseline}.v-mid{vertical-align:middle}.v-top{vertical-align:top}.v-btm{vertical-align:bottom}.dim{opacity:1}.dim,.dim:focus,.dim:hover{transition:opacity .15s ease-in}.dim:focus,.dim:hover{opacity:.5}.dim:active{opacity:.8;transition:opacity .15s ease-out}.glow,.glow:focus,.glow:hover{transition:opacity .15s ease-in}.glow:focus,.glow:hover{opacity:1}.hide-child .child{opacity:0;transition:opacity .15s ease-in}.hide-child:active .child,.hide-child:focus .child,.hide-child:hover .child{opacity:1;transition:opacity .15s ease-in}.underline-hover:focus,.underline-hover:hover{text-decoration:underline}.grow{-moz-osx-font-smoothing:grayscale;-webkit-backface-visibility:hidden;backface-visibility:hidden;-webkit-transform:translateZ(0);transform:translateZ(0);transition:-webkit-transform .25s ease-out;transition:transform .25s ease-out;transition:transform .25s ease-out,-webkit-transform .25s ease-out}.grow:focus,.grow:hover{-webkit-transform:scale(1.05);transform:scale(1.05)}.grow:active{-webkit-transform:scale(.9);transform:scale(.9)}.grow-large{-moz-osx-font-smoothing:grayscale;-webkit-backface-visibility:hidden;backface-visibility:hidden;-webkit-transform:translateZ(0);transform:translateZ(0);transition:-webkit-transform .25s ease-in-out;transition:transform .25s 
ease-in-out;transition:transform .25s ease-in-out,-webkit-transform .25s ease-in-out}.grow-large:focus,.grow-large:hover{-webkit-transform:scale(1.2);transform:scale(1.2)}.grow-large:active{-webkit-transform:scale(.95);transform:scale(.95)}.pointer:hover,.shadow-hover{cursor:pointer}.shadow-hover{position:relative;transition:all .5s cubic-bezier(.165,.84,.44,1)}.shadow-hover:after{content:"";box-shadow:0 0 16px 2px rgba(0,0,0,.2);border-radius:inherit;opacity:0;position:absolute;top:0;left:0;width:100%;height:100%;z-index:-1;transition:opacity .5s cubic-bezier(.165,.84,.44,1)}.shadow-hover:focus:after,.shadow-hover:hover:after{opacity:1}.bg-animate,.bg-animate:focus,.bg-animate:hover{transition:background-color .15s ease-in-out}.z-0{z-index:0}.z-1{z-index:1}.z-2{z-index:2}.z-3{z-index:3}.z-4{z-index:4}.z-5{z-index:5}.z-999{z-index:999}.z-9999{z-index:9999}.z-max{z-index:2147483647}.z-inherit{z-index:inherit}.z-initial{z-index:auto}.z-unset{z-index:unset}.nested-copy-line-height ol,.nested-copy-line-height p,.nested-copy-line-height ul{line-height:1.5}.nested-headline-line-height h1,.nested-headline-line-height h2,.nested-headline-line-height h3,.nested-headline-line-height h4,.nested-headline-line-height h5,.nested-headline-line-height h6{line-height:1.25}.nested-list-reset ol,.nested-list-reset ul{padding-left:0;margin-left:0;list-style-type:none}.nested-copy-indent p+p{text-indent:1em;margin-top:0;margin-bottom:0}.nested-copy-separator p+p{margin-top:1.5em}.nested-img img{width:100%;max-width:100%;display:block}.nested-links a{color:#357edd;transition:color .15s ease-in}.nested-links a:focus,.nested-links a:hover{color:#96ccff;transition:color .15s ease-in}.debug *{outline:1px solid gold}.debug-white *{outline:1px solid #fff}.debug-black *{outline:1px solid #000}.debug-grid{background:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAYAAADED76LAAAAFElEQVR4AWPAC97/9x0eCsAEPgwAVLshdpENIxcAAAAASUVORK5CYII=)repeat}.debug-grid-16{background:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAMklEQVR4AWOgCLz/b0epAa6UGuBOqQHOQHLUgFEDnAbcBZ4UGwDOkiCnkIhdgNgNxAYAiYlD+8sEuo8AAAAASUVORK5CYII=)repeat}.debug-grid-8-solid{background:#fff url(data:image/gif;base64,R0lGODdhCAAIAPEAAADw/wDx/////wAAACwAAAAACAAIAAACDZQvgaeb/lxbAIKA8y0AOw==)repeat}.debug-grid-16-solid{background:#fff url(data:image/gif;base64,R0lGODdhEAAQAPEAAADw/wDx/xXy/////ywAAAAAEAAQAAACIZyPKckYDQFsb6ZqD85jZ2+BkwiRFKehhqQCQgDHcgwEBQA7)repeat}@media screen and 
(min-width:30em){.aspect-ratio-ns{height:0;position:relative}.aspect-ratio--16x9-ns{padding-bottom:56.25%}.aspect-ratio--9x16-ns{padding-bottom:177.77%}.aspect-ratio--4x3-ns{padding-bottom:75%}.aspect-ratio--3x4-ns{padding-bottom:133.33%}.aspect-ratio--6x4-ns{padding-bottom:66.6%}.aspect-ratio--4x6-ns{padding-bottom:150%}.aspect-ratio--8x5-ns{padding-bottom:62.5%}.aspect-ratio--5x8-ns{padding-bottom:160%}.aspect-ratio--7x5-ns{padding-bottom:71.42%}.aspect-ratio--5x7-ns{padding-bottom:140%}.aspect-ratio--1x1-ns{padding-bottom:100%}.aspect-ratio--object-ns{position:absolute;top:0;right:0;bottom:0;left:0;width:100%;height:100%;z-index:100}.cover-ns{background-size:cover!important}.contain-ns{background-size:contain!important}.bg-center-ns{background-position:50%}.bg-center-ns,.bg-top-ns{background-repeat:no-repeat}.bg-top-ns{background-position:top}.bg-right-ns{background-position:100%}.bg-bottom-ns,.bg-right-ns{background-repeat:no-repeat}.bg-bottom-ns{background-position:bottom}.bg-left-ns{background-repeat:no-repeat;background-position:0}.outline-ns{outline:1px solid}.outline-transparent-ns{outline:1px solid transparent}.outline-0-ns{outline:0}.ba-ns{border-style:solid;border-width:1px}.bt-ns{border-top-style:solid;border-top-width:1px}.br-ns{border-right-style:solid;border-right-width:1px}.bb-ns{border-bottom-style:solid;border-bottom-width:1px}.bl-ns{border-left-style:solid;border-left-width:1px}.bn-ns{border-style:none;border-width:0}.br0-ns{border-radius:0}.br1-ns{border-radius:.125rem}.br2-ns{border-radius:.25rem}.br3-ns{border-radius:.5rem}.br4-ns{border-radius:1rem}.br-100-ns{border-radius:100%}.br-pill-ns{border-radius:9999px}.br--bottom-ns{border-top-left-radius:0;border-top-right-radius:0}.br--top-ns{border-bottom-right-radius:0}.br--right-ns,.br--top-ns{border-bottom-left-radius:0}.br--right-ns{border-top-left-radius:0}.br--left-ns{border-top-right-radius:0;border-bottom-right-radius:0}.br-inherit-ns{border-radius:inherit}.br-initial-ns{border-radius:initial}.br-unset-ns{border-radius:unset}.b--dotted-ns{border-style:dotted}.b--dashed-ns{border-style:dashed}.b--solid-ns{border-style:solid}.b--none-ns{border-style:none}.bw0-ns{border-width:0}.bw1-ns{border-width:.125rem}.bw2-ns{border-width:.25rem}.bw3-ns{border-width:.5rem}.bw4-ns{border-width:1rem}.bw5-ns{border-width:2rem}.bt-0-ns{border-top-width:0}.br-0-ns{border-right-width:0}.bb-0-ns{border-bottom-width:0}.bl-0-ns{border-left-width:0}.shadow-1-ns{box-shadow:0 0 4px 2px rgba(0,0,0,.2)}.shadow-2-ns{box-shadow:0 0 8px 2px rgba(0,0,0,.2)}.shadow-3-ns{box-shadow:2px 2px 4px 2px rgba(0,0,0,.2)}.shadow-4-ns{box-shadow:2px 2px 8px rgba(0,0,0,.2)}.shadow-5-ns{box-shadow:4px 4px 8px 
rgba(0,0,0,.2)}.top-0-ns{top:0}.left-0-ns{left:0}.right-0-ns{right:0}.bottom-0-ns{bottom:0}.top-1-ns{top:1rem}.left-1-ns{left:1rem}.right-1-ns{right:1rem}.bottom-1-ns{bottom:1rem}.top-2-ns{top:2rem}.left-2-ns{left:2rem}.right-2-ns{right:2rem}.bottom-2-ns{bottom:2rem}.top--1-ns{top:-1rem}.right--1-ns{right:-1rem}.bottom--1-ns{bottom:-1rem}.left--1-ns{left:-1rem}.top--2-ns{top:-2rem}.right--2-ns{right:-2rem}.bottom--2-ns{bottom:-2rem}.left--2-ns{left:-2rem}.absolute--fill-ns{top:0;right:0;bottom:0;left:0}.cl-ns{clear:left}.cr-ns{clear:right}.cb-ns{clear:both}.cn-ns{clear:none}.dn-ns{display:none}.di-ns{display:inline}.db-ns{display:block}.dib-ns{display:inline-block}.dit-ns{display:inline-table}.dt-ns{display:table}.dtc-ns{display:table-cell}.dt-row-ns{display:table-row}.dt-row-group-ns{display:table-row-group}.dt-column-ns{display:table-column}.dt-column-group-ns{display:table-column-group}.dt--fixed-ns{table-layout:fixed;width:100%}.flex-ns{display:flex}.inline-flex-ns{display:inline-flex}.flex-auto-ns{flex:auto;min-width:0;min-height:0}.flex-none-ns{flex:none}.flex-column-ns{flex-direction:column}.flex-row-ns{flex-direction:row}.flex-wrap-ns{flex-wrap:wrap}.flex-nowrap-ns{flex-wrap:nowrap}.flex-wrap-reverse-ns{flex-wrap:wrap-reverse}.flex-column-reverse-ns{flex-direction:column-reverse}.flex-row-reverse-ns{flex-direction:row-reverse}.items-start-ns{align-items:flex-start}.items-end-ns{align-items:flex-end}.items-center-ns{align-items:center}.items-baseline-ns{align-items:baseline}.items-stretch-ns{align-items:stretch}.self-start-ns{align-self:flex-start}.self-end-ns{align-self:flex-end}.self-center-ns{align-self:center}.self-baseline-ns{align-self:baseline}.self-stretch-ns{align-self:stretch}.justify-start-ns{justify-content:flex-start}.justify-end-ns{justify-content:flex-end}.justify-center-ns{justify-content:center}.justify-between-ns{justify-content:space-between}.justify-around-ns{justify-content:space-around}.content-start-ns{align-content:flex-start}.content-end-ns{align-content:flex-end}.content-center-ns{align-content:center}.content-between-ns{align-content:space-between}.content-around-ns{align-content:space-around}.content-stretch-ns{align-content:stretch}.order-0-ns{order:0}.order-1-ns{order:1}.order-2-ns{order:2}.order-3-ns{order:3}.order-4-ns{order:4}.order-5-ns{order:5}.order-6-ns{order:6}.order-7-ns{order:7}.order-8-ns{order:8}.order-last-ns{order:99999}.flex-grow-0-ns{flex-grow:0}.flex-grow-1-ns{flex-grow:1}.flex-shrink-0-ns{flex-shrink:0}.flex-shrink-1-ns{flex-shrink:1}.fl-ns{float:left}.fl-ns,.fr-ns{_display:inline}.fr-ns{float:right}.fn-ns{float:none}.i-ns{font-style:italic}.fs-normal-ns{font-style:normal}.normal-ns{font-weight:400}.b-ns{font-weight:700}.fw1-ns{font-weight:100}.fw2-ns{font-weight:200}.fw3-ns{font-weight:300}.fw4-ns{font-weight:400}.fw5-ns{font-weight:500}.fw6-ns{font-weight:600}.fw7-ns{font-weight:700}.fw8-ns{font-weight:800}.fw9-ns{font-weight:900}.h1-ns{height:1rem}.h2-ns{height:2rem}.h3-ns{height:4rem}.h4-ns{height:8rem}.h5-ns{height:16rem}.h-25-ns{height:25%}.h-50-ns{height:50%}.h-75-ns{height:75%}.h-100-ns{height:100%}.min-h-100-ns{min-height:100%}.vh-25-ns{height:25vh}.vh-50-ns{height:50vh}.vh-75-ns{height:75vh}.vh-100-ns{height:100vh}.min-vh-100-ns{min-height:100vh}.h-auto-ns{height:auto}.h-inherit-ns{height:inherit}.tracked-ns{letter-spacing:.1em}.tracked-tight-ns{letter-spacing:-.05em}.tracked-mega-ns{letter-spacing:.25em}.lh-solid-ns{line-height:1}.lh-title-ns{line-height:1.25}.lh-copy-ns{line-height:1.5}.mw-100-ns{max-width:100%}.mw1-ns{max-w
idth:1rem}.mw2-ns{max-width:2rem}.mw3-ns{max-width:4rem}.mw4-ns{max-width:8rem}.mw5-ns{max-width:16rem}.mw6-ns{max-width:32rem}.mw7-ns{max-width:48rem}.mw8-ns{max-width:64rem}.mw9-ns{max-width:96rem}.mw-none-ns{max-width:none}.w1-ns{width:1rem}.w2-ns{width:2rem}.w3-ns{width:4rem}.w4-ns{width:8rem}.w5-ns{width:16rem}.w-10-ns{width:10%}.w-20-ns{width:20%}.w-25-ns{width:25%}.w-30-ns{width:30%}.w-33-ns{width:33%}.w-34-ns{width:34%}.w-40-ns{width:40%}.w-50-ns{width:50%}.w-60-ns{width:60%}.w-70-ns{width:70%}.w-75-ns{width:75%}.w-80-ns{width:80%}.w-90-ns{width:90%}.w-100-ns{width:100%}.w-third-ns{width:33.33333%}.w-two-thirds-ns{width:66.66667%}.w-auto-ns{width:auto}.overflow-visible-ns{overflow:visible}.overflow-hidden-ns{overflow:hidden}.overflow-scroll-ns{overflow:scroll}.overflow-auto-ns{overflow:auto}.overflow-x-visible-ns{overflow-x:visible}.overflow-x-hidden-ns{overflow-x:hidden}.overflow-x-scroll-ns{overflow-x:scroll}.overflow-x-auto-ns{overflow-x:auto}.overflow-y-visible-ns{overflow-y:visible}.overflow-y-hidden-ns{overflow-y:hidden}.overflow-y-scroll-ns{overflow-y:scroll}.overflow-y-auto-ns{overflow-y:auto}.static-ns{position:static}.relative-ns{position:relative}.absolute-ns{position:absolute}.fixed-ns{position:fixed}.rotate-45-ns{-webkit-transform:rotate(45deg);transform:rotate(45deg)}.rotate-90-ns{-webkit-transform:rotate(90deg);transform:rotate(90deg)}.rotate-135-ns{-webkit-transform:rotate(135deg);transform:rotate(135deg)}.rotate-180-ns{-webkit-transform:rotate(180deg);transform:rotate(180deg)}.rotate-225-ns{-webkit-transform:rotate(225deg);transform:rotate(225deg)}.rotate-270-ns{-webkit-transform:rotate(270deg);transform:rotate(270deg)}.rotate-315-ns{-webkit-transform:rotate(315deg);transform:rotate(315deg)}.pa0-ns{padding:0}.pa1-ns{padding:.25rem}.pa2-ns{padding:.5rem}.pa3-ns{padding:1rem}.pa4-ns{padding:2rem}.pa5-ns{padding:4rem}.pa6-ns{padding:8rem}.pa7-ns{padding:16rem}.pl0-ns{padding-left:0}.pl1-ns{padding-left:.25rem}.pl2-ns{padding-left:.5rem}.pl3-ns{padding-left:1rem}.pl4-ns{padding-left:2rem}.pl5-ns{padding-left:4rem}.pl6-ns{padding-left:8rem}.pl7-ns{padding-left:16rem}.pr0-ns{padding-right:0}.pr1-ns{padding-right:.25rem}.pr2-ns{padding-right:.5rem}.pr3-ns{padding-right:1rem}.pr4-ns{padding-right:2rem}.pr5-ns{padding-right:4rem}.pr6-ns{padding-right:8rem}.pr7-ns{padding-right:16rem}.pb0-ns{padding-bottom:0}.pb1-ns{padding-bottom:.25rem}.pb2-ns{padding-bottom:.5rem}.pb3-ns{padding-bottom:1rem}.pb4-ns{padding-bottom:2rem}.pb5-ns{padding-bottom:4rem}.pb6-ns{padding-bottom:8rem}.pb7-ns{padding-bottom:16rem}.pt0-ns{padding-top:0}.pt1-ns{padding-top:.25rem}.pt2-ns{padding-top:.5rem}.pt3-ns{padding-top:1rem}.pt4-ns{padding-top:2rem}.pt5-ns{padding-top:4rem}.pt6-ns{padding-top:8rem}.pt7-ns{padding-top:16rem}.pv0-ns{padding-top:0;padding-bottom:0}.pv1-ns{padding-top:.25rem;padding-bottom:.25rem}.pv2-ns{padding-top:.5rem;padding-bottom:.5rem}.pv3-ns{padding-top:1rem;padding-bottom:1rem}.pv4-ns{padding-top:2rem;padding-bottom:2rem}.pv5-ns{padding-top:4rem;padding-bottom:4rem}.pv6-ns{padding-top:8rem;padding-bottom:8rem}.pv7-ns{padding-top:16rem;padding-bottom:16rem}.ph0-ns{padding-left:0;padding-right:0}.ph1-ns{padding-left:.25rem;padding-right:.25rem}.ph2-ns{padding-left:.5rem;padding-right:.5rem}.ph3-ns{padding-left:1rem;padding-right:1rem}.ph4-ns{padding-left:2rem;padding-right:2rem}.ph5-ns{padding-left:4rem;padding-right:4rem}.ph6-ns{padding-left:8rem;padding-right:8rem}.ph7-ns{padding-left:16rem;padding-right:16rem}.ma0-ns{margin:0}.ma1-ns{margin:.25rem}.ma2-ns{margin:.5rem}.ma3-
ns{margin:1rem}.ma4-ns{margin:2rem}.ma5-ns{margin:4rem}.ma6-ns{margin:8rem}.ma7-ns{margin:16rem}.ml0-ns{margin-left:0}.ml1-ns{margin-left:.25rem}.ml2-ns{margin-left:.5rem}.ml3-ns{margin-left:1rem}.ml4-ns{margin-left:2rem}.ml5-ns{margin-left:4rem}.ml6-ns{margin-left:8rem}.ml7-ns{margin-left:16rem}.mr0-ns{margin-right:0}.mr1-ns{margin-right:.25rem}.mr2-ns{margin-right:.5rem}.mr3-ns{margin-right:1rem}.mr4-ns{margin-right:2rem}.mr5-ns{margin-right:4rem}.mr6-ns{margin-right:8rem}.mr7-ns{margin-right:16rem}.mb0-ns{margin-bottom:0}.mb1-ns{margin-bottom:.25rem}.mb2-ns{margin-bottom:.5rem}.mb3-ns{margin-bottom:1rem}.mb4-ns{margin-bottom:2rem}.mb5-ns{margin-bottom:4rem}.mb6-ns{margin-bottom:8rem}.mb7-ns{margin-bottom:16rem}.mt0-ns{margin-top:0}.mt1-ns{margin-top:.25rem}.mt2-ns{margin-top:.5rem}.mt3-ns{margin-top:1rem}.mt4-ns{margin-top:2rem}.mt5-ns{margin-top:4rem}.mt6-ns{margin-top:8rem}.mt7-ns{margin-top:16rem}.mv0-ns{margin-top:0;margin-bottom:0}.mv1-ns{margin-top:.25rem;margin-bottom:.25rem}.mv2-ns{margin-top:.5rem;margin-bottom:.5rem}.mv3-ns{margin-top:1rem;margin-bottom:1rem}.mv4-ns{margin-top:2rem;margin-bottom:2rem}.mv5-ns{margin-top:4rem;margin-bottom:4rem}.mv6-ns{margin-top:8rem;margin-bottom:8rem}.mv7-ns{margin-top:16rem;margin-bottom:16rem}.mh0-ns{margin-left:0;margin-right:0}.mh1-ns{margin-left:.25rem;margin-right:.25rem}.mh2-ns{margin-left:.5rem;margin-right:.5rem}.mh3-ns{margin-left:1rem;margin-right:1rem}.mh4-ns{margin-left:2rem;margin-right:2rem}.mh5-ns{margin-left:4rem;margin-right:4rem}.mh6-ns{margin-left:8rem;margin-right:8rem}.mh7-ns{margin-left:16rem;margin-right:16rem}.na1-ns{margin:-.25rem}.na2-ns{margin:-.5rem}.na3-ns{margin:-1rem}.na4-ns{margin:-2rem}.na5-ns{margin:-4rem}.na6-ns{margin:-8rem}.na7-ns{margin:-16rem}.nl1-ns{margin-left:-.25rem}.nl2-ns{margin-left:-.5rem}.nl3-ns{margin-left:-1rem}.nl4-ns{margin-left:-2rem}.nl5-ns{margin-left:-4rem}.nl6-ns{margin-left:-8rem}.nl7-ns{margin-left:-16rem}.nr1-ns{margin-right:-.25rem}.nr2-ns{margin-right:-.5rem}.nr3-ns{margin-right:-1rem}.nr4-ns{margin-right:-2rem}.nr5-ns{margin-right:-4rem}.nr6-ns{margin-right:-8rem}.nr7-ns{margin-right:-16rem}.nb1-ns{margin-bottom:-.25rem}.nb2-ns{margin-bottom:-.5rem}.nb3-ns{margin-bottom:-1rem}.nb4-ns{margin-bottom:-2rem}.nb5-ns{margin-bottom:-4rem}.nb6-ns{margin-bottom:-8rem}.nb7-ns{margin-bottom:-16rem}.nt1-ns{margin-top:-.25rem}.nt2-ns{margin-top:-.5rem}.nt3-ns{margin-top:-1rem}.nt4-ns{margin-top:-2rem}.nt5-ns{margin-top:-4rem}.nt6-ns{margin-top:-8rem}.nt7-ns{margin-top:-16rem}.strike-ns{text-decoration:line-through}.underline-ns{text-decoration:underline}.no-underline-ns{text-decoration:none}.tl-ns{text-align:left}.tr-ns{text-align:right}.tc-ns{text-align:center}.tj-ns{text-align:justify}.ttc-ns{text-transform:capitalize}.ttl-ns{text-transform:lowercase}.ttu-ns{text-transform:uppercase}.ttn-ns{text-transform:none}.f-6-ns,.f-headline-ns{font-size:6rem}.f-5-ns,.f-subheadline-ns{font-size:5rem}.f1-ns{font-size:3rem}.f2-ns{font-size:2.25rem}.f3-ns{font-size:1.5rem}.f4-ns{font-size:1.25rem}.f5-ns{font-size:1rem}.f6-ns{font-size:.875rem}.f7-ns{font-size:.75rem}.measure-ns{max-width:30em}.measure-wide-ns{max-width:34em}.measure-narrow-ns{max-width:20em}.indent-ns{text-indent:1em;margin-top:0;margin-bottom:0}.small-caps-ns{font-variant:small-caps}.truncate-ns{white-space:nowrap;overflow:hidden;text-overflow:ellipsis}.center-ns{margin-left:auto}.center-ns,.mr-auto-ns{margin-right:auto}.ml-auto-ns{margin-left:auto}.clip-ns{position:fixed!important;_position:absolute!important;clip:rect(1px 1px 1px 
1px);clip:rect(1px,1px,1px,1px)}.ws-normal-ns{white-space:normal}.nowrap-ns{white-space:nowrap}.pre-ns{white-space:pre}.v-base-ns{vertical-align:baseline}.v-mid-ns{vertical-align:middle}.v-top-ns{vertical-align:top}.v-btm-ns{vertical-align:bottom}}@media screen and (min-width:30em) and (max-width:60em){.aspect-ratio-m{height:0;position:relative}.aspect-ratio--16x9-m{padding-bottom:56.25%}.aspect-ratio--9x16-m{padding-bottom:177.77%}.aspect-ratio--4x3-m{padding-bottom:75%}.aspect-ratio--3x4-m{padding-bottom:133.33%}.aspect-ratio--6x4-m{padding-bottom:66.6%}.aspect-ratio--4x6-m{padding-bottom:150%}.aspect-ratio--8x5-m{padding-bottom:62.5%}.aspect-ratio--5x8-m{padding-bottom:160%}.aspect-ratio--7x5-m{padding-bottom:71.42%}.aspect-ratio--5x7-m{padding-bottom:140%}.aspect-ratio--1x1-m{padding-bottom:100%}.aspect-ratio--object-m{position:absolute;top:0;right:0;bottom:0;left:0;width:100%;height:100%;z-index:100}.cover-m{background-size:cover!important}.contain-m{background-size:contain!important}.bg-center-m{background-position:50%}.bg-center-m,.bg-top-m{background-repeat:no-repeat}.bg-top-m{background-position:top}.bg-right-m{background-position:100%}.bg-bottom-m,.bg-right-m{background-repeat:no-repeat}.bg-bottom-m{background-position:bottom}.bg-left-m{background-repeat:no-repeat;background-position:0}.outline-m{outline:1px solid}.outline-transparent-m{outline:1px solid transparent}.outline-0-m{outline:0}.ba-m{border-style:solid;border-width:1px}.bt-m{border-top-style:solid;border-top-width:1px}.br-m{border-right-style:solid;border-right-width:1px}.bb-m{border-bottom-style:solid;border-bottom-width:1px}.bl-m{border-left-style:solid;border-left-width:1px}.bn-m{border-style:none;border-width:0}.br0-m{border-radius:0}.br1-m{border-radius:.125rem}.br2-m{border-radius:.25rem}.br3-m{border-radius:.5rem}.br4-m{border-radius:1rem}.br-100-m{border-radius:100%}.br-pill-m{border-radius:9999px}.br--bottom-m{border-top-left-radius:0;border-top-right-radius:0}.br--top-m{border-bottom-right-radius:0}.br--right-m,.br--top-m{border-bottom-left-radius:0}.br--right-m{border-top-left-radius:0}.br--left-m{border-top-right-radius:0;border-bottom-right-radius:0}.br-inherit-m{border-radius:inherit}.br-initial-m{border-radius:initial}.br-unset-m{border-radius:unset}.b--dotted-m{border-style:dotted}.b--dashed-m{border-style:dashed}.b--solid-m{border-style:solid}.b--none-m{border-style:none}.bw0-m{border-width:0}.bw1-m{border-width:.125rem}.bw2-m{border-width:.25rem}.bw3-m{border-width:.5rem}.bw4-m{border-width:1rem}.bw5-m{border-width:2rem}.bt-0-m{border-top-width:0}.br-0-m{border-right-width:0}.bb-0-m{border-bottom-width:0}.bl-0-m{border-left-width:0}.shadow-1-m{box-shadow:0 0 4px 2px rgba(0,0,0,.2)}.shadow-2-m{box-shadow:0 0 8px 2px rgba(0,0,0,.2)}.shadow-3-m{box-shadow:2px 2px 4px 2px rgba(0,0,0,.2)}.shadow-4-m{box-shadow:2px 2px 8px rgba(0,0,0,.2)}.shadow-5-m{box-shadow:4px 4px 8px 
rgba(0,0,0,.2)}.top-0-m{top:0}.left-0-m{left:0}.right-0-m{right:0}.bottom-0-m{bottom:0}.top-1-m{top:1rem}.left-1-m{left:1rem}.right-1-m{right:1rem}.bottom-1-m{bottom:1rem}.top-2-m{top:2rem}.left-2-m{left:2rem}.right-2-m{right:2rem}.bottom-2-m{bottom:2rem}.top--1-m{top:-1rem}.right--1-m{right:-1rem}.bottom--1-m{bottom:-1rem}.left--1-m{left:-1rem}.top--2-m{top:-2rem}.right--2-m{right:-2rem}.bottom--2-m{bottom:-2rem}.left--2-m{left:-2rem}.absolute--fill-m{top:0;right:0;bottom:0;left:0}.cl-m{clear:left}.cr-m{clear:right}.cb-m{clear:both}.cn-m{clear:none}.dn-m{display:none}.di-m{display:inline}.db-m{display:block}.dib-m{display:inline-block}.dit-m{display:inline-table}.dt-m{display:table}.dtc-m{display:table-cell}.dt-row-m{display:table-row}.dt-row-group-m{display:table-row-group}.dt-column-m{display:table-column}.dt-column-group-m{display:table-column-group}.dt--fixed-m{table-layout:fixed;width:100%}.flex-m{display:flex}.inline-flex-m{display:inline-flex}.flex-auto-m{flex:auto;min-width:0;min-height:0}.flex-none-m{flex:none}.flex-column-m{flex-direction:column}.flex-row-m{flex-direction:row}.flex-wrap-m{flex-wrap:wrap}.flex-nowrap-m{flex-wrap:nowrap}.flex-wrap-reverse-m{flex-wrap:wrap-reverse}.flex-column-reverse-m{flex-direction:column-reverse}.flex-row-reverse-m{flex-direction:row-reverse}.items-start-m{align-items:flex-start}.items-end-m{align-items:flex-end}.items-center-m{align-items:center}.items-baseline-m{align-items:baseline}.items-stretch-m{align-items:stretch}.self-start-m{align-self:flex-start}.self-end-m{align-self:flex-end}.self-center-m{align-self:center}.self-baseline-m{align-self:baseline}.self-stretch-m{align-self:stretch}.justify-start-m{justify-content:flex-start}.justify-end-m{justify-content:flex-end}.justify-center-m{justify-content:center}.justify-between-m{justify-content:space-between}.justify-around-m{justify-content:space-around}.content-start-m{align-content:flex-start}.content-end-m{align-content:flex-end}.content-center-m{align-content:center}.content-between-m{align-content:space-between}.content-around-m{align-content:space-around}.content-stretch-m{align-content:stretch}.order-0-m{order:0}.order-1-m{order:1}.order-2-m{order:2}.order-3-m{order:3}.order-4-m{order:4}.order-5-m{order:5}.order-6-m{order:6}.order-7-m{order:7}.order-8-m{order:8}.order-last-m{order:99999}.flex-grow-0-m{flex-grow:0}.flex-grow-1-m{flex-grow:1}.flex-shrink-0-m{flex-shrink:0}.flex-shrink-1-m{flex-shrink:1}.fl-m{float:left}.fl-m,.fr-m{_display:inline}.fr-m{float:right}.fn-m{float:none}.i-m{font-style:italic}.fs-normal-m{font-style:normal}.normal-m{font-weight:400}.b-m{font-weight:700}.fw1-m{font-weight:100}.fw2-m{font-weight:200}.fw3-m{font-weight:300}.fw4-m{font-weight:400}.fw5-m{font-weight:500}.fw6-m{font-weight:600}.fw7-m{font-weight:700}.fw8-m{font-weight:800}.fw9-m{font-weight:900}.h1-m{height:1rem}.h2-m{height:2rem}.h3-m{height:4rem}.h4-m{height:8rem}.h5-m{height:16rem}.h-25-m{height:25%}.h-50-m{height:50%}.h-75-m{height:75%}.h-100-m{height:100%}.min-h-100-m{min-height:100%}.vh-25-m{height:25vh}.vh-50-m{height:50vh}.vh-75-m{height:75vh}.vh-100-m{height:100vh}.min-vh-100-m{min-height:100vh}.h-auto-m{height:auto}.h-inherit-m{height:inherit}.tracked-m{letter-spacing:.1em}.tracked-tight-m{letter-spacing:-.05em}.tracked-mega-m{letter-spacing:.25em}.lh-solid-m{line-height:1}.lh-title-m{line-height:1.25}.lh-copy-m{line-height:1.5}.mw-100-m{max-width:100%}.mw1-m{max-width:1rem}.mw2-m{max-width:2rem}.mw3-m{max-width:4rem}.mw4-m{max-width:8rem}.mw5-m{max-width:16rem}.mw6-m{max-width:32rem}.mw7
-m{max-width:48rem}.mw8-m{max-width:64rem}.mw9-m{max-width:96rem}.mw-none-m{max-width:none}.w1-m{width:1rem}.w2-m{width:2rem}.w3-m{width:4rem}.w4-m{width:8rem}.w5-m{width:16rem}.w-10-m{width:10%}.w-20-m{width:20%}.w-25-m{width:25%}.w-30-m{width:30%}.w-33-m{width:33%}.w-34-m{width:34%}.w-40-m{width:40%}.w-50-m{width:50%}.w-60-m{width:60%}.w-70-m{width:70%}.w-75-m{width:75%}.w-80-m{width:80%}.w-90-m{width:90%}.w-100-m{width:100%}.w-third-m{width:33.33333%}.w-two-thirds-m{width:66.66667%}.w-auto-m{width:auto}.overflow-visible-m{overflow:visible}.overflow-hidden-m{overflow:hidden}.overflow-scroll-m{overflow:scroll}.overflow-auto-m{overflow:auto}.overflow-x-visible-m{overflow-x:visible}.overflow-x-hidden-m{overflow-x:hidden}.overflow-x-scroll-m{overflow-x:scroll}.overflow-x-auto-m{overflow-x:auto}.overflow-y-visible-m{overflow-y:visible}.overflow-y-hidden-m{overflow-y:hidden}.overflow-y-scroll-m{overflow-y:scroll}.overflow-y-auto-m{overflow-y:auto}.static-m{position:static}.relative-m{position:relative}.absolute-m{position:absolute}.fixed-m{position:fixed}.rotate-45-m{-webkit-transform:rotate(45deg);transform:rotate(45deg)}.rotate-90-m{-webkit-transform:rotate(90deg);transform:rotate(90deg)}.rotate-135-m{-webkit-transform:rotate(135deg);transform:rotate(135deg)}.rotate-180-m{-webkit-transform:rotate(180deg);transform:rotate(180deg)}.rotate-225-m{-webkit-transform:rotate(225deg);transform:rotate(225deg)}.rotate-270-m{-webkit-transform:rotate(270deg);transform:rotate(270deg)}.rotate-315-m{-webkit-transform:rotate(315deg);transform:rotate(315deg)}.pa0-m{padding:0}.pa1-m{padding:.25rem}.pa2-m{padding:.5rem}.pa3-m{padding:1rem}.pa4-m{padding:2rem}.pa5-m{padding:4rem}.pa6-m{padding:8rem}.pa7-m{padding:16rem}.pl0-m{padding-left:0}.pl1-m{padding-left:.25rem}.pl2-m{padding-left:.5rem}.pl3-m{padding-left:1rem}.pl4-m{padding-left:2rem}.pl5-m{padding-left:4rem}.pl6-m{padding-left:8rem}.pl7-m{padding-left:16rem}.pr0-m{padding-right:0}.pr1-m{padding-right:.25rem}.pr2-m{padding-right:.5rem}.pr3-m{padding-right:1rem}.pr4-m{padding-right:2rem}.pr5-m{padding-right:4rem}.pr6-m{padding-right:8rem}.pr7-m{padding-right:16rem}.pb0-m{padding-bottom:0}.pb1-m{padding-bottom:.25rem}.pb2-m{padding-bottom:.5rem}.pb3-m{padding-bottom:1rem}.pb4-m{padding-bottom:2rem}.pb5-m{padding-bottom:4rem}.pb6-m{padding-bottom:8rem}.pb7-m{padding-bottom:16rem}.pt0-m{padding-top:0}.pt1-m{padding-top:.25rem}.pt2-m{padding-top:.5rem}.pt3-m{padding-top:1rem}.pt4-m{padding-top:2rem}.pt5-m{padding-top:4rem}.pt6-m{padding-top:8rem}.pt7-m{padding-top:16rem}.pv0-m{padding-top:0;padding-bottom:0}.pv1-m{padding-top:.25rem;padding-bottom:.25rem}.pv2-m{padding-top:.5rem;padding-bottom:.5rem}.pv3-m{padding-top:1rem;padding-bottom:1rem}.pv4-m{padding-top:2rem;padding-bottom:2rem}.pv5-m{padding-top:4rem;padding-bottom:4rem}.pv6-m{padding-top:8rem;padding-bottom:8rem}.pv7-m{padding-top:16rem;padding-bottom:16rem}.ph0-m{padding-left:0;padding-right:0}.ph1-m{padding-left:.25rem;padding-right:.25rem}.ph2-m{padding-left:.5rem;padding-right:.5rem}.ph3-m{padding-left:1rem;padding-right:1rem}.ph4-m{padding-left:2rem;padding-right:2rem}.ph5-m{padding-left:4rem;padding-right:4rem}.ph6-m{padding-left:8rem;padding-right:8rem}.ph7-m{padding-left:16rem;padding-right:16rem}.ma0-m{margin:0}.ma1-m{margin:.25rem}.ma2-m{margin:.5rem}.ma3-m{margin:1rem}.ma4-m{margin:2rem}.ma5-m{margin:4rem}.ma6-m{margin:8rem}.ma7-m{margin:16rem}.ml0-m{margin-left:0}.ml1-m{margin-left:.25rem}.ml2-m{margin-left:.5rem}.ml3-m{margin-left:1rem}.ml4-m{margin-left:2rem}.ml5-m{margin-left:4rem}.ml6
-m{margin-left:8rem}.ml7-m{margin-left:16rem}.mr0-m{margin-right:0}.mr1-m{margin-right:.25rem}.mr2-m{margin-right:.5rem}.mr3-m{margin-right:1rem}.mr4-m{margin-right:2rem}.mr5-m{margin-right:4rem}.mr6-m{margin-right:8rem}.mr7-m{margin-right:16rem}.mb0-m{margin-bottom:0}.mb1-m{margin-bottom:.25rem}.mb2-m{margin-bottom:.5rem}.mb3-m{margin-bottom:1rem}.mb4-m{margin-bottom:2rem}.mb5-m{margin-bottom:4rem}.mb6-m{margin-bottom:8rem}.mb7-m{margin-bottom:16rem}.mt0-m{margin-top:0}.mt1-m{margin-top:.25rem}.mt2-m{margin-top:.5rem}.mt3-m{margin-top:1rem}.mt4-m{margin-top:2rem}.mt5-m{margin-top:4rem}.mt6-m{margin-top:8rem}.mt7-m{margin-top:16rem}.mv0-m{margin-top:0;margin-bottom:0}.mv1-m{margin-top:.25rem;margin-bottom:.25rem}.mv2-m{margin-top:.5rem;margin-bottom:.5rem}.mv3-m{margin-top:1rem;margin-bottom:1rem}.mv4-m{margin-top:2rem;margin-bottom:2rem}.mv5-m{margin-top:4rem;margin-bottom:4rem}.mv6-m{margin-top:8rem;margin-bottom:8rem}.mv7-m{margin-top:16rem;margin-bottom:16rem}.mh0-m{margin-left:0;margin-right:0}.mh1-m{margin-left:.25rem;margin-right:.25rem}.mh2-m{margin-left:.5rem;margin-right:.5rem}.mh3-m{margin-left:1rem;margin-right:1rem}.mh4-m{margin-left:2rem;margin-right:2rem}.mh5-m{margin-left:4rem;margin-right:4rem}.mh6-m{margin-left:8rem;margin-right:8rem}.mh7-m{margin-left:16rem;margin-right:16rem}.na1-m{margin:-.25rem}.na2-m{margin:-.5rem}.na3-m{margin:-1rem}.na4-m{margin:-2rem}.na5-m{margin:-4rem}.na6-m{margin:-8rem}.na7-m{margin:-16rem}.nl1-m{margin-left:-.25rem}.nl2-m{margin-left:-.5rem}.nl3-m{margin-left:-1rem}.nl4-m{margin-left:-2rem}.nl5-m{margin-left:-4rem}.nl6-m{margin-left:-8rem}.nl7-m{margin-left:-16rem}.nr1-m{margin-right:-.25rem}.nr2-m{margin-right:-.5rem}.nr3-m{margin-right:-1rem}.nr4-m{margin-right:-2rem}.nr5-m{margin-right:-4rem}.nr6-m{margin-right:-8rem}.nr7-m{margin-right:-16rem}.nb1-m{margin-bottom:-.25rem}.nb2-m{margin-bottom:-.5rem}.nb3-m{margin-bottom:-1rem}.nb4-m{margin-bottom:-2rem}.nb5-m{margin-bottom:-4rem}.nb6-m{margin-bottom:-8rem}.nb7-m{margin-bottom:-16rem}.nt1-m{margin-top:-.25rem}.nt2-m{margin-top:-.5rem}.nt3-m{margin-top:-1rem}.nt4-m{margin-top:-2rem}.nt5-m{margin-top:-4rem}.nt6-m{margin-top:-8rem}.nt7-m{margin-top:-16rem}.strike-m{text-decoration:line-through}.underline-m{text-decoration:underline}.no-underline-m{text-decoration:none}.tl-m{text-align:left}.tr-m{text-align:right}.tc-m{text-align:center}.tj-m{text-align:justify}.ttc-m{text-transform:capitalize}.ttl-m{text-transform:lowercase}.ttu-m{text-transform:uppercase}.ttn-m{text-transform:none}.f-6-m,.f-headline-m{font-size:6rem}.f-5-m,.f-subheadline-m{font-size:5rem}.f1-m{font-size:3rem}.f2-m{font-size:2.25rem}.f3-m{font-size:1.5rem}.f4-m{font-size:1.25rem}.f5-m{font-size:1rem}.f6-m{font-size:.875rem}.f7-m{font-size:.75rem}.measure-m{max-width:30em}.measure-wide-m{max-width:34em}.measure-narrow-m{max-width:20em}.indent-m{text-indent:1em;margin-top:0;margin-bottom:0}.small-caps-m{font-variant:small-caps}.truncate-m{white-space:nowrap;overflow:hidden;text-overflow:ellipsis}.center-m{margin-left:auto}.center-m,.mr-auto-m{margin-right:auto}.ml-auto-m{margin-left:auto}.clip-m{position:fixed!important;_position:absolute!important;clip:rect(1px 1px 1px 1px);clip:rect(1px,1px,1px,1px)}.ws-normal-m{white-space:normal}.nowrap-m{white-space:nowrap}.pre-m{white-space:pre}.v-base-m{vertical-align:baseline}.v-mid-m{vertical-align:middle}.v-top-m{vertical-align:top}.v-btm-m{vertical-align:bottom}}@media screen and 
(min-width:60em){.aspect-ratio-l{height:0;position:relative}.aspect-ratio--16x9-l{padding-bottom:56.25%}.aspect-ratio--9x16-l{padding-bottom:177.77%}.aspect-ratio--4x3-l{padding-bottom:75%}.aspect-ratio--3x4-l{padding-bottom:133.33%}.aspect-ratio--6x4-l{padding-bottom:66.6%}.aspect-ratio--4x6-l{padding-bottom:150%}.aspect-ratio--8x5-l{padding-bottom:62.5%}.aspect-ratio--5x8-l{padding-bottom:160%}.aspect-ratio--7x5-l{padding-bottom:71.42%}.aspect-ratio--5x7-l{padding-bottom:140%}.aspect-ratio--1x1-l{padding-bottom:100%}.aspect-ratio--object-l{position:absolute;top:0;right:0;bottom:0;left:0;width:100%;height:100%;z-index:100}.cover-l{background-size:cover!important}.contain-l{background-size:contain!important}.bg-center-l{background-position:50%}.bg-center-l,.bg-top-l{background-repeat:no-repeat}.bg-top-l{background-position:top}.bg-right-l{background-position:100%}.bg-bottom-l,.bg-right-l{background-repeat:no-repeat}.bg-bottom-l{background-position:bottom}.bg-left-l{background-repeat:no-repeat;background-position:0}.outline-l{outline:1px solid}.outline-transparent-l{outline:1px solid transparent}.outline-0-l{outline:0}.ba-l{border-style:solid;border-width:1px}.bt-l{border-top-style:solid;border-top-width:1px}.br-l{border-right-style:solid;border-right-width:1px}.bb-l{border-bottom-style:solid;border-bottom-width:1px}.bl-l{border-left-style:solid;border-left-width:1px}.bn-l{border-style:none;border-width:0}.br0-l{border-radius:0}.br1-l{border-radius:.125rem}.br2-l{border-radius:.25rem}.br3-l{border-radius:.5rem}.br4-l{border-radius:1rem}.br-100-l{border-radius:100%}.br-pill-l{border-radius:9999px}.br--bottom-l{border-top-left-radius:0;border-top-right-radius:0}.br--top-l{border-bottom-right-radius:0}.br--right-l,.br--top-l{border-bottom-left-radius:0}.br--right-l{border-top-left-radius:0}.br--left-l{border-top-right-radius:0;border-bottom-right-radius:0}.br-inherit-l{border-radius:inherit}.br-initial-l{border-radius:initial}.br-unset-l{border-radius:unset}.b--dotted-l{border-style:dotted}.b--dashed-l{border-style:dashed}.b--solid-l{border-style:solid}.b--none-l{border-style:none}.bw0-l{border-width:0}.bw1-l{border-width:.125rem}.bw2-l{border-width:.25rem}.bw3-l{border-width:.5rem}.bw4-l{border-width:1rem}.bw5-l{border-width:2rem}.bt-0-l{border-top-width:0}.br-0-l{border-right-width:0}.bb-0-l{border-bottom-width:0}.bl-0-l{border-left-width:0}.shadow-1-l{box-shadow:0 0 4px 2px rgba(0,0,0,.2)}.shadow-2-l{box-shadow:0 0 8px 2px rgba(0,0,0,.2)}.shadow-3-l{box-shadow:2px 2px 4px 2px rgba(0,0,0,.2)}.shadow-4-l{box-shadow:2px 2px 8px rgba(0,0,0,.2)}.shadow-5-l{box-shadow:4px 4px 8px 
rgba(0,0,0,.2)}.top-0-l{top:0}.left-0-l{left:0}.right-0-l{right:0}.bottom-0-l{bottom:0}.top-1-l{top:1rem}.left-1-l{left:1rem}.right-1-l{right:1rem}.bottom-1-l{bottom:1rem}.top-2-l{top:2rem}.left-2-l{left:2rem}.right-2-l{right:2rem}.bottom-2-l{bottom:2rem}.top--1-l{top:-1rem}.right--1-l{right:-1rem}.bottom--1-l{bottom:-1rem}.left--1-l{left:-1rem}.top--2-l{top:-2rem}.right--2-l{right:-2rem}.bottom--2-l{bottom:-2rem}.left--2-l{left:-2rem}.absolute--fill-l{top:0;right:0;bottom:0;left:0}.cl-l{clear:left}.cr-l{clear:right}.cb-l{clear:both}.cn-l{clear:none}.dn-l{display:none}.di-l{display:inline}.db-l{display:block}.dib-l{display:inline-block}.dit-l{display:inline-table}.dt-l{display:table}.dtc-l{display:table-cell}.dt-row-l{display:table-row}.dt-row-group-l{display:table-row-group}.dt-column-l{display:table-column}.dt-column-group-l{display:table-column-group}.dt--fixed-l{table-layout:fixed;width:100%}.flex-l{display:flex}.inline-flex-l{display:inline-flex}.flex-auto-l{flex:auto;min-width:0;min-height:0}.flex-none-l{flex:none}.flex-column-l{flex-direction:column}.flex-row-l{flex-direction:row}.flex-wrap-l{flex-wrap:wrap}.flex-nowrap-l{flex-wrap:nowrap}.flex-wrap-reverse-l{flex-wrap:wrap-reverse}.flex-column-reverse-l{flex-direction:column-reverse}.flex-row-reverse-l{flex-direction:row-reverse}.items-start-l{align-items:flex-start}.items-end-l{align-items:flex-end}.items-center-l{align-items:center}.items-baseline-l{align-items:baseline}.items-stretch-l{align-items:stretch}.self-start-l{align-self:flex-start}.self-end-l{align-self:flex-end}.self-center-l{align-self:center}.self-baseline-l{align-self:baseline}.self-stretch-l{align-self:stretch}.justify-start-l{justify-content:flex-start}.justify-end-l{justify-content:flex-end}.justify-center-l{justify-content:center}.justify-between-l{justify-content:space-between}.justify-around-l{justify-content:space-around}.content-start-l{align-content:flex-start}.content-end-l{align-content:flex-end}.content-center-l{align-content:center}.content-between-l{align-content:space-between}.content-around-l{align-content:space-around}.content-stretch-l{align-content:stretch}.order-0-l{order:0}.order-1-l{order:1}.order-2-l{order:2}.order-3-l{order:3}.order-4-l{order:4}.order-5-l{order:5}.order-6-l{order:6}.order-7-l{order:7}.order-8-l{order:8}.order-last-l{order:99999}.flex-grow-0-l{flex-grow:0}.flex-grow-1-l{flex-grow:1}.flex-shrink-0-l{flex-shrink:0}.flex-shrink-1-l{flex-shrink:1}.fl-l{float:left}.fl-l,.fr-l{_display:inline}.fr-l{float:right}.fn-l{float:none}.i-l{font-style:italic}.fs-normal-l{font-style:normal}.normal-l{font-weight:400}.b-l{font-weight:700}.fw1-l{font-weight:100}.fw2-l{font-weight:200}.fw3-l{font-weight:300}.fw4-l{font-weight:400}.fw5-l{font-weight:500}.fw6-l{font-weight:600}.fw7-l{font-weight:700}.fw8-l{font-weight:800}.fw9-l{font-weight:900}.h1-l{height:1rem}.h2-l{height:2rem}.h3-l{height:4rem}.h4-l{height:8rem}.h5-l{height:16rem}.h-25-l{height:25%}.h-50-l{height:50%}.h-75-l{height:75%}.h-100-l{height:100%}.min-h-100-l{min-height:100%}.vh-25-l{height:25vh}.vh-50-l{height:50vh}.vh-75-l{height:75vh}.vh-100-l{height:100vh}.min-vh-100-l{min-height:100vh}.h-auto-l{height:auto}.h-inherit-l{height:inherit}.tracked-l{letter-spacing:.1em}.tracked-tight-l{letter-spacing:-.05em}.tracked-mega-l{letter-spacing:.25em}.lh-solid-l{line-height:1}.lh-title-l{line-height:1.25}.lh-copy-l{line-height:1.5}.mw-100-l{max-width:100%}.mw1-l{max-width:1rem}.mw2-l{max-width:2rem}.mw3-l{max-width:4rem}.mw4-l{max-width:8rem}.mw5-l{max-width:16rem}.mw6-l{max-width:32rem}.mw7
-l{max-width:48rem}.mw8-l{max-width:64rem}.mw9-l{max-width:96rem}.mw-none-l{max-width:none}.w1-l{width:1rem}.w2-l{width:2rem}.w3-l{width:4rem}.w4-l{width:8rem}.w5-l{width:16rem}.w-10-l{width:10%}.w-20-l{width:20%}.w-25-l{width:25%}.w-30-l{width:30%}.w-33-l{width:33%}.w-34-l{width:34%}.w-40-l{width:40%}.w-50-l{width:50%}.w-60-l{width:60%}.w-70-l{width:70%}.w-75-l{width:75%}.w-80-l{width:80%}.w-90-l{width:90%}.w-100-l{width:100%}.w-third-l{width:33.33333%}.w-two-thirds-l{width:66.66667%}.w-auto-l{width:auto}.overflow-visible-l{overflow:visible}.overflow-hidden-l{overflow:hidden}.overflow-scroll-l{overflow:scroll}.overflow-auto-l{overflow:auto}.overflow-x-visible-l{overflow-x:visible}.overflow-x-hidden-l{overflow-x:hidden}.overflow-x-scroll-l{overflow-x:scroll}.overflow-x-auto-l{overflow-x:auto}.overflow-y-visible-l{overflow-y:visible}.overflow-y-hidden-l{overflow-y:hidden}.overflow-y-scroll-l{overflow-y:scroll}.overflow-y-auto-l{overflow-y:auto}.static-l{position:static}.relative-l{position:relative}.absolute-l{position:absolute}.fixed-l{position:fixed}.rotate-45-l{-webkit-transform:rotate(45deg);transform:rotate(45deg)}.rotate-90-l{-webkit-transform:rotate(90deg);transform:rotate(90deg)}.rotate-135-l{-webkit-transform:rotate(135deg);transform:rotate(135deg)}.rotate-180-l{-webkit-transform:rotate(180deg);transform:rotate(180deg)}.rotate-225-l{-webkit-transform:rotate(225deg);transform:rotate(225deg)}.rotate-270-l{-webkit-transform:rotate(270deg);transform:rotate(270deg)}.rotate-315-l{-webkit-transform:rotate(315deg);transform:rotate(315deg)}.pa0-l{padding:0}.pa1-l{padding:.25rem}.pa2-l{padding:.5rem}.pa3-l{padding:1rem}.pa4-l{padding:2rem}.pa5-l{padding:4rem}.pa6-l{padding:8rem}.pa7-l{padding:16rem}.pl0-l{padding-left:0}.pl1-l{padding-left:.25rem}.pl2-l{padding-left:.5rem}.pl3-l{padding-left:1rem}.pl4-l{padding-left:2rem}.pl5-l{padding-left:4rem}.pl6-l{padding-left:8rem}.pl7-l{padding-left:16rem}.pr0-l{padding-right:0}.pr1-l{padding-right:.25rem}.pr2-l{padding-right:.5rem}.pr3-l{padding-right:1rem}.pr4-l{padding-right:2rem}.pr5-l{padding-right:4rem}.pr6-l{padding-right:8rem}.pr7-l{padding-right:16rem}.pb0-l{padding-bottom:0}.pb1-l{padding-bottom:.25rem}.pb2-l{padding-bottom:.5rem}.pb3-l{padding-bottom:1rem}.pb4-l{padding-bottom:2rem}.pb5-l{padding-bottom:4rem}.pb6-l{padding-bottom:8rem}.pb7-l{padding-bottom:16rem}.pt0-l{padding-top:0}.pt1-l{padding-top:.25rem}.pt2-l{padding-top:.5rem}.pt3-l{padding-top:1rem}.pt4-l{padding-top:2rem}.pt5-l{padding-top:4rem}.pt6-l{padding-top:8rem}.pt7-l{padding-top:16rem}.pv0-l{padding-top:0;padding-bottom:0}.pv1-l{padding-top:.25rem;padding-bottom:.25rem}.pv2-l{padding-top:.5rem;padding-bottom:.5rem}.pv3-l{padding-top:1rem;padding-bottom:1rem}.pv4-l{padding-top:2rem;padding-bottom:2rem}.pv5-l{padding-top:4rem;padding-bottom:4rem}.pv6-l{padding-top:8rem;padding-bottom:8rem}.pv7-l{padding-top:16rem;padding-bottom:16rem}.ph0-l{padding-left:0;padding-right:0}.ph1-l{padding-left:.25rem;padding-right:.25rem}.ph2-l{padding-left:.5rem;padding-right:.5rem}.ph3-l{padding-left:1rem;padding-right:1rem}.ph4-l{padding-left:2rem;padding-right:2rem}.ph5-l{padding-left:4rem;padding-right:4rem}.ph6-l{padding-left:8rem;padding-right:8rem}.ph7-l{padding-left:16rem;padding-right:16rem}.ma0-l{margin:0}.ma1-l{margin:.25rem}.ma2-l{margin:.5rem}.ma3-l{margin:1rem}.ma4-l{margin:2rem}.ma5-l{margin:4rem}.ma6-l{margin:8rem}.ma7-l{margin:16rem}.ml0-l{margin-left:0}.ml1-l{margin-left:.25rem}.ml2-l{margin-left:.5rem}.ml3-l{margin-left:1rem}.ml4-l{margin-left:2rem}.ml5-l{margin-left:4rem}.ml6
-l{margin-left:8rem}.ml7-l{margin-left:16rem}.mr0-l{margin-right:0}.mr1-l{margin-right:.25rem}.mr2-l{margin-right:.5rem}.mr3-l{margin-right:1rem}.mr4-l{margin-right:2rem}.mr5-l{margin-right:4rem}.mr6-l{margin-right:8rem}.mr7-l{margin-right:16rem}.mb0-l{margin-bottom:0}.mb1-l{margin-bottom:.25rem}.mb2-l{margin-bottom:.5rem}.mb3-l{margin-bottom:1rem}.mb4-l{margin-bottom:2rem}.mb5-l{margin-bottom:4rem}.mb6-l{margin-bottom:8rem}.mb7-l{margin-bottom:16rem}.mt0-l{margin-top:0}.mt1-l{margin-top:.25rem}.mt2-l{margin-top:.5rem}.mt3-l{margin-top:1rem}.mt4-l{margin-top:2rem}.mt5-l{margin-top:4rem}.mt6-l{margin-top:8rem}.mt7-l{margin-top:16rem}.mv0-l{margin-top:0;margin-bottom:0}.mv1-l{margin-top:.25rem;margin-bottom:.25rem}.mv2-l{margin-top:.5rem;margin-bottom:.5rem}.mv3-l{margin-top:1rem;margin-bottom:1rem}.mv4-l{margin-top:2rem;margin-bottom:2rem}.mv5-l{margin-top:4rem;margin-bottom:4rem}.mv6-l{margin-top:8rem;margin-bottom:8rem}.mv7-l{margin-top:16rem;margin-bottom:16rem}.mh0-l{margin-left:0;margin-right:0}.mh1-l{margin-left:.25rem;margin-right:.25rem}.mh2-l{margin-left:.5rem;margin-right:.5rem}.mh3-l{margin-left:1rem;margin-right:1rem}.mh4-l{margin-left:2rem;margin-right:2rem}.mh5-l{margin-left:4rem;margin-right:4rem}.mh6-l{margin-left:8rem;margin-right:8rem}.mh7-l{margin-left:16rem;margin-right:16rem}.na1-l{margin:-.25rem}.na2-l{margin:-.5rem}.na3-l{margin:-1rem}.na4-l{margin:-2rem}.na5-l{margin:-4rem}.na6-l{margin:-8rem}.na7-l{margin:-16rem}.nl1-l{margin-left:-.25rem}.nl2-l{margin-left:-.5rem}.nl3-l{margin-left:-1rem}.nl4-l{margin-left:-2rem}.nl5-l{margin-left:-4rem}.nl6-l{margin-left:-8rem}.nl7-l{margin-left:-16rem}.nr1-l{margin-right:-.25rem}.nr2-l{margin-right:-.5rem}.nr3-l{margin-right:-1rem}.nr4-l{margin-right:-2rem}.nr5-l{margin-right:-4rem}.nr6-l{margin-right:-8rem}.nr7-l{margin-right:-16rem}.nb1-l{margin-bottom:-.25rem}.nb2-l{margin-bottom:-.5rem}.nb3-l{margin-bottom:-1rem}.nb4-l{margin-bottom:-2rem}.nb5-l{margin-bottom:-4rem}.nb6-l{margin-bottom:-8rem}.nb7-l{margin-bottom:-16rem}.nt1-l{margin-top:-.25rem}.nt2-l{margin-top:-.5rem}.nt3-l{margin-top:-1rem}.nt4-l{margin-top:-2rem}.nt5-l{margin-top:-4rem}.nt6-l{margin-top:-8rem}.nt7-l{margin-top:-16rem}.strike-l{text-decoration:line-through}.underline-l{text-decoration:underline}.no-underline-l{text-decoration:none}.tl-l{text-align:left}.tr-l{text-align:right}.tc-l{text-align:center}.tj-l{text-align:justify}.ttc-l{text-transform:capitalize}.ttl-l{text-transform:lowercase}.ttu-l{text-transform:uppercase}.ttn-l{text-transform:none}.f-6-l,.f-headline-l{font-size:6rem}.f-5-l,.f-subheadline-l{font-size:5rem}.f1-l{font-size:3rem}.f2-l{font-size:2.25rem}.f3-l{font-size:1.5rem}.f4-l{font-size:1.25rem}.f5-l{font-size:1rem}.f6-l{font-size:.875rem}.f7-l{font-size:.75rem}.measure-l{max-width:30em}.measure-wide-l{max-width:34em}.measure-narrow-l{max-width:20em}.indent-l{text-indent:1em;margin-top:0;margin-bottom:0}.small-caps-l{font-variant:small-caps}.truncate-l{white-space:nowrap;overflow:hidden;text-overflow:ellipsis}.center-l{margin-left:auto}.center-l,.mr-auto-l{margin-right:auto}.ml-auto-l{margin-left:auto}.clip-l{position:fixed!important;_position:absolute!important;clip:rect(1px 1px 1px 1px);clip:rect(1px,1px,1px,1px)}.ws-normal-l{white-space:normal}.nowrap-l{white-space:nowrap}.pre-l{white-space:pre}.v-base-l{vertical-align:baseline}.v-mid-l{vertical-align:middle}.v-top-l{vertical-align:top}.v-btm-l{vertical-align:bottom}}pre,.pre{overflow-x:auto;overflow-y:hidden;overflow:scroll}pre 
code{display:block;padding:1.5em;white-space:pre;font-size:.875rem;line-height:2}pre{background-color:#222;color:#ddd;white-space:pre;hyphens:none;position:relative}.pagination{margin:3rem 0}.pagination li{display:inline-block;margin-right:.375rem;font-size:.875rem;margin-bottom:2.5em}[dir=rtl] .pagination li{margin-left:.375rem;margin-right:0}.pagination li a{padding:.5rem .625rem;background-color:#fff;color:#333;border:1px solid #ddd;border-radius:3px;text-decoration:none}.pagination li.disabled{display:none}.pagination li.active a:link,.pagination li.active a:active,.pagination li.active a:visited{background-color:#ddd}#TableOfContents ul li{margin-bottom:1em}.ananke-socials a{display:inline-block;vertical-align:middle;color:#bababa;fill:currentColor}.ananke-socials a .icon svg{width:32px;height:32px}.ananke-socials a:hover{color:#6b7280}.new-window{opacity:0;display:inline-block;vertical-align:top}.link-transition:hover .new-window{opacity:1}#TableOfContents ul li{margin-bottom:1em}.lh-copy blockquote{display:block;font-size:.875em;margin-left:2rem;margin-top:2rem;margin-bottom:2rem;border-left:4px solid #ccc;padding-left:1rem}.nested-links a{overflow-wrap:break-word} \ No newline at end of file diff --git a/resources/_gen/assets/css/ananke/css/main.css_83735de7ca999e9c17f3419b41b93fdb.json b/resources/_gen/assets/css/ananke/css/main.css_83735de7ca999e9c17f3419b41b93fdb.json new file mode 100644 index 0000000..f3d1135 --- /dev/null +++ b/resources/_gen/assets/css/ananke/css/main.css_83735de7ca999e9c17f3419b41b93fdb.json @@ -0,0 +1 @@ +{"Target":"ananke/css/main.min.css","MediaType":"text/css","Data":{}} \ No newline at end of file diff --git a/resources/_gen/images/cv/myself_hu3552b4e58c95d30acc65db11bb307f36_106705_360x0_resize_q75_box.jpeg b/resources/_gen/images/cv/myself_hu3552b4e58c95d30acc65db11bb307f36_106705_360x0_resize_q75_box.jpeg new file mode 100644 index 0000000..5d4c900 Binary files /dev/null and b/resources/_gen/images/cv/myself_hu3552b4e58c95d30acc65db11bb307f36_106705_360x0_resize_q75_box.jpeg differ diff --git a/resources/_gen/images/cv/myself_hu3552b4e58c95d30acc65db11bb307f36_106705_480x0_resize_q75_box.jpeg b/resources/_gen/images/cv/myself_hu3552b4e58c95d30acc65db11bb307f36_106705_480x0_resize_q75_box.jpeg new file mode 100644 index 0000000..bfd181f Binary files /dev/null and b/resources/_gen/images/cv/myself_hu3552b4e58c95d30acc65db11bb307f36_106705_480x0_resize_q75_box.jpeg differ diff --git a/resources/_gen/images/cv/myself_hu3552b4e58c95d30acc65db11bb307f36_106705_720x0_resize_q75_box.jpeg b/resources/_gen/images/cv/myself_hu3552b4e58c95d30acc65db11bb307f36_106705_720x0_resize_q75_box.jpeg new file mode 100644 index 0000000..098c79a Binary files /dev/null and b/resources/_gen/images/cv/myself_hu3552b4e58c95d30acc65db11bb307f36_106705_720x0_resize_q75_box.jpeg differ diff --git a/resources/_gen/images/posts/vst-on-linux-1/cover_hu99efd4435c8e28b49cfffcf78d63281d_702566_300x200_fill_q75_h2_box_smart1_3.webp b/resources/_gen/images/posts/vst-on-linux-1/cover_hu99efd4435c8e28b49cfffcf78d63281d_702566_300x200_fill_q75_h2_box_smart1_3.webp new file mode 100644 index 0000000..9ca1fdb Binary files /dev/null and b/resources/_gen/images/posts/vst-on-linux-1/cover_hu99efd4435c8e28b49cfffcf78d63281d_702566_300x200_fill_q75_h2_box_smart1_3.webp differ diff --git a/themes/ananke b/themes/ananke new file mode 160000 index 0000000..315a006 --- /dev/null +++ b/themes/ananke @@ -0,0 +1 @@ +Subproject commit 315a00623c9f1c0074ad369c1fd39a960cd01e15 diff --git a/themes/papermod 
b/themes/papermod new file mode 160000 index 0000000..d3d90be --- /dev/null +++ b/themes/papermod @@ -0,0 +1 @@ +Subproject commit d3d90be8a4ea04433d95d02a1dc07b0014c5b8b8