First commit

2025-06-09 23:32:10 -04:00
commit 863aaeabc7
92 changed files with 2992 additions and 0 deletions

1 ansible/.gitignore vendored Normal file

@@ -0,0 +1 @@
kubeconfig.yaml

2 ansible/k3s/.gitignore vendored Normal file

@@ -0,0 +1,2 @@
.ansible
inventory.ini


@@ -0,0 +1,51 @@
- name: Configure the additional disk on the VMs
  hosts: k3s_cluster
  become: true
collections:
- community.general
tasks:
    - name: Check whether disk /dev/vdb exists
ansible.builtin.stat:
path: /dev/vdb
register: disk_check
    - name: Create a data partition on /dev/vdb
community.general.parted:
device: /dev/vdb
fs_type: ext4
label: gpt
number: 1
state: present
when: disk_check.stat.exists
    - name: Format the partition as ext4
community.general.filesystem:
fstype: ext4
dev: /dev/vdb1
when: disk_check.stat.exists
    - name: Get the UUID of the data partition
ansible.builtin.command: blkid -s UUID -o value /dev/vdb1
register: disk_uuid
when: disk_check.stat.exists
    - name: Mount the disk at /mnt/data
ansible.posix.mount:
path: /mnt/data
src: UUID={{ disk_uuid.stdout }}
fstype: ext4
state: "mounted"
when: disk_check.stat.exists
    - name: Create directory for Longhorn
ansible.builtin.file:
path: /mnt/data/longhorn
state: directory
mode: '0755'
    - name: Create directory for Postgres
ansible.builtin.file:
path: /mnt/data/postgres
state: directory
mode: '0755'


@@ -0,0 +1,18 @@
- name: Configure directories for Longhorn and Postgres
  hosts: k3s_cluster
  become: true
collections:
- community.general
tasks:
    - name: Create directory for Longhorn
ansible.builtin.file:
path: /mnt/data/longhorn
state: directory
mode: '0755'
    - name: Create directory for Postgres
ansible.builtin.file:
path: /mnt/data/postgres
state: directory
mode: '0755'

59 ansible/k3s/install.yml Normal file

@@ -0,0 +1,59 @@
- name: Install K3s on the cluster
hosts: k3s_cluster
become: true
tasks:
    - name: Update packages
ansible.builtin.apt:
update_cache: true
upgrade: true
    - name: Install dependencies
ansible.builtin.apt:
name:
- curl
- vim
- unzip
- nfs-common
    - name: Download the K3s installer if not already present
ansible.builtin.get_url:
url: https://get.k3s.io
dest: /tmp/k3s-install.sh
mode: '0755'
register: k3s_install_script
    - name: Install K3s on the master
ansible.builtin.command: /tmp/k3s-install.sh server --disable=servicelb
args:
creates: /usr/local/bin/k3s
when: inventory_hostname in groups['k3s_master']
    - name: Get the K3s token
ansible.builtin.command: cat /var/lib/rancher/k3s/server/node-token
register: k3s_token
changed_when: false
delegate_to: "{{ groups['k3s_master'][0] }}"
    - name: Install K3s on the worker nodes
ansible.builtin.command: /tmp/k3s-install.sh
args:
creates: /usr/local/bin/k3s
environment:
K3S_URL: "https://{{ hostvars[groups['k3s_master'][0]]['inventory_hostname'] }}:6443"
K3S_TOKEN: "{{ k3s_token.stdout }}"
when: inventory_hostname in groups['k3s_workers']
    - name: Copy the kubeconfig to the local host
ansible.builtin.fetch:
src: /etc/rancher/k3s/k3s.yaml
dest: ../kubeconfig.yaml
flat: true
delegate_to: "{{ groups['k3s_master'][0] }}"
    - name: Adjust the kubeconfig for external access
ansible.builtin.replace:
path: ../kubeconfig.yaml
        regexp: '127\.0\.0\.1'
replace: "{{ hostvars[groups['k3s_master'][0]]['inventory_hostname'] }}"
delegate_to: localhost
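
The inventory.ini these plays target is gitignored, so it is not part of the commit; the plays only assume the groups k3s_cluster, k3s_master, and k3s_workers, and install.yml builds K3S_URL and the kubeconfig server address from inventory_hostname, so hosts are most naturally listed by IP. A minimal sketch of an equivalent inventory in Ansible's YAML format, with hypothetical addresses:

all:
  children:
    k3s_cluster:
      children:
        k3s_master:
          hosts:
            192.168.1.51:   # hypothetical master address
        k3s_workers:
          hosts:
            192.168.1.52:   # hypothetical worker addresses
            192.168.1.53: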


@@ -0,0 +1,16 @@
- name: Uninstall K3s from the cluster
  hosts: k3s_cluster
  become: true
tasks:
    - name: Uninstall K3s on the master nodes
      ansible.builtin.shell: |
        /usr/local/bin/k3s-uninstall.sh
when:
- inventory_hostname in groups['k3s_master']
    - name: Uninstall K3s on the worker nodes
      ansible.builtin.shell: |
        /usr/local/bin/k3s-agent-uninstall.sh
when:
- inventory_hostname in groups['k3s_workers']

4 ansible/pihole/.gitignore vendored Normal file

@@ -0,0 +1,4 @@
.ansible
inventory.ini
auth_body.json
secrets.yml


@@ -0,0 +1,25 @@
{
"config": {
"dns": {
"listeningMode": "local",
"upstreams": [
"8.8.8.8",
"8.8.4.4",
"9.9.9.10",
"149.112.112.10",
"1.1.1.1",
"1.0.0.1"
],
"hosts": [
]
},
"dhcp": {
"active": true,
"start": "192.168.1.100",
"end": "192.168.1.254",
"router": "192.168.1.1",
"netmask": "255.255.0.0"
}
}
}


@@ -0,0 +1,28 @@
- name: Configure Pihole
  hosts: pihole
  become: false
  vars_files:
    - secrets.yml
tasks:
    - name: Authenticate against the Pihole API
ansible.builtin.uri:
url: http://localhost/api/auth
method: POST
body: |
{
"password": "{{ pihole_password }}"
}
body_format: json
return_content: true
register: auth_response
changed_when: false
    - name: Extract the SID from the auth response
ansible.builtin.set_fact:
pihole_sid: "{{ auth_response.json.session.sid | urlencode }}"
    - name: Apply the Pihole configuration
ansible.builtin.uri:
url: http://localhost/api/config?sid={{ pihole_sid }}
method: PATCH
src: config.json
body_format: json
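
Nothing in this play checks that the PATCH was actually applied; a follow-up task could read the merged config back. A sketch, assuming the v6 API also serves GET on the same /api/config endpoint:

    - name: Read back the Pihole config (hypothetical verification step)
      ansible.builtin.uri:
        url: http://localhost/api/config?sid={{ pihole_sid }}
        method: GET
        return_content: true
      register: pihole_config
      changed_when: false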


@@ -0,0 +1,43 @@
- name: Install Pihole
hosts: pihole
become: true
vars_files:
- secrets.yml
tasks:
    - name: Update cache and install packages
ansible.builtin.apt:
update_cache: true
upgrade: true
name:
- curl
    - name: Create directory for the file required for unattended install
ansible.builtin.file:
path: /etc/pihole
state: directory
mode: '0755'
    - name: Create the file required for unattended install
ansible.builtin.copy:
content: ""
dest: /etc/pihole/setupVars.conf
force: false
mode: '0755'
    - name: Download the install script
ansible.builtin.get_url:
url: https://install.pi-hole.net
dest: /tmp/install_pihole.sh
mode: '0755'
    - name: Run the Pihole installer
ansible.builtin.command: /tmp/install_pihole.sh --unattended
environment:
PIHOLE_SKIP_OS_CHECK: "true"
args:
creates: '/usr/local/bin/pihole'
    - name: Set the password if not already configured
ansible.builtin.command: pihole setpassword {{ pihole_password }}
changed_when: false


@@ -0,0 +1 @@
pihole_password: "SECRET"

2 haos/.gitignore vendored Normal file

@@ -0,0 +1,2 @@
*.env
!*.env.example


@@ -0,0 +1,45 @@
import requests

API_KEY = "SECRET"  # Replace with the one from the .env (I can't actually read a file on HAOS from these scripts :/)
PLAYLIST_ID = "PLSRBwQZwqGmKf9dHlAuhKSyNy0cH-3RyT"


def get_videos():
    """Fetch all the video IDs from a YouTube playlist."""
video_ids = []
next_page_token = None
while True:
params = {
"part": "contentDetails",
"maxResults": 50,
"playlistId": PLAYLIST_ID,
"key": API_KEY,
}
if next_page_token:
params["pageToken"] = next_page_token
response = requests.get(
"https://youtube.googleapis.com/youtube/v3/playlistItems",
params=params,
headers={"accept": "application/json"},
).json()
        # Error handling
if "error" in response:
print(f"Error en la API: {response['error']['message']}")
return []
        # Extract video IDs
for item in response.get("items", []):
video_ids.append(item["contentDetails"]["videoId"])
        # Check whether there are more pages
next_page_token = response.get("nextPageToken")
if not next_page_token:
break
return [f"https://www.youtube.com/watch?v={video_id}" for video_id in video_ids]
print(get_videos())

1 haos/haos.env.example Normal file

@@ -0,0 +1 @@
API_KEY=EXAMPLE


@@ -0,0 +1,10 @@
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
name: metallb
namespace: kube-system
spec:
repo: https://metallb.github.io/metallb
chart: metallb
targetNamespace: metallb-system
createNamespace: true


@@ -0,0 +1,17 @@
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
name: main-pool
namespace: metallb-system
spec:
addresses:
- 192.168.4.1-192.168.4.254
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
name: metallb-advertisement
namespace: metallb-system
spec:
ipAddressPools:
- main-pool


@@ -0,0 +1,38 @@
apiVersion: helm.cattle.io/v1
kind: HelmChartConfig
metadata:
name: traefik
namespace: kube-system
spec:
valuesContent: |-
service:
annotations:
metallb.universe.tf/loadBalancerIPs: 192.168.4.1
spec:
externalTrafficPolicy: Local
additionalArguments:
- "--providers.kubernetesingress.allowexternalnameservices"
- "--providers.kubernetescrd.allowexternalnameservices"
---
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
namespace: default
name: allow-local-only
spec:
ipWhiteList:
sourceRange:
- 127.0.0.1/32
- 192.168.0.0/16
- 10.0.0.0/8
---
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
name: redirect-https
namespace: default
spec:
redirectScheme:
scheme: https
permanent: true

1 manifest/02-certmanager/.gitignore vendored Normal file

@@ -0,0 +1 @@
secret.yml


@@ -0,0 +1,33 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-staging
spec:
acme:
email: hola@danielcortes.xyz
server: https://acme-staging-v02.api.letsencrypt.org/directory
privateKeySecretRef:
name: acme
solvers:
- dns01:
cloudflare:
apiTokenSecretRef:
name: cloudflare-api-token-secret
key: api-token
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-prod
spec:
acme:
email: hola@danielcortes.xyz
server: https://acme-v02.api.letsencrypt.org/directory
privateKeySecretRef:
name: acme
solvers:
- dns01:
cloudflare:
apiTokenSecretRef:
name: cloudflare-api-token-secret
key: api-token


@@ -0,0 +1,12 @@
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
name: cert-manager
namespace: kube-system
spec:
repo: https://charts.jetstack.io
chart: cert-manager
targetNamespace: cert-manager
createNamespace: true
set:
crds.enabled: "true"


@@ -0,0 +1,8 @@
apiVersion: v1
kind: Secret
metadata:
name: cloudflare-api-token-secret
namespace: cert-manager
type: Opaque
stringData:
api-token: EXAMPLE


@@ -0,0 +1,35 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: longhorn-certificate
namespace: longhorn-system
spec:
secretName: longhorn-skrd-fun-tls
dnsNames:
- "longhorn.skrd.fun"
issuerRef:
name: letsencrypt-prod
kind: ClusterIssuer
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: longhorn
namespace: longhorn-system
annotations:
traefik.ingress.kubernetes.io/router.middlewares: default-allow-local-only@kubernetescrd, default-redirect-https@kubernetescrd
spec:
ingressClassName: traefik
rules:
- host: longhorn.skrd.fun
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: longhorn-frontend
port:
number: 80
tls:
- secretName: longhorn-skrd-fun-tls


@@ -0,0 +1,13 @@
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
name: longhorn
namespace: kube-system
spec:
repo: https://charts.longhorn.io
chart: longhorn
targetNamespace: longhorn-system
createNamespace: true
set:
defaultSettings.defaultDataPath: "/mnt/data/longhorn"

1 manifest/04-minio/.gitignore vendored Normal file

@@ -0,0 +1 @@
secret.yml


@@ -0,0 +1,72 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: minio-certificate
namespace: minio
spec:
secretName: minio-skrd-fun-tls
dnsNames:
- "minio.skrd.fun"
issuerRef:
name: letsencrypt-prod
kind: ClusterIssuer
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: minio-api-certificate
namespace: minio
spec:
secretName: minio-api-skrd-fun-tls
dnsNames:
- "minio-api.skrd.fun"
issuerRef:
name: letsencrypt-prod
kind: ClusterIssuer
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: minio
namespace: minio
annotations:
traefik.ingress.kubernetes.io/router.middlewares: default-allow-local-only@kubernetescrd, default-redirect-https@kubernetescrd
spec:
ingressClassName: traefik
rules:
- host: minio.skrd.fun
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: minio
port:
number: 9001
tls:
- secretName: minio-skrd-fun-tls
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: minio-api
namespace: minio
annotations:
traefik.ingress.kubernetes.io/router.middlewares: default-allow-local-only@kubernetescrd, default-redirect-https@kubernetescrd
spec:
ingressClassName: traefik
rules:
- host: minio-api.skrd.fun
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: minio
port:
number: 9000
tls:
- secretName: minio-api-skrd-fun-tls


@@ -0,0 +1,78 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: minio
namespace: minio
labels:
app: minio
spec:
selector:
matchLabels:
app: minio
serviceName: minio
replicas: 3
template:
metadata:
labels:
app: minio
spec:
containers:
- name: minio
env:
- name: MINIO_REGION_NAME
value: "us-east-1"
- name: MINIO_ACCESS_KEY
valueFrom:
secretKeyRef:
name: minio-secret
key: user
- name: MINIO_SECRET_KEY
valueFrom:
secretKeyRef:
name: minio-secret
key: pass
image: minio/minio:RELEASE.2025-03-12T18-04-18Z
args:
- server
- http://minio-0.minio.minio.svc.cluster.local/data
- http://minio-1.minio.minio.svc.cluster.local/data
- http://minio-2.minio.minio.svc.cluster.local/data
- --console-address=:9001
ports:
- containerPort: 9000
protocol: TCP
name: api
- containerPort: 9001
protocol: TCP
name: console
volumeMounts:
- name: data
mountPath: /data
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn
resources:
requests:
storage: 10Gi
---
apiVersion: v1
kind: Service
metadata:
name: minio
namespace: minio
labels:
app: minio
spec:
clusterIP: None
ports:
- port: 9000
name: minio
- port: 9001
name: console
selector:
app: minio
---


@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: minio


@@ -0,0 +1,9 @@
apiVersion: v1
kind: Secret
metadata:
name: minio-secret
namespace: minio
type: Opaque
stringData:
user: EXAMPLE
pass: EXAMPLE


@@ -0,0 +1,517 @@
# Root key for MinIO Tenant Chart
tenant:
###
# The Tenant name
#
# Change this to match your preferred MinIO Tenant name.
name: minio-tenant
###
  # Specify the MinIO container image to use for the Tenant.
  # ``image.tag``
  # For example, the following sets the image to the ``quay.io/minio/minio`` repo and the RELEASE.2024-11-07T00-52-20Z tag.
# The container pulls the image if not already present:
#
# .. code-block:: yaml
#
# image:
# repository: quay.io/minio/minio
# tag: RELEASE.2024-11-07T00-52-20Z
# pullPolicy: IfNotPresent
#
# The chart also supports specifying an image based on digest value:
#
# .. code-block:: yaml
#
# image:
# repository: quay.io/minio/minio@sha256
# digest: 28c80b379c75242c6fe793dfbf212f43c602140a0de5ebe3d9c2a3a7b9f9f983
# pullPolicy: IfNotPresent
#
#
image:
repository: quay.io/minio/minio
tag: RELEASE.2024-11-07T00-52-20Z
pullPolicy: IfNotPresent
###
#
# An array of Kubernetes secrets to use for pulling images from a private ``image.repository``.
# Only one array element is supported at this time.
imagePullSecret: { }
###
#
  # Specify `initContainers <https://kubernetes.io/docs/concepts/workloads/pods/init-containers/>`__ to perform setup or configuration tasks before the main Tenant pods start.
  #
  # Example of an init container which waits for the identity provider to be reachable before starting the MinIO Tenant:
#
# .. code-block:: yaml
#
# initContainers:
# - name: wait-for-idp
# image: busybox
# command:
# - sh
# - -c
# - |
# URL="https://idp-url"
# echo "Checking IdP reachability (${URL})"
# until $(wget -q -O "/dev/null" ${URL}) ; do
# echo "IdP (${URL}) not reachable. Waiting to be reachable..."
# sleep 5
# done
# echo "IdP (${URL}) reachable. Starting MinIO..."
#
initContainers: [ ]
###
# The Kubernetes `Scheduler <https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/>`__ to use for dispatching Tenant pods.
#
# Specify an empty dictionary ``{}`` to dispatch pods with the default scheduler.
scheduler: { }
###
# The Kubernetes secret name that contains MinIO environment variable configurations.
  # The secret is expected to have a key named config.env containing environment variable exports.
configuration:
name: myminio-env-configuration
###
# Root key for dynamically creating a secret for use with configuring root MinIO User
# Specify the ``name`` and then a list of environment variables.
#
# .. important::
#
# Do not use this in production environments.
# This field is intended for use with rapid development or testing only.
#
# For example:
#
# .. code-block:: yaml
#
# name: myminio-env-configuration
# accessKey: minio
# secretKey: minio123
#
configSecret:
name: myminio-env-configuration
accessKey: minio
secretKey: minio123
#existingSecret: true
###
# Metadata that will be added to the statefulset and pods of all pools
poolsMetadata:
###
# Specify `annotations <https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/>`__ to associate to Tenant pods.
annotations: { }
###
# Specify `labels <https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/>`__ to associate to Tenant pods.
labels: { }
###
# If this variable is set to true, then enable the usage of an existing Kubernetes secret to set environment variables for the Tenant.
# The existing Kubernetes secret name must be placed under .tenant.configuration.name e.g. existing-minio-env-configuration
# The secret must contain a key ``config.env``.
# The values should be a series of export statements to set environment variables for the Tenant.
# For example:
#
# .. code-block:: shell
#
# stringData:
# config.env: |-
# export MINIO_ROOT_USER=ROOTUSERNAME
# export MINIO_ROOT_PASSWORD=ROOTUSERPASSWORD
#
# existingSecret: false
###
# Top level key for configuring MinIO Pool(s) in this Tenant.
#
# See `Operator CRD: Pools <https://min.io/docs/minio/kubernetes/upstream/reference/operator-crd.html#pool>`__ for more information on all subfields.
pools:
###
# The number of MinIO Tenant Pods / Servers in this pool.
# For standalone mode, supply 1. For distributed mode, supply 4 or more.
# Note that the operator does not support upgrading from standalone to distributed mode.
- servers: 4
###
# Custom name for the pool
name: pool-0
###
# The number of volumes attached per MinIO Tenant Pod / Server.
volumesPerServer: 4
###
# The capacity per volume requested per MinIO Tenant Pod.
size: 10Gi
###
# The `storageClass <https://kubernetes.io/docs/concepts/storage/storage-classes/>`__ to associate with volumes generated for this pool.
#
# If using Amazon Elastic Block Store (EBS) CSI driver
# Please make sure to set xfs for "csi.storage.k8s.io/fstype" parameter under StorageClass.parameters.
# Docs: https://github.com/kubernetes-sigs/aws-ebs-csi-driver/blob/master/docs/parameters.md
storageClassName: longhorn
###
# Specify `storageAnnotations <https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/>`__ to associate to PVCs.
storageAnnotations: { }
###
# Specify `storageLabels <https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/>`__ to associate to PVCs.
storageLabels: { }
###
# Specify `annotations <https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/>`__ to associate to Tenant pods.
annotations: { }
###
# Specify `labels <https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/>`__ to associate to Tenant pods.
labels: { }
###
#
# An array of `Toleration labels <https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/>`__ to associate to Tenant pods.
#
# These settings determine the distribution of pods across worker nodes.
tolerations: [ ]
###
# Any `Node Selectors <https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/>`__ to apply to Tenant pods.
#
# The Kubernetes scheduler uses these selectors to determine which worker nodes onto which it can deploy Tenant pods.
#
# If no worker nodes match the specified selectors, the Tenant deployment will fail.
nodeSelector: { }
###
#
# The `affinity <https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/>`__ or anti-affinity settings to apply to Tenant pods.
#
# These settings determine the distribution of pods across worker nodes and can help prevent or allow colocating pods onto the same worker nodes.
affinity: { }
###
#
# The `Requests or Limits <https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/>`__ for resources to associate to Tenant pods.
#
# These settings can control the minimum and maximum resources requested for each pod.
# If no worker nodes can meet the specified requests, the Operator may fail to deploy.
resources: { }
###
# The Kubernetes `SecurityContext <https://kubernetes.io/docs/tasks/configure-pod-container/security-context/>`__ to use for deploying Tenant resources.
#
# You may need to modify these values to meet your cluster's security and access settings.
#
# We recommend disabling recursive permission changes by setting ``fsGroupChangePolicy`` to ``OnRootMismatch`` as those operations can be expensive for certain workloads (e.g. large volumes with many small files).
securityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
fsGroupChangePolicy: "OnRootMismatch"
runAsNonRoot: true
###
# The Kubernetes `SecurityContext <https://kubernetes.io/docs/tasks/configure-pod-container/security-context/>`__ to use for deploying Tenant containers.
# You may need to modify these values to meet your cluster's security and access settings.
containerSecurityContext:
runAsUser: 1000
runAsGroup: 1000
runAsNonRoot: true
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
seccompProfile:
type: RuntimeDefault
###
#
# An array of `Topology Spread Constraints <https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/>`__ to associate to Operator Console pods.
#
# These settings determine the distribution of pods across worker nodes.
topologySpreadConstraints: [ ]
###
#
# The name of a custom `Container Runtime <https://kubernetes.io/docs/concepts/containers/runtime-class/>`__ to use for the Operator Console pods.
# runtimeClassName: ""
###
# The mount path where Persistent Volumes are mounted inside Tenant container(s).
mountPath: /export
###
# The Sub path inside Mount path where MinIO stores data.
#
# .. warning::
#
# Treat the ``mountPath`` and ``subPath`` values as immutable once you deploy the Tenant.
# If you change these values post-deployment, then you may have different paths for new and pre-existing data.
# This can vastly increase operational complexity and may result in unpredictable data states.
subPath: /data
###
# Configures a Prometheus-compatible scraping endpoint at the specified port.
metrics:
enabled: false
port: 9000
protocol: http
###
# Configures external certificate settings for the Tenant.
certificate:
###
    # Specify an array of Kubernetes TLS secrets, where each entry corresponds to a secret containing the TLS private key and public certificate pair.
#
# This is used by MinIO to verify TLS connections from clients using those CAs
# If you omit this and have clients using TLS certificates minted by an external CA, those connections may fail with warnings around certificate verification.
# See `Operator CRD: TenantSpec <https://min.io/docs/minio/kubernetes/upstream/reference/operator-crd.html#tenantspec>`__.
externalCaCertSecret: [ ]
###
    # Specify an array of Kubernetes secrets, where each entry corresponds to a secret that contains the TLS private key and public certificate pair.
#
# Omit this to use only the MinIO Operator autogenerated certificates.
#
# If you omit this field *and* set ``requestAutoCert`` to false, the Tenant starts without TLS.
#
# See `Operator CRD: TenantSpec <https://min.io/docs/minio/kubernetes/upstream/reference/operator-crd.html#tenantspec>`__.
#
# .. important::
#
# The MinIO Operator may output TLS connectivity errors if it cannot trust the Certificate Authority (CA) which minted the custom certificates.
#
# You can pass the CA to the Operator to allow it to trust that cert.
# See `Self-Signed, Internal, and Private Certificates <https://min.io/docs/minio/kubernetes/upstream/operations/network-encryption.html#self-signed-internal-and-private-certificates>`__ for more information.
# This step may also be necessary for globally trusted CAs where you must provide intermediate certificates to the Operator to help build the full chain of trust.
externalCertSecret: [ ]
###
# Enable automatic Kubernetes based `certificate generation and signing <https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster>`__
requestAutoCert: true
###
# The minimum number of days to expiry before an alert for an expiring certificate is fired.
# In the below example, if a given certificate will expire in 7 days then expiration events will only be triggered 1 day before expiry
# certExpiryAlertThreshold: 1
###
# This field is used only when ``requestAutoCert: true``.
# Use this field to set CommonName for the auto-generated certificate.
# MinIO defaults to using the internal Kubernetes DNS name for the pod
# The default DNS name format is typically ``*.minio.default.svc.cluster.local``.
#
# See `Operator CRD: CertificateConfig <https://min.io/docs/minio/kubernetes/upstream/reference/operator-crd.html#certificateconfig>`__
certConfig: { }
###
# MinIO features to enable or disable in the MinIO Tenant
# See `Operator CRD: Features <https://min.io/docs/minio/kubernetes/upstream/reference/operator-crd.html#features>`__.
features:
bucketDNS: false
domains: { }
enableSFTP: false
###
# Array of objects describing one or more buckets to create during tenant provisioning.
# Example:
#
# .. code-block:: yaml
#
# - name: my-minio-bucket
# objectLock: false # optional
# region: us-east-1 # optional
buckets: [ ]
###
# Array of Kubernetes secrets from which the Operator generates MinIO users during tenant provisioning.
#
# Each secret should specify the ``CONSOLE_ACCESS_KEY`` and ``CONSOLE_SECRET_KEY`` as the access key and secret key for that user.
users: [ ]
###
# The `PodManagement <https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy>`__ policy for MinIO Tenant Pods.
# Can be "OrderedReady" or "Parallel"
podManagementPolicy: Parallel
# The `Liveness Probe <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes>`__ for monitoring Tenant pod liveness.
# Tenant pods will be restarted if the probe fails.
liveness: { }
###
# `Readiness Probe <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/>`__ for monitoring Tenant container readiness.
# Tenant pods will be removed from service endpoints if the probe fails.
readiness: { }
###
  # `Startup Probe <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/>`__ for monitoring container startup.
  # Tenant pods will be restarted if the probe fails.
  startup: { }
###
# The `Lifecycle hooks <https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/>`__ for container.
lifecycle: { }
###
# Directs the Operator to deploy the MinIO S3 API and Console services as LoadBalancer objects.
#
# If the Kubernetes cluster has a configured LoadBalancer, it can attempt to route traffic to those services automatically.
#
# - Specify ``minio: true`` to expose the MinIO S3 API.
# - Specify ``console: true`` to expose the Console.
#
# Both fields default to ``false``.
exposeServices:
minio: true
console: true
###
# The `Kubernetes Service Account <https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/>`__ associated with the Tenant.
serviceAccountName: ""
###
# Directs the Operator to add the Tenant's metric scrape configuration to an existing Kubernetes Prometheus deployment managed by the Prometheus Operator.
prometheusOperator: false
###
# Configure pod logging configuration for the MinIO Tenant.
#
# - Specify ``json`` for JSON-formatted logs.
# - Specify ``anonymous`` for anonymized logs.
  # - Specify ``quiet`` to suppress logging.
#
# An example of JSON-formatted logs is as follows:
#
# .. code-block:: shell
#
# $ k logs myminio-pool-0-0 -n default
# {"level":"INFO","errKind":"","time":"2022-04-07T21:49:33.740058549Z","message":"All MinIO sub-systems initialized successfully"}
logging: { }
###
# serviceMetadata allows passing additional labels and annotations to MinIO and Console specific
# services created by the operator.
serviceMetadata: { }
###
# Add environment variables to be set in MinIO container (https://github.com/minio/minio/tree/master/docs/config)
env: [ ]
###
# PriorityClassName indicates the Pod priority and hence importance of a Pod relative to other Pods.
# This is applied to MinIO pods only.
# Refer Kubernetes documentation for details https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass/
priorityClassName: ""
###
# An array of `Volumes <https://kubernetes.io/docs/concepts/storage/volumes/>`__ which the Operator can mount to Tenant pods.
#
# The volumes must exist *and* be accessible to the Tenant pods.
additionalVolumes: [ ]
###
# An array of volume mount points associated to each Tenant container.
#
# Specify each item in the array as follows:
#
# .. code-block:: yaml
#
# volumeMounts:
# - name: volumename
# mountPath: /path/to/mount
#
# The ``name`` field must correspond to an entry in the ``additionalVolumes`` array.
additionalVolumeMounts: [ ]
# Define configuration for KES (stateless and distributed key-management system)
# Refer https://github.com/minio/kes
#kes:
# ## Image field:
# # Image from tag (original behavior), for example:
# # image:
# # repository: quay.io/minio/kes
# # tag: 2024-11-25T13-44-31Z
# # Image from digest (added after original behavior), for example:
# # image:
# # repository: quay.io/minio/kes@sha256
# # digest: fb15af611149892f357a8a99d1bcd8bf5dae713bd64c15e6eb27fbdb88fc208b
# image:
# repository: quay.io/minio/kes
# tag: 2024-11-25T13-44-31Z
# pullPolicy: IfNotPresent
# env: [ ]
# replicas: 2
# configuration: |-
# address: :7373
# tls:
# key: /tmp/kes/server.key # Path to the TLS private key
# cert: /tmp/kes/server.crt # Path to the TLS certificate
# proxy:
# identities: []
# header:
# cert: X-Tls-Client-Cert
# admin:
# identity: ${MINIO_KES_IDENTITY}
# cache:
# expiry:
# any: 5m0s
# unused: 20s
# log:
# error: on
# audit: off
# keystore:
# # KES configured with fs (File System mode) doesn't work in Kubernetes environments and is not recommended
# # use a real KMS
# # fs:
# # path: "./keys" # Path to directory. Keys will be stored as files. Not Recommended for Production.
# vault:
# endpoint: "http://vault.default.svc.cluster.local:8200" # The Vault endpoint
# namespace: "" # An optional Vault namespace. See: https://www.vaultproject.io/docs/enterprise/namespaces/index.html
# prefix: "my-minio" # An optional K/V prefix. The server will store keys under this prefix.
# approle: # AppRole credentials. See: https://www.vaultproject.io/docs/auth/approle.html
# id: "<YOUR APPROLE ID HERE>" # Your AppRole Role ID
# secret: "<YOUR APPROLE SECRET ID HERE>" # Your AppRole Secret ID
# retry: 15s # Duration until the server tries to re-authenticate after connection loss.
# tls: # The Vault client TLS configuration for mTLS authentication and certificate verification
# key: "" # Path to the TLS client private key for mTLS authentication to Vault
# cert: "" # Path to the TLS client certificate for mTLS authentication to Vault
# ca: "" # Path to one or multiple PEM root CA certificates
# status: # Vault status configuration. The server will periodically reach out to Vault to check its status.
# ping: 10s # Duration until the server checks Vault's status again.
# # aws:
# # # The AWS SecretsManager key store. The server will store
# # # secret keys at the AWS SecretsManager encrypted with
# # # AWS-KMS. See: https://aws.amazon.com/secrets-manager
# # secretsmanager:
# # endpoint: "" # The AWS SecretsManager endpoint - e.g.: secretsmanager.us-east-2.amazonaws.com
# # region: "" # The AWS region of the SecretsManager - e.g.: us-east-2
# # kmskey: "" # The AWS-KMS key ID used to en/decrypt secrets at the SecretsManager. By default (if not set) the default AWS-KMS key will be used.
# # credentials: # The AWS credentials for accessing secrets at the AWS SecretsManager.
# # accesskey: "" # Your AWS Access Key
# # secretkey: "" # Your AWS Secret Key
# # token: "" # Your AWS session token (usually optional)
# imagePullPolicy: "IfNotPresent"
# externalCertSecret: null
# clientCertSecret: null
# # Key name to be created on the KMS, default is "my-minio-key"
# keyName: ""
# resources: { }
# nodeSelector: { }
# affinity:
# nodeAffinity: { }
# podAffinity: { }
# podAntiAffinity: { }
# tolerations: [ ]
# annotations: { }
# labels: { }
# serviceAccountName: ""
# securityContext:
# runAsUser: 1000
# runAsGroup: 1000
# runAsNonRoot: true
# fsGroup: 1000
# containerSecurityContext:
# runAsUser: 1000
# runAsGroup: 1000
# runAsNonRoot: true
# allowPrivilegeEscalation: false
# capabilities:
# drop:
# - ALL
# seccompProfile:
# type: RuntimeDefault
###
# Configures `Ingress <https://kubernetes.io/docs/concepts/services-networking/ingress/>`__ for the Tenant S3 API and Console.
#
# Set the keys to conform to the Ingress controller and configuration of your choice.
ingress:
api:
enabled: true
ingressClassName: ""
labels: { }
annotations: { }
tls: [ ]
host: minio.skrd.fun
path: /
pathType: Prefix
console:
enabled: false
ingressClassName: ""
labels: { }
annotations: { }
tls: [ ]
host: minio-console.skrd.fun
path: /
pathType: Prefix
# Use an extraResources template section to include additional Kubernetes resources
# with the Helm deployment.
#extraResources:
# - |
# apiVersion: v1
# kind: Secret
# type: Opaque
# metadata:
# name: {{ dig "tenant" "configSecret" "name" "" (.Values | merge (dict)) }}
# stringData:
# config.env: |-
# export MINIO_ROOT_USER='minio'
# export MINIO_ROOT_PASSWORD='minio123'

1 manifest/05-postgres/.gitignore vendored Normal file

@@ -0,0 +1 @@
secret.yml


@@ -0,0 +1,9 @@
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: daily-backup
spec:
  schedule: "0 0 0 * * *" # daily at midnight; CNPG's cron format includes a leading seconds field
backupOwnerReference: self
cluster:
name: cnpg-cluster
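
For a one-off backup outside this schedule, CloudNativePG also provides a Backup resource that targets the same cluster; a minimal sketch:

apiVersion: postgresql.cnpg.io/v1
kind: Backup
metadata:
  name: cnpg-cluster-ondemand # hypothetical name
spec:
  cluster:
    name: cnpg-cluster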


@@ -0,0 +1,45 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: cnpg-cluster
spec:
instances: 3
bootstrap:
initdb:
database: app
owner: app
secret:
name: cnpg-secret
storage:
pvcTemplate:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Gi
storageClassName: cnpg-storage
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
podAntiAffinityType: required
backup:
retentionPolicy: "7d"
barmanObjectStore:
destinationPath: "s3://backups/"
endpointURL: "https://minio-api.skrd.fun"
s3Credentials:
accessKeyId:
name: aws-creds
key: ACCESS_KEY
secretAccessKey:
name: aws-creds
key: SECRET_KEY
managed:
services:
additional:
- selectorType: rw
serviceTemplate:
metadata:
name: "cnpg-cluster-open"
spec:
type: LoadBalancer


@@ -0,0 +1,10 @@
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
name: postgres
namespace: kube-system
spec:
repo: https://cloudnative-pg.github.io/charts
chart: cloudnative-pg
targetNamespace: cnpg-system
createNamespace: true


@@ -0,0 +1,17 @@
apiVersion: v1
kind: Secret
metadata:
name: cnpg-secret
stringData:
username: EXAMPLE
password: EXAMPLE
type: kubernetes.io/basic-auth
---
apiVersion: v1
kind: Secret
metadata:
name: aws-creds
type: Opaque
stringData:
ACCESS_KEY: EXAMPLE
SECRET_KEY: EXAMPLE


@@ -0,0 +1,11 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: cnpg-storage
provisioner: driver.longhorn.io
allowVolumeExpansion: true
parameters:
numberOfReplicas: "1"
dataLocality: "strict-local"
staleReplicaTimeout: "2880"
---


@@ -0,0 +1,221 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: actual-certificate
namespace: external-services
spec:
secretName: actual-skrd-fun-tls
dnsNames:
- "actual.skrd.fun"
issuerRef:
name: letsencrypt-prod
kind: ClusterIssuer
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: haos-certificate
namespace: external-services
spec:
secretName: haos-skrd-fun-tls
dnsNames:
- "haos.skrd.fun"
issuerRef:
name: letsencrypt-prod
kind: ClusterIssuer
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: firefly-certificate
namespace: external-services
spec:
secretName: firefly-skrd-fun-tls
dnsNames:
- "firefly.skrd.fun"
issuerRef:
name: letsencrypt-prod
kind: ClusterIssuer
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: gametabs-certificate
namespace: external-services
spec:
secretName: gametabs-skrd-fun-tls
dnsNames:
- "gametabs.skrd.fun"
issuerRef:
name: letsencrypt-prod
kind: ClusterIssuer
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: git-certificate
namespace: external-services
spec:
secretName: git-skrd-fun-tls
dnsNames:
- "git.skrd.fun"
issuerRef:
name: letsencrypt-prod
kind: ClusterIssuer
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: jackett-certificate
namespace: external-services
spec:
secretName: jackett-skrd-fun-tls
dnsNames:
- "jackett.skrd.fun"
issuerRef:
name: letsencrypt-prod
kind: ClusterIssuer
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: jelly-certificate
namespace: external-services
spec:
secretName: jelly-skrd-fun-tls
dnsNames:
- "jelly.skrd.fun"
issuerRef:
name: letsencrypt-prod
kind: ClusterIssuer
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: navi-certificate
namespace: external-services
spec:
secretName: navi-skrd-fun-tls
dnsNames:
- "navi.skrd.fun"
issuerRef:
name: letsencrypt-prod
kind: ClusterIssuer
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: npm-certificate
namespace: external-services
spec:
secretName: npm-skrd-fun-tls
dnsNames:
- "npm.skrd.fun"
issuerRef:
name: letsencrypt-prod
kind: ClusterIssuer
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: omv-certificate
namespace: external-services
spec:
secretName: omv-skrd-fun-tls
dnsNames:
- "omv.skrd.fun"
issuerRef:
name: letsencrypt-prod
kind: ClusterIssuer
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: photos-certificate
namespace: external-services
spec:
secretName: photos-skrd-fun-tls
dnsNames:
- "photos.skrd.fun"
issuerRef:
name: letsencrypt-prod
kind: ClusterIssuer
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: pihole-certificate
namespace: external-services
spec:
secretName: pihole-skrd-fun-tls
dnsNames:
- "pihole.skrd.fun"
issuerRef:
name: letsencrypt-prod
kind: ClusterIssuer
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: portainer-certificate
namespace: external-services
spec:
secretName: portainer-skrd-fun-tls
dnsNames:
- "portainer.skrd.fun"
issuerRef:
name: letsencrypt-prod
kind: ClusterIssuer
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: radarr-certificate
namespace: external-services
spec:
secretName: radarr-skrd-fun-tls
dnsNames:
- "radarr.skrd.fun"
issuerRef:
name: letsencrypt-prod
kind: ClusterIssuer
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: sonarr-certificate
namespace: external-services
spec:
secretName: sonarr-skrd-fun-tls
dnsNames:
- "sonarr.skrd.fun"
issuerRef:
name: letsencrypt-prod
kind: ClusterIssuer
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: torrent-certificate
namespace: external-services
spec:
secretName: torrent-skrd-fun-tls
dnsNames:
- "torrent.skrd.fun"
issuerRef:
name: letsencrypt-prod
kind: ClusterIssuer
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: notes-certificate
namespace: external-services
spec:
secretName: notes-skrd-fun-tls
dnsNames:
- "notes.skrd.fun"
issuerRef:
name: letsencrypt-prod
kind: ClusterIssuer
---


@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: external-services


@@ -0,0 +1,33 @@
kind: Service
apiVersion: v1
metadata:
name: firefly
namespace: external-services
spec:
type: ExternalName
ports:
- port: 8080
externalName: 192.168.103.15
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: firefly-ingress
namespace: external-services
annotations:
traefik.ingress.kubernetes.io/router.middlewares: default-redirect-https@kubernetescrd
spec:
ingressClassName: traefik
rules:
- host: firefly.skrd.fun
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: firefly
port:
number: 8080
tls:
- secretName: firefly-skrd-fun-tls


@@ -0,0 +1,33 @@
kind: Service
apiVersion: v1
metadata:
name: gametabs
namespace: external-services
spec:
type: ExternalName
ports:
- port: 80
externalName: 192.168.103.6
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: gametabs-ingress
namespace: external-services
annotations:
traefik.ingress.kubernetes.io/router.middlewares: default-redirect-https@kubernetescrd
spec:
ingressClassName: traefik
rules:
- host: gametabs.skrd.fun
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: gametabs
port:
number: 80
tls:
- secretName: gametabs-skrd-fun-tls


@@ -0,0 +1,33 @@
kind: Service
apiVersion: v1
metadata:
name: git
namespace: external-services
spec:
type: ExternalName
ports:
- port: 80
externalName: 192.168.103.3
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: git-ingress
namespace: external-services
annotations:
traefik.ingress.kubernetes.io/router.middlewares: default-redirect-https@kubernetescrd
spec:
ingressClassName: traefik
rules:
- host: git.skrd.fun
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: git
port:
number: 80
tls:
- secretName: git-skrd-fun-tls


@@ -0,0 +1,33 @@
kind: Service
apiVersion: v1
metadata:
name: hedgedoc
namespace: external-services
spec:
type: ExternalName
ports:
- port: 3000
externalName: 192.168.103.19
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: hedgedoc-ingress
namespace: external-services
annotations:
traefik.ingress.kubernetes.io/router.middlewares: default-redirect-https@kubernetescrd
spec:
ingressClassName: traefik
rules:
- host: notes.skrd.fun
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: hedgedoc
port:
number: 3000
tls:
- secretName: notes-skrd-fun-tls


@@ -0,0 +1,33 @@
kind: Service
apiVersion: v1
metadata:
name: haos
namespace: external-services
spec:
type: ExternalName
ports:
- port: 8123
externalName: 192.168.3.6
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: haos-ingress
namespace: external-services
annotations:
traefik.ingress.kubernetes.io/router.middlewares: default-redirect-https@kubernetescrd
spec:
ingressClassName: traefik
rules:
- host: haos.skrd.fun
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: haos
port:
number: 8123
tls:
- secretName: haos-skrd-fun-tls


@@ -0,0 +1,33 @@
kind: Service
apiVersion: v1
metadata:
name: jackett
namespace: external-services
spec:
type: ExternalName
ports:
- port: 9117
externalName: 192.168.103.18
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: jackett-ingress
namespace: external-services
annotations:
traefik.ingress.kubernetes.io/router.middlewares: default-redirect-https@kubernetescrd
spec:
ingressClassName: traefik
rules:
- host: jackett.skrd.fun
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: jackett
port:
number: 9117
tls:
- secretName: jackett-skrd-fun-tls


@@ -0,0 +1,33 @@
kind: Service
apiVersion: v1
metadata:
name: jelly
namespace: external-services
spec:
type: ExternalName
ports:
- port: 8096
externalName: 192.168.103.11
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: jelly-ingress
namespace: external-services
annotations:
traefik.ingress.kubernetes.io/router.middlewares: default-redirect-https@kubernetescrd
spec:
ingressClassName: traefik
rules:
- host: jelly.skrd.fun
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: jelly
port:
number: 8096
tls:
- secretName: jelly-skrd-fun-tls


@@ -0,0 +1,33 @@
kind: Service
apiVersion: v1
metadata:
name: navi
namespace: external-services
spec:
type: ExternalName
ports:
- port: 4533
externalName: 192.168.103.10
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: navi-ingress
namespace: external-services
annotations:
traefik.ingress.kubernetes.io/router.middlewares: default-redirect-https@kubernetescrd
spec:
ingressClassName: traefik
rules:
- host: navi.skrd.fun
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: navi
port:
number: 4533
tls:
- secretName: navi-skrd-fun-tls


@@ -0,0 +1,33 @@
kind: Service
apiVersion: v1
metadata:
name: npm
namespace: external-services
spec:
type: ExternalName
ports:
- port: 81
externalName: 192.168.103.2
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: npm-ingress
namespace: external-services
annotations:
traefik.ingress.kubernetes.io/router.middlewares: default-redirect-https@kubernetescrd
spec:
ingressClassName: traefik
rules:
- host: npm.skrd.fun
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: npm
port:
number: 81
tls:
- secretName: npm-skrd-fun-tls


@@ -0,0 +1,33 @@
kind: Service
apiVersion: v1
metadata:
name: omv
namespace: external-services
spec:
type: ExternalName
ports:
- port: 3000
externalName: 192.168.102.1
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: omv-ingress
namespace: external-services
annotations:
traefik.ingress.kubernetes.io/router.middlewares: default-redirect-https@kubernetescrd
spec:
ingressClassName: traefik
rules:
- host: omv.skrd.fun
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: omv
port:
number: 3000
tls:
- secretName: omv-skrd-fun-tls


@@ -0,0 +1,33 @@
kind: Service
apiVersion: v1
metadata:
name: photos
namespace: external-services
spec:
type: ExternalName
ports:
- port: 2283
externalName: 192.168.103.12
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: photos-ingress
namespace: external-services
annotations:
traefik.ingress.kubernetes.io/router.middlewares: default-redirect-https@kubernetescrd
spec:
ingressClassName: traefik
rules:
- host: photos.skrd.fun
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: photos
port:
number: 2283
tls:
- secretName: photos-skrd-fun-tls


@@ -0,0 +1,43 @@
kind: Service
apiVersion: v1
metadata:
name: pihole
namespace: external-services
spec:
type: ExternalName
ports:
- port: 80
externalName: 192.168.3.5
---
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
name: pihole-rewrite
namespace: external-services
spec:
replacePathRegex:
regex: ^/$
replacement: /admin/
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: pihole-ingress
namespace: external-services
annotations:
traefik.ingress.kubernetes.io/router.middlewares: default-allow-local-only@kubernetescrd, default-redirect-https@kubernetescrd, external-services-pihole-rewrite@kubernetescrd
spec:
ingressClassName: traefik
rules:
- host: pihole.skrd.fun
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: pihole
port:
number: 80
tls:
- secretName: pihole-skrd-fun-tls


@@ -0,0 +1,33 @@
kind: Service
apiVersion: v1
metadata:
name: portainer
namespace: external-services
spec:
type: ExternalName
ports:
- port: 9000
externalName: 192.168.102.1
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: portainer-ingress
namespace: external-services
annotations:
traefik.ingress.kubernetes.io/router.middlewares: default-allow-local-only@kubernetescrd, default-redirect-https@kubernetescrd
spec:
ingressClassName: traefik
rules:
- host: portainer.skrd.fun
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: portainer
port:
number: 9000
tls:
- secretName: portainer-skrd-fun-tls


@@ -0,0 +1,33 @@
kind: Service
apiVersion: v1
metadata:
name: radarr
namespace: external-services
spec:
type: ExternalName
ports:
- port: 7878
externalName: 192.168.103.17
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: radarr-ingress
namespace: external-services
annotations:
traefik.ingress.kubernetes.io/router.middlewares: default-redirect-https@kubernetescrd
spec:
ingressClassName: traefik
rules:
- host: radarr.skrd.fun
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: radarr
port:
number: 7878
tls:
- secretName: radarr-skrd-fun-tls


@@ -0,0 +1,33 @@
kind: Service
apiVersion: v1
metadata:
name: sonarr
namespace: external-services
spec:
type: ExternalName
ports:
- port: 8989
externalName: 192.168.103.16
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: sonarr-ingress
namespace: external-services
annotations:
traefik.ingress.kubernetes.io/router.middlewares: default-redirect-https@kubernetescrd
spec:
ingressClassName: traefik
rules:
- host: sonarr.skrd.fun
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: sonarr
port:
number: 8989
tls:
- secretName: sonarr-skrd-fun-tls


@@ -0,0 +1,33 @@
kind: Service
apiVersion: v1
metadata:
name: torrent
namespace: external-services
spec:
type: ExternalName
ports:
- port: 80
externalName: 192.168.103.4
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: torrent-ingress
namespace: external-services
annotations:
traefik.ingress.kubernetes.io/router.middlewares: default-redirect-https@kubernetescrd
spec:
ingressClassName: traefik
rules:
- host: torrent.skrd.fun
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: torrent
port:
number: 80
tls:
- secretName: torrent-skrd-fun-tls

1 manifest/07-ddns/.gitignore vendored Normal file

@@ -0,0 +1 @@
secret.yml

44 manifest/07-ddns/ddns.yml Normal file

@@ -0,0 +1,44 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: ddns
spec:
selector:
matchLabels:
app: ddns
replicas: 1
template:
metadata:
labels:
app: ddns
spec:
containers:
- name: ddns
image: favonia/cloudflare-ddns:latest
env:
- name: PUID
value: "1000"
- name: PGID
value: "1000"
- name: IP6_PROVIDER
value: "none"
- name: CF_API_TOKEN
valueFrom:
secretKeyRef:
name: ddns-secret
key: api_token
- name: DOMAINS
value: "local.skrd.fun,direct.skrd.fun"
- name: PROXIED
value: "false"
securityContext:
capabilities:
drop: ["ALL"]
add: ["SETUID", "SETGID"]
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
resources:
limits:
memory: '128Mi'
cpu: '10m'


@@ -0,0 +1,7 @@
apiVersion: v1
kind: Secret
metadata:
name: ddns-secret
type: Opaque
stringData:
api_token: EXAMPLE


@@ -0,0 +1,50 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: hello-world
spec:
ingressClassName: traefik
rules:
- http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: hello-world
port:
number: 80
---
apiVersion: v1
kind: Service
metadata:
name: hello-world
spec:
ports:
- port: 80
protocol: TCP
selector:
app: hello-world
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: hello-world
spec:
selector:
matchLabels:
app: hello-world
replicas: 10
template:
metadata:
labels:
app: hello-world
spec:
containers:
- name: nginx
image: nginx
ports:
- containerPort: 80


@@ -0,0 +1,48 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: whoami
spec:
selector:
matchLabels:
app: whoami
replicas: 10
template:
metadata:
labels:
app: whoami
spec:
containers:
- name: nginx
image: containous/whoami
ports:
- containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
name: whoami
spec:
ports:
- port: 80
protocol: TCP
selector:
app: whoami
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: whoami
spec:
ingressClassName: traefik
rules:
- http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: whoami
port:
number: 80

2 stacks/.gitignore vendored Normal file

@@ -0,0 +1,2 @@
*.env
!*.env.example

13 stacks/actual.yml Normal file

@@ -0,0 +1,13 @@
services:
actual_server:
image: docker.io/actualbudget/actual-server:latest
networks:
macvlan_192_168_3_0:
ipv4_address: 192.168.3.13
volumes:
- /srv/dev-disk-by-uuid-1582b800-1f82-407a-a3aa-3460b3390127/docker/actual:/data
restart: unless-stopped
networks:
macvlan_192_168_3_0:
external: true

15 stacks/adguard.yml Normal file

@@ -0,0 +1,15 @@
version: "3"
services:
adguard:
image: adguard/adguardhome
restart: unless-stopped
volumes:
- /srv/dev-disk-by-uuid-1582b800-1f82-407a-a3aa-3460b3390127/docker/adguard/data:/opt/adguardhome/work
- /srv/dev-disk-by-uuid-1582b800-1f82-407a-a3aa-3460b3390127/docker/adguard/config:/opt/adguardhome/conf
networks:
macvlan_192_168_3_0:
ipv4_address: 192.168.3.1
networks:
macvlan_192_168_3_0:
external: true

6 stacks/ddns.env.example Normal file

@@ -0,0 +1,6 @@
PUID=1000
PGID=1000
IP6_PROVIDER=none
CF_API_TOKEN=EXAMPLE
DOMAINS=local.skrd.fun,direct.skrd.fun
PROXIED=false

14 stacks/ddns.yml Normal file

@@ -0,0 +1,14 @@
version: "3"
services:
cloudflare-ddns:
image: favonia/cloudflare-ddns:latest
restart: always
cap_add:
- SETUID
- SETGID
cap_drop:
- all
read_only: true
security_opt:
- no-new-privileges:true
env_file: stack.env


@@ -0,0 +1,6 @@
TYPE="AUTO_CURSEFORGE"
CF_API_KEY="SECRET"
CF_PAGE_URL="https://www.curseforge.com/minecraft/modpacks/create-astral"
MEMORY="10G"
EULA="TRUE"
ONLINE_MODE="TRUE"

18 stacks/feed-the-beast.yml Normal file

@@ -0,0 +1,18 @@
version: "3"
services:
feed_the_beast:
image: itzg/minecraft-server
tty: true
stdin_open: true
restart: unless-stopped
env_file: stack.env
volumes:
- /srv/dev-disk-by-uuid-1582b800-1f82-407a-a3aa-3460b3390127/docker/feed-the-beast/data:/data
networks:
macvlan_192_168_3_0:
ipv4_address: 192.168.3.5
networks:
macvlan_192_168_3_0:
external: true

43 stacks/fireflyiii.yml Normal file

@@ -0,0 +1,43 @@
services:
app:
image: fireflyiii/core:latest
restart: unless-stopped
volumes:
- /srv/dev-disk-by-uuid-1582b800-1f82-407a-a3aa-3460b3390127/docker/firefly_iii/upload:/var/www/html/storage/upload
env_file: stack.env
networks:
firefly_iii:
macvlan_192_168_3_0:
ipv4_address: 192.168.3.15
depends_on:
- db
db:
image: mariadb:lts
restart: unless-stopped
env_file: stack.env
networks:
- firefly_iii
volumes:
- /srv/dev-disk-by-uuid-1582b800-1f82-407a-a3aa-3460b3390127/docker/firefly_iii/db:/var/lib/mysql
cron:
#
# To make this work, set STATIC_CRON_TOKEN in your .env file or as an environment variable and replace REPLACEME below
# The STATIC_CRON_TOKEN must be *exactly* 32 characters long
#
image: alpine
restart: unless-stopped
env_file: stack.env
command: sh -c "
apk add tzdata
&& ln -s /usr/share/zoneinfo/${TZ} /etc/localtime
| echo \"0 3 * * * wget -qO- http://app:8080/api/v1/cron/REPLACEME;echo\"
| crontab -
&& crond -f -L /dev/stdout"
networks:
- firefly_iii
networks:
firefly_iii:
macvlan_192_168_3_0:
external: true

14 stacks/gametabs.yml Normal file

@@ -0,0 +1,14 @@
version: "3"
services:
gametabs:
image: nginx
restart: unless-stopped
volumes:
- /srv/dev-disk-by-uuid-1582b800-1f82-407a-a3aa-3460b3390127/docker/gametabs:/usr/share/nginx/html
networks:
macvlan_192_168_3_0:
ipv4_address: 192.168.3.6
networks:
macvlan_192_168_3_0:
external: true

8 stacks/gitea.env.example Normal file

@@ -0,0 +1,8 @@
CONFIG_FILE=/config/config.yaml
GITEA_INSTANCE_URL="https://git.skrd.fun"
GITEA_RUNNER_REGISTRATION_TOKEN="SECRET"
GITEA_RUNNER_NAME="gitea-runner-1"
POSTGRES_PASSWORD="SECRET"
POSTGRES_DB="gitea"
POSTGRES_USER="gitea"

46 stacks/gitea.yml Normal file

@@ -0,0 +1,46 @@
version: "3"
services:
gitea:
image: gitea/gitea:latest
restart: unless-stopped
environment:
- USER_UID=1000
- USER_GID=1000
networks:
gitea_network:
macvlan_192_168_3_0:
ipv4_address: 192.168.3.3
volumes:
- /srv/dev-disk-by-uuid-1582b800-1f82-407a-a3aa-3460b3390127/docker/gitea/data:/data
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
depends_on:
- gitea-db
runner:
image: gitea/act_runner:latest
restart: unless-stopped
env_file: stack.env
networks:
- gitea_network
volumes:
- /srv/dev-disk-by-uuid-1582b800-1f82-407a-a3aa-3460b3390127/docker/gitea/runner/config:/config
- /srv/dev-disk-by-uuid-1582b800-1f82-407a-a3aa-3460b3390127/docker/gitea/runner/data:/data
- /var/run/docker.sock:/var/run/docker.sock
depends_on:
- gitea
gitea-db:
image: postgres:16
restart: unless-stopped
volumes:
- /srv/dev-disk-by-uuid-1582b800-1f82-407a-a3aa-3460b3390127/docker/gitea/db:/var/lib/postgresql/data
networks:
- gitea_network
env_file: stack.env
networks:
gitea_network:
macvlan_192_168_3_0:
external: true


@@ -0,0 +1,8 @@
DB_PASSWORD=SECRET
DB_USERNAME=postgres
DB_DATABASE_NAME=immich
POSTGRES_PASSWORD=SECRET
POSTGRES_USER=postgres
POSTGRES_DB=immich
POSTGRES_INITDB_ARGS="--data-checksums"

51 stacks/immich.yml Normal file

@@ -0,0 +1,51 @@
services:
immich-server:
image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
restart: unless-stopped
volumes:
- /srv/dev-disk-by-uuid-1582b800-1f82-407a-a3aa-3460b3390127/docker/immich/upload:/usr/src/app/upload
- /etc/localtime:/etc/localtime:ro
networks:
immich_network:
macvlan_192_168_3_0:
ipv4_address: 192.168.3.12
env_file: stack.env
depends_on:
- redis
- database
immich-machine-learning:
image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}
restart: unless-stopped
networks:
immich_network:
volumes:
- /srv/dev-disk-by-uuid-1582b800-1f82-407a-a3aa-3460b3390127/docker/immich/model-cache:/cache
redis:
image: docker.io/redis:6.2-alpine@sha256:328fe6a5822256d065debb36617a8169dbfbd77b797c525288e465f56c1d392b
restart: unless-stopped
networks:
immich_network:
healthcheck:
test: redis-cli ping || exit 1
database:
image: docker.io/tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0
restart: unless-stopped
networks:
immich_network:
env_file: stack.env
volumes:
- /srv/dev-disk-by-uuid-1582b800-1f82-407a-a3aa-3460b3390127/docker/immich/postgres:/var/lib/postgresql/data
healthcheck:
test: pg_isready --dbname='immich' --username='postgres' || exit 1; Chksum="$$(psql --dbname='immich' --username='postgres' --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
interval: 5m
start_interval: 30s
start_period: 5m
command: ["postgres", "-c" ,"shared_preload_libraries=vectors.so", "-c", 'search_path="$$user", public, vectors', "-c", "logging_collector=on", "-c", "max_wal_size=2GB", "-c", "shared_buffers=512MB", "-c", "wal_compression=on"]
networks:
immich_network:
macvlan_192_168_3_0:
external: true
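
The database healthcheck above does double duty: pg_isready confirms liveness, and the psql query counts checksum failures, which only works because POSTGRES_INITDB_ARGS in the env file enables data checksums at initdb time. The same integrity probe can be run by hand, as a sketch (the container name immich-database-1 is an assumption):

    # Ask Postgres how many checksum failures it has recorded; 0 is the healthy answer
    docker exec immich-database-1 psql --dbname=immich --username=postgres \
      --tuples-only --no-align \
      --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database'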

21
stacks/jellyfin.yml Normal file
@@ -0,0 +1,21 @@
version: '3.5'
services:
jellyfin:
image: jellyfin/jellyfin
restart: 'unless-stopped'
user: 1000:1000
volumes:
- /srv/dev-disk-by-uuid-1582b800-1f82-407a-a3aa-3460b3390127/docker/jellyfin/config:/config
- /srv/dev-disk-by-uuid-1582b800-1f82-407a-a3aa-3460b3390127/docker/jellyfin/cache:/cache
- /srv/dev-disk-by-uuid-5392509c-5ccd-4d8f-8719-60064c4404d6/anime:/data/anime
- /srv/dev-disk-by-uuid-c7a96ee1-c08e-48b1-8afa-79c75380d142/movies:/data/movies
- /srv/dev-disk-by-uuid-c7a96ee1-c08e-48b1-8afa-79c75380d142/tv:/data/tv
environment:
- JELLYFIN_PublishedServerUrl=https://jelly.skrd.fun
networks:
macvlan_192_168_3_0:
ipv4_address: 192.168.3.11
networks:
macvlan_192_168_3_0:
external: true

24
stacks/minecraft.yml Normal file
@@ -0,0 +1,24 @@
version: "3"
services:
mc:
image: itzg/minecraft-server
tty: true
stdin_open: true
restart: unless-stopped
environment:
TYPE: "paper"
EULA: "true"
ONLINE_MODE: "false"
ENABLE_AUTOPAUSE: "true"
JVM_DD_OPTS: "disable.watchdog:true"
DEBUG_AUTOPAUSE: "true"
volumes:
- /srv/dev-disk-by-uuid-1582b800-1f82-407a-a3aa-3460b3390127/docker/minecraft/data:/data
networks:
macvlan_192_168_3_0:
ipv4_address: 192.168.3.9
networks:
macvlan_192_168_3_0:
external: true
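
tty and stdin_open are set so the Minecraft server console stays reachable from the host. A sketch of both ways in, assuming the default compose container name minecraft-mc-1 and the rcon-cli tool bundled in the itzg image:

    # Attach to the live console (detach with Ctrl-p Ctrl-q, not Ctrl-c)
    docker attach minecraft-mc-1

    # Or fire a single command over RCON
    docker exec minecraft-mc-1 rcon-cli "say hello from the host"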

22
stacks/navidrome.yml Normal file
@@ -0,0 +1,22 @@
version: "3"
services:
navidrome:
image: deluan/navidrome:latest
user: 1000:1000
restart: unless-stopped
environment:
ND_MUSICFOLDER: "/store"
ND_BASEURL: "https://navi.skrd.fun"
ND_AUTOIMPORTPLAYLISTS: false
ND_ENABLESHARING: true
networks:
macvlan_192_168_3_0:
ipv4_address: 192.168.3.10
volumes:
- /srv/dev-disk-by-uuid-1582b800-1f82-407a-a3aa-3460b3390127/docker/navidrome:/data
- /srv/dev-disk-by-uuid-1582b800-1f82-407a-a3aa-3460b3390127/music:/store/music:ro
- /srv/dev-disk-by-uuid-1582b800-1f82-407a-a3aa-3460b3390127/ost:/store/ost:ro
networks:
macvlan_192_168_3_0:
external: true

19
stacks/npm.yml Normal file
@@ -0,0 +1,19 @@
version: '3.8'
services:
nginx-proxy-manager:
image: 'jc21/nginx-proxy-manager:latest'
restart: unless-stopped
volumes:
- /srv/dev-disk-by-uuid-1582b800-1f82-407a-a3aa-3460b3390127/docker/npm/data:/data
- /srv/dev-disk-by-uuid-1582b800-1f82-407a-a3aa-3460b3390127/docker/npm/letsencrypt:/etc/letsencrypt
- /srv/dev-disk-by-uuid-1582b800-1f82-407a-a3aa-3460b3390127/docker/npm/ca.crt:/etc/my_ca.crt
networks:
proxy_network:
macvlan_192_168_3_0:
ipv4_address: 192.168.103.2
networks:
proxy_network:
external: true
macvlan_192_168_3_0:
external: true

22
stacks/qbittorrent.yml Normal file
@@ -0,0 +1,22 @@
version: '3'
services:
qbittorrent:
image: lscr.io/linuxserver/qbittorrent:latest
environment:
- PUID=1000
- PGID=1000
- TZ=America/Santiago
- WEBUI_PORT=80
- TORRENTING_PORT=6881
volumes:
- /srv/dev-disk-by-uuid-1582b800-1f82-407a-a3aa-3460b3390127/docker/qbittorrent:/config
- /srv/dev-disk-by-uuid-1582b800-1f82-407a-a3aa-3460b3390127/downloads:/downloads
restart: unless-stopped
networks:
macvlan_192_168_3_0:
ipv4_address: 192.168.3.4
networks:
macvlan_192_168_3_0:
external: true

@@ -0,0 +1 @@
MARIADB_ROOT_PASSWORD=SECRET

15
stacks/zenithar.yml Normal file
@@ -0,0 +1,15 @@
version: "3"
services:
db:
image: mariadb
restart: unless-stopped
env_file: stack.env
volumes:
- /srv/dev-disk-by-uuid-1582b800-1f82-407a-a3aa-3460b3390127/docker/zenithar:/var/lib/mysql:Z
networks:
macvlan_192_168_3_0:
ipv4_address: 192.168.3.7
networks:
macvlan_192_168_3_0:
external: true

41
terraform/k3s/.gitignore vendored Normal file
@@ -0,0 +1,41 @@
# Local .terraform directories
.terraform/
# .tfstate files
*.tfstate
*.tfstate.*
# Crash log files
crash.log
crash.*.log
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# password, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
*.tfvars
*.tfvars.json
# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json
# Ignore transient lock info files created by terraform apply
.terraform.tfstate.lock.info
# Include override files you do wish to add to version control using negated pattern
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
# Ignore CLI configuration files
.terraformrc
terraform.rc
# Ignore secrets
secrets.tfvars
!secrets.tfvars.example

44
terraform/k3s/.terraform.lock.hcl generated Normal file
@@ -0,0 +1,44 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/bpg/proxmox" {
version = "0.72.0"
constraints = "0.72.0"
hashes = [
"h1:LCNKZG6lVHdf9LTkHgM8CPUbiFxLI8k208Tz9ajz46c=",
"zh:031d0ade16874fe111055b9417b4f9c73efe7c755ba39aa28bd697ee77dc5e0e",
"zh:095320d9cfb1e1f1b42d0d31f7aef5380323ab5e0d428606c43c9a30bf3b40db",
"zh:11b9ccfc249e150a174f1aa0dd63b8f96296fcb94353902e807da2da20035822",
"zh:24aa2cb7362db5ffebdcc45b0f53897fdd102f322ec7d9e0e4ef60a87955c182",
"zh:334d6d6c2c12803b530ca7fcafe25def317333582dca531ae889bdc1dcbf966a",
"zh:383376b3ce17877f78168270f14a4401093cfee464adf85dd88214d09951e6a2",
"zh:762d16fefdf4af471fe11ba315c7a0a3e5ff04c4f6e8431cd541b2f78cd518ae",
"zh:7c455e70d262e26c3fda8859ed67b0118d12f72416397fc8fbf5b5b90f2f02c3",
"zh:8401a38d10e1aacc7c3f75ae41f42c88647ab7e0974010c616b69095c7a719c1",
"zh:b7bdc53cdd6a21f208fc15bbbd0502fd39bee268801fd2b9ce89e18b38138bc0",
"zh:c3741939ceb5fbd4c00f9aa541a3e9cb68222c39890ca5ed3602a0ca3fa98a53",
"zh:d0d49355b2d1dc847028c96328f8e0ffc4ce39c3641940f9136684a7177d008f",
"zh:ed137c25a20912962413ea1972aa15931f54dcb922a9c4451d08237b6cad2037",
"zh:f26e0763dbe6a6b2195c94b44696f2110f7f55433dc142839be16b9697fa5597",
"zh:f3e38e9c63ef9b295c7e4d2e302d85700f2e8dbff49285e364457b999b927a72",
]
}
provider "registry.terraform.io/hashicorp/local" {
version = "2.5.2"
hashes = [
"h1:JlMZD6nYqJ8sSrFfEAH0Vk/SL8WLZRmFaMUF9PJK5wM=",
"zh:136299545178ce281c56f36965bf91c35407c11897f7082b3b983d86cb79b511",
"zh:3b4486858aa9cb8163378722b642c57c529b6c64bfbfc9461d940a84cd66ebea",
"zh:4855ee628ead847741aa4f4fc9bed50cfdbf197f2912775dd9fe7bc43fa077c0",
"zh:4b8cd2583d1edcac4011caafe8afb7a95e8110a607a1d5fb87d921178074a69b",
"zh:52084ddaff8c8cd3f9e7bcb7ce4dc1eab00602912c96da43c29b4762dc376038",
"zh:71562d330d3f92d79b2952ffdda0dad167e952e46200c767dd30c6af8d7c0ed3",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:805f81ade06ff68fa8b908d31892eaed5c180ae031c77ad35f82cb7a74b97cf4",
"zh:8b6b3ebeaaa8e38dd04e56996abe80db9be6f4c1df75ac3cccc77642899bd464",
"zh:ad07750576b99248037b897de71113cc19b1a8d0bc235eb99173cc83d0de3b1b",
"zh:b9f1c3bfadb74068f5c205292badb0661e17ac05eb23bfe8bd809691e4583d0e",
"zh:cc4cbcd67414fefb111c1bf7ab0bc4beb8c0b553d01719ad17de9a047adff4d1",
]
}

96
terraform/k3s/main.tf Normal file
@@ -0,0 +1,96 @@
data "local_file" "ssh_public_key" {
filename = "/home/ryuuji/.ssh/id_rsa.pub"
}
resource "proxmox_virtual_environment_download_file" "ubuntu_cloud_image" {
content_type = "iso"
datastore_id = "storage"
node_name = "talos"
url = "https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img"
file_name = "k3s-noble-server-cloudimg-amd64.img"
}
resource "proxmox_virtual_environment_file" "k3s_user_data_cloud_config" {
for_each = var.k3s_vm_config
content_type = "snippets"
datastore_id = "storage"
node_name = "talos"
source_raw {
data = <<-EOF
#cloud-config
hostname: ${each.value.name}
users:
- default
- name: ubuntu
groups:
- sudo
shell: /bin/bash
ssh_authorized_keys:
- ${trimspace(data.local_file.ssh_public_key.content)}
sudo: ALL=(ALL) NOPASSWD:ALL
runcmd:
- apt update
- apt install -y qemu-guest-agent net-tools
- timedatectl set-timezone America/Santiago
- systemctl enable qemu-guest-agent
- systemctl start qemu-guest-agent
- echo "done" > /tmp/cloud-config.done
EOF
file_name = "${each.value.name}-k3s-cloud-config.yaml"
}
}
resource "proxmox_virtual_environment_vm" "k3s_ubuntu_vm" {
for_each = var.k3s_vm_config
name = each.value.name
node_name = "talos"
agent {
enabled = true
}
cpu {
cores = 4
type = "host"
}
memory {
dedicated = each.value.ram
}
disk {
datastore_id = "storage-lvm"
file_id = proxmox_virtual_environment_download_file.ubuntu_cloud_image.id
interface = "virtio0"
iothread = true
discard = "on"
size = 50
}
disk {
datastore_id = "storage-lvm"
iothread = true
interface = "virtio1"
discard = "on"
file_format = "raw"
size = 200
}
network_device {
bridge = "vmbr0"
}
initialization {
datastore_id = "storage-lvm"
ip_config {
ipv4 {
address = "${each.value.ip}/${each.value.cidr}"
gateway = "192.168.1.1"
}
}
user_data_file_id = proxmox_virtual_environment_file.k3s_user_data_cloud_config[each.key].id
}
}
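
The cloud-config's last runcmd step writes /tmp/cloud-config.done, which makes for a cheap readiness check before pointing Ansible at the nodes. For example, against the first master from variables.tf below (a sketch; assumes the SSH key from the local_file data source above):

    # Prints "done" once cloud-init's runcmd has finished on the node
    ssh ubuntu@192.168.3.1 cat /tmp/cloud-config.done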

26
terraform/k3s/outputs.tf Normal file
@@ -0,0 +1,26 @@
locals {
ansible_master_lines = [
for name, config in var.k3s_vm_config :
"${config.ip} ansible_user=ubuntu ansible_ssh_private_key_file=/home/ryuuji/.ssh/id_rsa ansible_ssh_common_args='-o StrictHostKeyChecking=no'"
if config.role == "master"
]
ansible_worker_lines = [
for name, config in var.k3s_vm_config :
"${config.ip} ansible_user=ubuntu ansible_ssh_private_key_file=/home/ryuuji/.ssh/id_rsa ansible_ssh_common_args='-o StrictHostKeyChecking=no'"
if config.role == "worker"
]
}
output "ansible_inventory_k3s" {
value = <<EOT
[k3s_master]
${join("\n", local.ansible_master_lines)}
[k3s_workers]
${join("\n", local.ansible_worker_lines)}
[k3s_cluster:children]
k3s_master
k3s_workers
EOT
}
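
The heredoc is shaped to drop straight into an Ansible inventory. A possible flow from this directory, as a sketch (secrets.tfvars is the gitignored counterpart of the example file):

    terraform init
    terraform apply -var-file=secrets.tfvars
    terraform output -raw ansible_inventory_k3s > ../../ansible/k3s/inventory.ini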

@@ -0,0 +1,20 @@
terraform {
required_providers {
proxmox = {
source = "bpg/proxmox"
version = "0.72.0"
}
}
}
provider "proxmox" {
endpoint = var.proxmox_endpoint
username = var.proxmox_username
password = var.proxmox_password
insecure = true
ssh {
agent = true
username = var.proxmox_ssh_username
private_key = file(var.proxmox_ssh_private_key)
}
}

@@ -0,0 +1,6 @@
proxmox_endpoint = "https://192.168.2.1:8006/"
proxmox_username = "username@pam"
proxmox_password = "SECRET"
proxmox_ssh_username = "username"
proxmox_ssh_private_key = "~/.ssh/id_rsa"

@@ -0,0 +1,23 @@
variable "proxmox_endpoint" {}
variable "proxmox_username" {}
variable "proxmox_password" {}
variable "proxmox_ssh_username" {}
variable "proxmox_ssh_private_key" {}
variable "k3s_vm_config" {
type = map(object({
name = string
ip = string
cidr = string
ram = string
role = string
}))
default = {
"vm1" = { name = "k3s-node-1", ip = "192.168.3.1", cidr = "16", ram = "4096", role = "master" }
"vm2" = { name = "k3s-node-2", ip = "192.168.3.2", cidr = "16", ram = "4096", role = "worker" }
"vm3" = { name = "k3s-node-3", ip = "192.168.3.3", cidr = "16", ram = "4096", role = "worker" }
"vm4" = { name = "k3s-node-4", ip = "192.168.3.4", cidr = "16", ram = "4096", role = "worker" }
}
}

41
terraform/pihole/.gitignore vendored Normal file
@@ -0,0 +1,41 @@
# Local .terraform directories
.terraform/
# .tfstate files
*.tfstate
*.tfstate.*
# Crash log files
crash.log
crash.*.log
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# password, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
*.tfvars
*.tfvars.json
# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json
# Ignore transient lock info files created by terraform apply
.terraform.tfstate.lock.info
# Include override files you do wish to add to version control using negated pattern
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
# Ignore CLI configuration files
.terraformrc
terraform.rc
# Ignore secrets
secrets.tfvars
!secrets.tfvars.example

44
terraform/pihole/.terraform.lock.hcl generated Normal file
@@ -0,0 +1,44 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/bpg/proxmox" {
version = "0.72.0"
constraints = "0.72.0"
hashes = [
"h1:LCNKZG6lVHdf9LTkHgM8CPUbiFxLI8k208Tz9ajz46c=",
"zh:031d0ade16874fe111055b9417b4f9c73efe7c755ba39aa28bd697ee77dc5e0e",
"zh:095320d9cfb1e1f1b42d0d31f7aef5380323ab5e0d428606c43c9a30bf3b40db",
"zh:11b9ccfc249e150a174f1aa0dd63b8f96296fcb94353902e807da2da20035822",
"zh:24aa2cb7362db5ffebdcc45b0f53897fdd102f322ec7d9e0e4ef60a87955c182",
"zh:334d6d6c2c12803b530ca7fcafe25def317333582dca531ae889bdc1dcbf966a",
"zh:383376b3ce17877f78168270f14a4401093cfee464adf85dd88214d09951e6a2",
"zh:762d16fefdf4af471fe11ba315c7a0a3e5ff04c4f6e8431cd541b2f78cd518ae",
"zh:7c455e70d262e26c3fda8859ed67b0118d12f72416397fc8fbf5b5b90f2f02c3",
"zh:8401a38d10e1aacc7c3f75ae41f42c88647ab7e0974010c616b69095c7a719c1",
"zh:b7bdc53cdd6a21f208fc15bbbd0502fd39bee268801fd2b9ce89e18b38138bc0",
"zh:c3741939ceb5fbd4c00f9aa541a3e9cb68222c39890ca5ed3602a0ca3fa98a53",
"zh:d0d49355b2d1dc847028c96328f8e0ffc4ce39c3641940f9136684a7177d008f",
"zh:ed137c25a20912962413ea1972aa15931f54dcb922a9c4451d08237b6cad2037",
"zh:f26e0763dbe6a6b2195c94b44696f2110f7f55433dc142839be16b9697fa5597",
"zh:f3e38e9c63ef9b295c7e4d2e302d85700f2e8dbff49285e364457b999b927a72",
]
}
provider "registry.terraform.io/hashicorp/local" {
version = "2.5.2"
hashes = [
"h1:JlMZD6nYqJ8sSrFfEAH0Vk/SL8WLZRmFaMUF9PJK5wM=",
"zh:136299545178ce281c56f36965bf91c35407c11897f7082b3b983d86cb79b511",
"zh:3b4486858aa9cb8163378722b642c57c529b6c64bfbfc9461d940a84cd66ebea",
"zh:4855ee628ead847741aa4f4fc9bed50cfdbf197f2912775dd9fe7bc43fa077c0",
"zh:4b8cd2583d1edcac4011caafe8afb7a95e8110a607a1d5fb87d921178074a69b",
"zh:52084ddaff8c8cd3f9e7bcb7ce4dc1eab00602912c96da43c29b4762dc376038",
"zh:71562d330d3f92d79b2952ffdda0dad167e952e46200c767dd30c6af8d7c0ed3",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:805f81ade06ff68fa8b908d31892eaed5c180ae031c77ad35f82cb7a74b97cf4",
"zh:8b6b3ebeaaa8e38dd04e56996abe80db9be6f4c1df75ac3cccc77642899bd464",
"zh:ad07750576b99248037b897de71113cc19b1a8d0bc235eb99173cc83d0de3b1b",
"zh:b9f1c3bfadb74068f5c205292badb0661e17ac05eb23bfe8bd809691e4583d0e",
"zh:cc4cbcd67414fefb111c1bf7ab0bc4beb8c0b553d01719ad17de9a047adff4d1",
]
}

87
terraform/pihole/main.tf Normal file
@@ -0,0 +1,87 @@
data "local_file" "ssh_public_key" {
filename = "/home/ryuuji/.ssh/id_rsa.pub"
}
resource "proxmox_virtual_environment_download_file" "ubuntu_cloud_image" {
content_type = "iso"
datastore_id = "storage"
node_name = "talos"
url = "https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img"
file_name = "pihole-noble-server-cloudimg-amd64.img"
}
resource "proxmox_virtual_environment_file" "user_data_cloud_config" {
for_each = var.vms_config
content_type = "snippets"
datastore_id = "storage"
node_name = "talos"
source_raw {
data = <<-EOF
#cloud-config
hostname: ${each.value.name}
users:
- default
- name: ubuntu
groups:
- sudo
shell: /bin/bash
ssh_authorized_keys:
- ${trimspace(data.local_file.ssh_public_key.content)}
sudo: ALL=(ALL) NOPASSWD:ALL
runcmd:
- apt update
- apt install -y qemu-guest-agent net-tools
- timedatectl set-timezone America/Santiago
- systemctl enable qemu-guest-agent
- systemctl start qemu-guest-agent
- echo "done" > /tmp/cloud-config.done
EOF
file_name = "${each.value.name}-user-data-cloud-config.yaml"
}
}
resource "proxmox_virtual_environment_vm" "ubuntu_vm" {
for_each = var.vms_config
name = each.value.name
node_name = "talos"
agent {
enabled = true
}
cpu {
cores = 4
type = "host"
}
memory {
dedicated = each.value.ram
}
disk {
datastore_id = "storage-lvm"
file_id = proxmox_virtual_environment_download_file.ubuntu_cloud_image.id
interface = "virtio0"
iothread = true
discard = "on"
size = each.value.disk
}
network_device {
bridge = "vmbr0"
}
initialization {
datastore_id = "storage-lvm"
ip_config {
ipv4 {
address = "${each.value.ip}/${each.value.cidr}"
gateway = "192.168.1.1"
}
}
user_data_file_id = proxmox_virtual_environment_file.user_data_cloud_config[each.key].id
}
}

@@ -0,0 +1,15 @@
locals {
ansible_lines = [
for name, config in var.vms_config :
"${config.ip} ansible_user=ubuntu ansible_ssh_private_key_file=/home/ryuuji/.ssh/id_rsa ansible_ssh_common_args='-o StrictHostKeyChecking=no'"
]
}
output "ansible_inventory" {
value = <<EOT
[pihole]
${join("\n", local.ansible_lines)}
EOT
}

@@ -0,0 +1,20 @@
terraform {
required_providers {
proxmox = {
source = "bpg/proxmox"
version = "0.72.0"
}
}
}
provider "proxmox" {
endpoint = var.proxmox_endpoint
username = var.proxmox_username
password = var.proxmox_password
insecure = true
ssh {
agent = true
username = var.proxmox_ssh_username
private_key = file(var.proxmox_ssh_private_key)
}
}

@@ -0,0 +1,6 @@
proxmox_endpoint = "https://192.168.2.1:8006/"
proxmox_username = "username@pam"
proxmox_password = "SECRET"
proxmox_ssh_username = "username"
proxmox_ssh_private_key = "~/.ssh/id_rsa"

@@ -0,0 +1,5 @@
variable "proxmox_endpoint" {}
variable "proxmox_username" {}
variable "proxmox_password" {}
variable "proxmox_ssh_username" {}
variable "proxmox_ssh_private_key" {}