feat: add docker server

Matthew McKinnon 2024-11-02 12:28:46 +10:00
parent 0518ee5343
commit 1405a9fab6
8 changed files with 279 additions and 436 deletions

.gitignore vendored

@@ -2,4 +2,5 @@ terraform.auto.tfvars
.terraform
tokens.txt
terraform.tfstate*
.bitwarden/

.terraform.lock.hcl generated

@@ -1,23 +1,6 @@
# This file is maintained automatically by "tofu init".
# Manual edits may be lost in future updates.

provider "registry.opentofu.org/hashicorp/local" {
  version = "2.5.2"
  hashes = [
    "h1:6lS+5A/4WFAqY3/RHWFRBSiFVLPRjvLaUgxPQvjXLHU=",
    "zh:25b95b76ceaa62b5c95f6de2fa6e6242edbf51e7fc6c057b7f7101aa4081f64f",
    "zh:3c974fdf6b42ca6f93309cf50951f345bfc5726ec6013b8832bcd3be0eb3429e",
    "zh:5de843bf6d903f5cca97ce1061e2e06b6441985c68d013eabd738a9e4b828278",
    "zh:86beead37c7b4f149a54d2ae633c99ff92159c748acea93ff0f3603d6b4c9f4f",
    "zh:8e52e81d3dc50c3f79305d257da7fde7af634fed65e6ab5b8e214166784a720e",
    "zh:9882f444c087c69559873b2d72eec406a40ede21acb5ac334d6563bf3a2387df",
    "zh:a4484193d110da4a06c7bffc44cc6b61d3b5e881cd51df2a83fdda1a36ea25d2",
    "zh:a53342426d173e29d8ee3106cb68abecdf4be301a3f6589e4e8d42015befa7da",
    "zh:d25ef2aef6a9004363fc6db80305d30673fc1f7dd0b980d41d863b12dacd382a",
    "zh:fa2d522fb323e2121f65b79709fd596514b293d816a1d969af8f72d108888e4c",
  ]
}

provider "registry.opentofu.org/hashicorp/null" {
  version = "3.2.3"
  hashes = [
@@ -35,15 +18,25 @@ provider "registry.opentofu.org/hashicorp/null" {
  ]
}

provider "registry.opentofu.org/hashicorp/template" {
  version = "2.2.0"
  hashes = [
    "h1:tdS0otiAtvUV8uLJWJNfcqOPo3llj7FyRzExw6X1srY=",
    "zh:374c28bafc43cd65e578cb209efc9eee4c1cec7618f451528e928db98059e8c8",
    "zh:6a2982e70fbc2ab2668d624c648ef2eb32243c1a1185246b03991a7a21326db9",
    "zh:af83169c21bb13f141510a349e1f70cf7d893247a269bd71cad74dd22f1df0f5",
    "zh:b81a5bedc91a1a81b938c393247248d6c3d1bd8ea685541f9c858908c0afb6b3",
    "zh:de15486244af2d29d44d510d647cd6e0b1408e89952261013c572b7c9bfd744b",
  ]
}

provider "registry.opentofu.org/maxlaverse/bitwarden" {
  version     = "0.1.1"
  constraints = "~> 0.1.0"
  hashes = [
    "h1:ZtHaTCuLMChMG8AJ7M8/ZOMqCTdCMfLhxOWTlZb5LMo=",
    "zh:190c3c9aab29815a0d6d5b7d0f0bcf299c08a36af0142a23e381894cf12ca342",
    "zh:2a90054074e4c651e19b423b1c5a4fff75973f52a290890b06ebfb6aa73ea3a4",
    "zh:38396bdc54fed6680a0303c7919a4fc417d5c4438ddb0e2e1a065091990bb350",
    "zh:63e899f3a4bb735f50ad90bc839e5d0f68f8b1b96974d91832147d2d16997a88",
    "zh:96da402041eeec5dec9d738777dcaa2c81742086ee2911bb2df9fd685709629b",
    "zh:99c18a7e8a6ddc84947c19f051a896a20c20b84914895ba538d25b80fbc9cda6",
    "zh:a95a5a3e62bf751a531df58ec555eb98ce13c00e40d343c293f621b4a5c80b6e",
    "zh:b93b7e5ea921e425fbc901e7696702df80eb2b8243693abd18a4e6d3df0af6b0",
    "zh:bb1228c8205d9b589674f853cc6c788de2a95e29f33f0c929727c1986a02e3b6",
    "zh:beaf5ffa167de3c9ab01bf937b671ff90a65fd7de38612195da5f8d9e532c534",
    "zh:d2849ed82549744fd8c36a3ae694b935ab3afe8fc7a78c60339581fed5d63112",
    "zh:f90c25c736fa0f17f33c96d5ec0b7d45159397acfcdc0bce68e574d673f0f764",
    "zh:fa91da4161f5b4580df79f10f891f5b88827941110fae1906a1a672b31a94dd0",
    "zh:fae5d219184274ebbce7f6766398c44ec593c1ffb5d2e872a81478b5eb57f01d",
  ]
}


@@ -1,109 +0,0 @@
resource "null_resource" "cloud_init_deb" {
  connection {
    type        = "ssh"
    user        = "root"
    private_key = file("~/.ssh/pve2.comprofix.xyz")
    host        = "pve2.comprofix.xyz"
    agent       = true
  }

  provisioner "file" {
    source      = "files/cloud_init.cloud_config"
    destination = "/var/lib/vz/snippets/cloud_init_deb.yml"
  }
}

resource "proxmox_vm_qemu" "dev" {
  # VM General Settings
  target_node = "pve"
  vmid        = "500"
  name        = "dev"
  desc        = "Dev Server"

  # VM Advanced General Settings
  onboot = true
  scsihw = "virtio-scsi-single"

  # VM OS Settings
  clone      = "debian-12-generic-amd64"
  clone_wait = 120
  timeouts {
    create = "1h"
    delete = "1h"
  }

  # VM System Settings
  agent   = 1
  machine = "q35"
  qemu_os = "l26"

  # VM CPU Settings
  cores   = 4
  sockets = 1
  cpu     = "kvm64"
  bios    = "ovmf"
  startup = ""

  # VM Memory Settings
  memory = 16384

  # VM Network Settings
  network {
    bridge = "vmbr0"
    model  = "virtio"
    tag    = "10"
  }

  efidisk {
    efitype = "4m"
    storage = "local-zfs"
  }

  disks {
    ide {
      ide2 {
        cdrom {
          passthrough = false
        }
      }
      ide3 {
        cloudinit {
          storage = "local-zfs"
        }
      }
    }
    scsi {
      scsi0 {
        disk {
          size    = 20
          storage = "local-zfs"
        }
      }
    }
  }

  # VM Cloud-Init Settings
  os_type  = "cloud-init"
  cicustom = "vendor=local:snippets/cloud_init_deb.yml"
  #cloudinit_cdrom_storage = "local-zfs"

  # (Optional) IP Address and Gateway
  ipconfig0  = "ip=10.10.10.22/24,gw=10.10.10.1"
  nameserver = "10.10.10.1"

  # (Optional) Default User
  ciuser     = "${var.ci_user}"
  cipassword = "${var.ci_password}"

  # (Optional) Add your SSH Public KEY
  sshkeys = <<EOF
${ var.ssh_key }
EOF
}

docker.tf Normal file

@@ -0,0 +1,107 @@
resource "null_resource" "cloud_init_deb" {
  connection {
    type        = "ssh"
    user        = "root"
    private_key = file("~/.ssh/pve2.comprofix.xyz")
    host        = "pve2.comprofix.xyz"
    agent       = true
  }

  provisioner "file" {
    source      = "files/cloud_init.cloud_config"
    destination = "/var/lib/vz/snippets/cloud_init_deb.yml"
  }
}

resource "proxmox_vm_qemu" "docker" {
  # VM General Settings
  target_node = "pve"
  vmid        = "101"
  name        = "docker"
  desc        = "Docker Server"

  # VM Advanced General Settings
  onboot = true
  scsihw = "virtio-scsi-single"

  # VM OS Settings
  clone      = "debian-12-generic-amd64"
  clone_wait = 120
  timeouts {
    create = "1h"
    delete = "1h"
  }

  # VM System Settings
  agent   = 1
  machine = "q35"
  qemu_os = "l26"

  # VM CPU Settings
  cores   = 4
  sockets = 1
  cpu     = "x86-64-v2-AES"
  bios    = "ovmf"
  startup = "order=2,up=600"

  # VM Memory Settings
  memory = 16384

  # VM Network Settings
  network {
    bridge = "vmbr0"
    model  = "virtio"
    tag    = "10"
  }

  efidisk {
    efitype = "4m"
    storage = "local-zfs"
  }

  disks {
    ide {
      ide2 {
        cdrom {
          passthrough = false
        }
      }
      ide3 {
        cloudinit {
          storage = "local-zfs"
        }
      }
    }
    scsi {
      scsi0 {
        disk {
          size    = 20
          storage = "local-zfs"
        }
      }
    }
  }

  # VM Cloud-Init Settings
  os_type = "cloud-init"
  #cicustom = "vendor=local:snippets/cloud_init_deb.yml"

  # (Optional) IP Address and Gateway
  ipconfig0  = "ip=10.10.10.5/24,gw=10.10.10.1"
  nameserver = "10.10.10.1"

  # (Optional) Default User
  ciuser     = var.ci_user # Updated
  cipassword = var.ci_password # Updated

  # (Optional) Add your SSH Public KEY
  sshkeys = <<EOF
${var.ssh_key}
EOF
}


@@ -1,19 +1,3 @@
#cloud-config
packages:
  - htop
  - vim-nox
  - git
  - zsh
  - curl
  - wget
  - python3
  - python3-pip
  - nfs-common
  - cron
  - jq
  - sudo
  - logwatch
  - sendemail
runcmd:
  - "bash /usr/local/bin/notify.sh"


@@ -1,240 +0,0 @@
# Proxmox Full-Clone
# ---
# Create a new VM from a clone
resource "proxmox_vm_qemu" "docker" {
  # VM General Settings
  target_node = "pve"
  vmid        = "101"
  name        = "docker"
  desc        = "Docker Server"

  # VM Advanced General Settings
  onboot = true
  scsihw = "virtio-scsi-single"

  # VM OS Settings
  clone      = "debian-12-generic-amd64"
  clone_wait = 45
  timeouts {
    create = "20m"
    delete = "10m"
  }

  # VM System Settings
  agent   = 1
  machine = "q35"
  qemu_os = "l26"

  # VM CPU Settings
  cores   = 1
  sockets = 4
  cpu     = "kvm64"
  bios    = "ovmf"
  startup = "order=2,up=600"

  # VM Memory Settings
  memory = 16384

  # VM Network Settings
  network {
    bridge = "vmbr0"
    model  = "virtio"
    tag    = "10"
  }

  efidisk {
    efitype = "4m"
    storage = "local-zfs"
  }

  disks {
    ide {
      ide2 {
        cdrom {
          passthrough = false
        }
      }
      ide3 {
        cloudinit {
          storage = "local-zfs"
        }
      }
    }
    scsi {
      scsi0 {
        disk {
          size    = 80
          storage = "local-zfs"
        }
      }
    }
  }

  # VM Cloud-Init Settings
  os_type = "cloud-init"
  #cloudinit_cdrom_storage = "local-zfs"

  # (Optional) IP Address and Gateway
  ipconfig0  = "ip=10.10.10.5/24,gw=10.10.10.1"
  nameserver = "10.10.10.1"

  # (Optional) Default User
  ciuser     = "${var.ci_user}"
  cipassword = "${var.ci_password}"

  # (Optional) Add your SSH Public KEY
  # sshkeys = <<EOF
  # ${ var.ssh_key }
  # EOF
}

resource "proxmox_vm_qemu" "jellyfin" {
  # VM General Settings
  target_node = "pve"
  vmid        = "102"
  name        = "jellyfin"

  # VM Advanced General Settings
  onboot  = true
  startup = "order=3,up=300"
  scsihw  = "virtio-scsi-single"

  # VM OS Settings
  clone      = "debian-12-generic-amd64"
  clone_wait = 45
  timeouts {
    create = "20m"
    delete = "10m"
  }

  # VM System Settings
  agent   = 1
  machine = "q35"
  qemu_os = "l26"

  # VM CPU Settings
  cores   = 6
  sockets = 1
  cpu     = "host"
  bios    = "ovmf"

  # VM Memory Settings
  memory = 16384

  # VM Network Settings
  network {
    bridge = "vmbr0"
    model  = "virtio"
    tag    = "10"
  }

  efidisk {
    efitype = "4m"
    storage = "local-zfs"
  }

  disks {
    ide {
      ide2 {
        cdrom {
          passthrough = false
        }
      }
      ide3 {
        cloudinit {
          storage = "local-zfs"
        }
      }
    }
    scsi {
      scsi0 {
        disk {
          size    = 40
          storage = "local-zfs"
        }
      }
    }
  }

  # VM Cloud-Init Settings
  os_type = "cloud-init"
  #cloudinit_cdrom_storage = "local-zfs"

  # (Optional) IP Address and Gateway
  ipconfig0  = "ip=10.10.10.7/24,gw=10.10.10.1"
  nameserver = "10.10.10.1"

  # (Optional) Default User
  ciuser     = "${var.ci_user}"
  cipassword = "${var.ci_password}"

  # (Optional) Add your SSH Public KEY
  # sshkeys = <<EOF
  # ${ var.ssh_key }
  # EOF
}

resource "proxmox_lxc" "omada" {
  # VM General Settings
  target_node = "pve"
  vmid        = "200"
  hostname    = "omada"
  ostemplate  = "local:vztmpl/debian-12-standard_12.2-1_amd64.tar.zst"
  password    = "${var.ci_password}"
  memory      = 4096
  swap        = 512
  cores       = 2

  # VM Advanced General Settings
  onboot  = true
  start   = true
  startup = "order=1000"
  timeouts {
    create = "20m"
    delete = "10m"
  }

  # Container Settings
  unprivileged = true
  features {
    nesting = true
  }

  // Terraform will crash without rootfs defined
  rootfs {
    storage = "local-zfs"
    size    = "8G"
  }

  network {
    name   = "eth0"
    bridge = "vmbr0"
    ip     = "10.10.40.2/24"
    gw     = "10.10.40.1"
    tag    = "40"
  }

  # (Optional) Add your SSH Public KEY
  ssh_public_keys = <<EOF
${ var.ssh_key }
EOF
}

prepareEnv.sh Executable file

@@ -0,0 +1,39 @@
#!/bin/bash

# Log in to Bitwarden and cache an unlocked session for the bw calls below.
bw login
export BW_SESSION=$(bw unlock --raw)
bw sync

echo "Please wait while we prepare terraform.auto.tfvars"

# Pull each secret out of its Bitwarden item by field.
proxmox_api_url=$(bw get --session "$BW_SESSION" uri proxmox_api)
proxmox_api_token_id=$(bw get --session "$BW_SESSION" username proxmox_api)
proxmox_api_token_secret=$(bw get --session "$BW_SESSION" password proxmox_api)
ci_user=$(bw get --session "$BW_SESSION" username ci_details)
ci_password=$(bw get --session "$BW_SESSION" password ci_details)
ssh_key=$(bw get --session "$BW_SESSION" notes ssh_public_key_main)
teams=$(bw get --session "$BW_SESSION" notes teams)
passphrase=$(bw get --session "$BW_SESSION" password state_passphrase)
tfusername=$(bw get --session "$BW_SESSION" username tofu_postgres)
tfpassword=$(bw get --session "$BW_SESSION" password tofu_postgres)
tfurl=$(bw get --session "$BW_SESSION" uri tofu_postgres)

# Write the variables OpenTofu expects into terraform.auto.tfvars.
# Expansions are quoted so values containing spaces survive intact.
echo 'proxmox_api_url = "'"$proxmox_api_url"'"' >> terraform.auto.tfvars
echo 'proxmox_api_token_id = "'"$proxmox_api_token_id"'"' >> terraform.auto.tfvars
echo 'proxmox_api_token_secret = "'"$proxmox_api_token_secret"'"' >> terraform.auto.tfvars
echo 'ci_user = "'"$ci_user"'"' >> terraform.auto.tfvars
echo 'ci_password = "'"$ci_password"'"' >> terraform.auto.tfvars
echo 'ssh_key = "'"$ssh_key"'"' >> terraform.auto.tfvars
echo 'teams = "'"$teams"'"' >> terraform.auto.tfvars
echo 'passphrase = "'"$passphrase"'"' >> terraform.auto.tfvars
echo 'tfusername = "'"$tfusername"'"' >> terraform.auto.tfvars
echo 'tfpassword = "'"$tfpassword"'"' >> terraform.auto.tfvars
echo 'tfurl = "'"$tfurl"'"' >> terraform.auto.tfvars
echo 'lxc_template = "debian-12-standard_12.7-1_amd64.tar.zst"' >> terraform.auto.tfvars
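
For reference, a successful run leaves terraform.auto.tfvars looking roughly like the sketch below. Every value shown is a placeholder, not a real credential; the actual contents depend entirely on what is stored in the Bitwarden items the script reads:

proxmox_api_url = "https://pve.example.com:8006/api2/json"
proxmox_api_token_id = "terraform@pam!mytoken"
proxmox_api_token_secret = "00000000-0000-0000-0000-000000000000"
ci_user = "debian"
ci_password = "changeme"
ssh_key = "ssh-ed25519 AAAA... user@example"
teams = "https://example.webhook.office.com/..."
passphrase = "changeme"
tfusername = "tofu"
tfpassword = "changeme"
tfurl = "db.example.com:5432/tofu_state"
lxc_template = "debian-12-standard_12.7-1_amd64.tar.zst"

Note that the script only appends, so rerunning it without removing the old terraform.auto.tfvars will duplicate these entries.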

provider.tf Executable file → Normal file

@@ -1,56 +1,124 @@
Old:

# Proxmox Provider
# ---
# Initial Provider Configuration for Proxmox
terraform {
  required_version = ">= 0.13.0"

  required_providers {
    proxmox = {
      source  = "Telmate/proxmox"
      version = "3.0.1-rc4"
    }
  }
}

variable "proxmox_api_url" {
  type = string
}

variable "proxmox_api_token_id" {
  type = string
}

variable "proxmox_api_token_secret" {
  type = string
}

variable "ci_user" {
  type = string
}

variable "ci_password" {
  type = string
}

variable "ssh_key" {
  type = string
}

variable "teams" {
  type = string
}

provider "proxmox" {
  pm_api_url          = var.proxmox_api_url
  pm_api_token_id     = var.proxmox_api_token_id
  pm_api_token_secret = var.proxmox_api_token_secret
  pm_timeout          = 3600
  pm_parallel         = 2 # Fix vm hdd lock timeout

  # (Optional) Skip TLS Verification
  # pm_tls_insecure = true
}

New:

terraform {
  required_providers {
    proxmox = {
      source  = "Telmate/proxmox"
      version = "3.0.1-rc4"
    }
    bitwarden = {
      source  = "maxlaverse/bitwarden"
      version = "~> 0.1.0"
    }
  }

  backend "pg" {
    conn_str = "postgres://${var.tfusername}:${var.tfpassword}@${var.tfurl}"
  }

  encryption {
    key_provider "pbkdf2" "mykey" {
      passphrase    = var.passphrase
      key_length    = 32
      salt_length   = 16
      hash_function = "sha256"
    }
    method "aes_gcm" "secure_method" {
      keys = key_provider.pbkdf2.mykey
    }
    state {
      method   = method.aes_gcm.secure_method
      enforced = true
    }
  }
}

variable "teams" {
  type      = string
  sensitive = true
}

variable "ci_user" {
  type      = string
  sensitive = true
}

variable "ci_password" {
  type      = string
  sensitive = true
}

variable "lxc_template" {
  type = string
}

variable "proxmox_api_url" {
  type      = string
  sensitive = true
}

variable "proxmox_api_token_id" {
  type      = string
  sensitive = true
}

variable "proxmox_api_token_secret" {
  type      = string
  sensitive = true
}

variable "ssh_key" {
  type      = string
  sensitive = true
}

variable "passphrase" {
  type      = string
  sensitive = true
}

variable "tfusername" {
  type      = string
  sensitive = true
}

variable "tfpassword" {
  type      = string
  sensitive = true
}

variable "tfurl" {
  type      = string
  sensitive = true
}

variable "client_id" {
  description = "Client ID for Bitwarden"
  type        = string
  sensitive   = true
}

variable "client_secret" {
  description = "Client Secret for Bitwarden"
  type        = string
  sensitive   = true
}

variable "master_password" {
  description = "Client Master Password for Bitwarden"
  type        = string
  sensitive   = true
}

provider "proxmox" {
  pm_api_url          = var.proxmox_api_url
  pm_api_token_id     = var.proxmox_api_token_id
  pm_api_token_secret = var.proxmox_api_token_secret
  pm_timeout          = 3600
  pm_parallel         = 2 # Fix VM HDD lock timeout

  # Optional: Skip TLS Verification
  # pm_tls_insecure = true
}
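
No provider "bitwarden" block appears in this diff, even though the commit adds the maxlaverse/bitwarden requirement and the client_id, client_secret, and master_password variables. A minimal sketch of how those variables might be wired up elsewhere in the repo, assuming the provider's client_id/client_secret/master_password/email arguments (check the maxlaverse/bitwarden docs for the exact schema), could look like:

provider "bitwarden" {
  # Hypothetical wiring of the variables declared above; not part of this commit.
  client_id       = var.client_id
  client_secret   = var.client_secret
  master_password = var.master_password
  email           = "user@example.com" # placeholder account email, assumed argument
}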