Compare commits


6 Commits

Author      SHA1        Message                               Date
phil        35e0512f51  update README                         2025-12-21 18:37:28 +01:00
phil        55e13418b1  zabbix-agent2 for the zabbix server   2025-12-21 18:19:41 +01:00
phil        4dea2e0bb1  typo doc                              2025-12-21 16:36:19 +01:00
phil        dd0b9d3b60  zabbix-docker                         2025-12-21 16:26:44 +01:00
root        5272d8efc1  Add opentofu                          2025-12-18 09:23:17 +01:00
phil        de465435f1  docker-glpi: glpi 11 ..               2025-11-25 23:41:55 +01:00
13 changed files with 610 additions and 32 deletions

View File

@@ -1,13 +1,13 @@
# vagrant
le 2025-09-25 13h30 - ps
le 2025-12-21 16h30 - ps
This repository hosts **Vagrantfile** setups, including:
* **dns**: Vagrantfile for 2 **Bind9** servers (1 master and 1 slave), chained **goss** tests
* **divers/logs**: Vagrantfile for an **rsyslog** server and client without journald
* **docker**
* **docker-wordpress**
* **docker-glpi**
* **docker-glpi**: Vagrantfile for GLPI 11.0 with docker - Debian 12 VM
* **docker-elk**
* **docker-traefik-nextcloud**
* **docker-traefik-nextcloud-ss-tls**
@@ -25,9 +25,11 @@ This repository hosts **Vagrantfile** setups, including:
* **lldap2**: Vagrantfile for an LDAP server running the **lldap** service - Proxmox integration
* **minione**: Vagrantfile for Opennebula All In One
* **netbox**: Vagrantfile for dockerized Netbox
* **opentofu**: installation script for **OpenTofu** and usage with **bpg/proxmox**
* **rundeck**: Vagrantfile + playbook for an installation with Mariadb
* **samba-ad-dc**: Vagrantfile + playbook for **Samba 4.17 ad-dc** on **Debian 12 Bookworm**
* **wazuh**: Vagrantfile + playbook for a **wazuh** 4.10 server and a Debian client
* **wp-lb**: Wordpress web1 and web2, HaProxy lb, nfs, Mariadb db - Vagrantfile + playbooks
* **zabbix**: Vagrantfile for a Debian 12 **zabbix7** server VM and a Debian 12 **web1** VM with apache and zabbix agent2
* **zabbix-docker**: Vagrantfile for a Debian 12 **zabbix** server VM with docker and a Debian 12 **web1** VM with apache and zabbix agent2

View File

@@ -12,7 +12,7 @@ Vagrant.configure("2") do |config|
# Every Vagrant development environment requires a box. You can search for
# boxes at https://vagrantcloud.com/search.
config.vm.box = "debian/bullseye64"
config.vm.box = "debian/bookworm64"
config.vm.hostname = "glpi"
# Disable automatic box update checking. If you disable this, then
@@ -65,6 +65,7 @@ Vagrant.configure("2") do |config|
# Ansible, Chef, Docker, Puppet and Salt are also available. Please see the
# documentation for more information about their specific syntax and use.
config.vm.provision "shell", inline: <<-SHELL
timedatectl set-timezone Europe/Paris
apt-get update
apt install -y wget curl git vim
if ! which docker ; then
@@ -73,41 +74,51 @@ Vagrant.configure("2") do |config|
gpasswd -a vagrant docker
fi
mkdir -p docker/glpi && cd docker/glpi
cat > docker-compose.yml <<-EOT
version: "3.2"
cat > compose.yml <<-EOT
services:
#MariaDB Container
mariadb:
image: mariadb:10.7
container_name: mariadb
hostname: mariadb
volumes:
- ./mysql:/var/lib/mysql
env_file:
- ./mariadb.env
restart: always
#GLPI Container
glpi:
image: diouxx/glpi
container_name: glpi
hostname: glpi
image: "glpi/glpi:latest"
restart: "unless-stopped"
volumes:
- "./storage/glpi:/var/glpi:rw"
env_file: .env # Pass environment variables from .env file to the container
depends_on:
db:
condition: service_healthy
ports:
- "80:80"
volumes:
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
- /var/www/html/glpi/:/var/www/html/glpi
environment:
- TIMEZONE=Europe/Paris
restart: always
db:
image: "mariadb"
restart: "unless-stopped"
volumes:
- "./storage/mysql:/var/lib/mysql"
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
environment:
MYSQL_RANDOM_ROOT_PASSWORD: "yes"
MYSQL_DATABASE: ${GLPI_DB_NAME}
MYSQL_USER: ${GLPI_DB_USER}
MYSQL_PASSWORD: ${GLPI_DB_PASSWORD}
healthcheck:
#test: mariadbadmin ping -h 127.0.0.1 -u $$MYSQL_USER --password=$$MYSQL_PASSWORD
test: ["CMD", "healthcheck.sh", "--connect", "--innodb_initialized"]
start_period: 5s
interval: 5s
timeout: 5s
retries: 10
expose:
- "3306"
EOT
cat > mariadb.env <<-EOT
MARIADB_ROOT_PASSWORD=diouxx
MARIADB_DATABASE=glpidb
MARIADB_USER=glpi_user
MARIADB_PASSWORD=glpi
cat > .env <<-EOT
GLPI_DB_HOST=db
GLPI_DB_PORT=3306
GLPI_DB_NAME=glpi
GLPI_DB_USER=glpi
GLPI_DB_PASSWORD=glpi
EOT
docker compose up -d
ip -br a
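A quick post-provisioning check of the GLPI stack (a sketch, assuming the default Vagrant workflow; the containers are the ones defined in the compose file written above):
```
vagrant up
# both services should be listed, with the db container reported as healthy
vagrant ssh -c "docker ps --format '{{.Names}}: {{.Status}}'"
# GLPI is published on the VM's port 80
vagrant ssh -c "curl -sI http://localhost/ | head -n1"
```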

View File

@@ -0,0 +1,3 @@
proxmox_endpoint = "https://px2.sio.lan:8006/"
proxmox_api_token = "tofuer@pve!tofu=123456c0-xxxx-yyyy-zzzz-abcdef37eb7a"

opentofu/crtoken.sh Normal file

@@ -0,0 +1,48 @@
#!/bin/bash
#
# Create a Proxmox role, user and API token for use by OpenTofu (bpg/proxmox provider)
tfR=TofuUser # name of the Proxmox role to create
tfU=tofuer # name of the Proxmox user to create
tk=tofu # name of the API token to create
pveum role add "${tfR}" -privs "\
Datastore.Allocate \
Datastore.AllocateSpace \
Datastore.Audit \
Pool.Allocate \
Pool.Audit \
Sys.Audit \
Sys.Console \
Sys.Modify \
Sys.Syslog \
VM.Allocate \
VM.Audit \
VM.Clone \
VM.Config.CDROM \
VM.Config.Cloudinit \
VM.Config.CPU \
VM.Config.Disk \
VM.Config.HWType \
VM.Config.Memory \
VM.Config.Network \
VM.Config.Options \
VM.Console \
VM.Migrate \
VM.GuestAgent.Audit \
VM.GuestAgent.FileRead \
VM.GuestAgent.FileWrite \
VM.GuestAgent.FileSystemMgmt \
VM.GuestAgent.Unrestricted \
VM.PowerMgmt \
Mapping.Audit \
Mapping.Use \
SDN.Audit \
SDN.Use"
pveum user add "${tfU}@pve" --password 'Azerty1+-'
pveum aclmod / -user "${tfU}@pve" -role "${tfR}"
pveum user token add "${tfU}@pve" "$tk" -expire 0 -privsep 0 -comment "Tofu token"|tee "tk-${tk}.txt"
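The last command prints the new token and saves it to tk-tofu.txt; the combined `user@realm!tokenid=value` string is what the bpg/proxmox provider expects as api_token. As an alternative to writing it into a tfvars file, the standard `TF_VAR_` environment variables also work (a sketch; the placeholder value must be replaced with the real one):
```
export TF_VAR_proxmox_endpoint='https://px2.sio.lan:8006/'
export TF_VAR_proxmox_api_token='tofuer@pve!tofu=<value from tk-tofu.txt>'
```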

opentofu/inst-toku.sh Normal file

@@ -0,0 +1,20 @@
#!/bin/bash
# Install OpenTofu via the official installer script
#
## Download the installer script:
curl --proto '=https' --tlsv1.2 -fsSL https://get.opentofu.org/install-opentofu.sh -o install-opentofu.sh
## Alternatively: wget --secure-protocol=TLSv1_2 --https-only https://get.opentofu.org/install-opentofu.sh -O install-opentofu.sh
#
# # Give it execution permissions:
chmod +x install-opentofu.sh
#
# Please inspect the downloaded script
#
# Run the installer:
./install-opentofu.sh --install-method deb
#
# Remove the installer:
rm -f install-opentofu.sh
tofu version
#
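With OpenTofu installed, the usual workflow in the opentofu/ directory would look like this (a sketch, assuming the .tf and .tfvars files shown further down and a valid Proxmox API token):
```
cd opentofu
tofu init       # downloads the bpg/proxmox provider
tofu plan       # shows the VM that would be cloned from the template
tofu apply      # creates the cloud-init snippet and the VM
tofu destroy    # tears the VM down again
```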

opentofu/main.tf Normal file

@@ -0,0 +1,114 @@
# Retrieve VM templates available in Proxmox that match the specified name
data "proxmox_virtual_environment_vms" "template" {
filter {
name = "name"
values = ["${var.vm_template}"] # The name of the template to clone from
}
}
# Create a cloud-init configuration file as a Proxmox snippet
resource "proxmox_virtual_environment_file" "cloud_config" {
content_type = "snippets" # Cloud-init files are stored as snippets in Proxmox
datastore_id = "local" # Local datastore used to store the snippet
node_name = var.node_name # The Proxmox node where the file will be uploaded
source_raw {
file_name = "vm.cloud-config.yaml" # The name of the snippet file
data = <<-EOF
#cloud-config
hostname: ${var.vm_name}
package_update: true
package_upgrade: true
packages:
- qemu-guest-agent # Ensures the guest agent is installed
users:
- default
- name: ${var.vm_user}
groups: sudo
shell: /bin/bash
ssh-authorized-keys:
- "${var.vm_user_sshkey}" # Inject user's SSH key
sudo: ALL=(ALL) NOPASSWD:ALL
runcmd:
- systemctl enable qemu-guest-agent
- systemctl start qemu-guest-agent
- echo "done" > /tmp/cloud-config.done
#- reboot # Reboot the VM after provisioning
EOF
}
}
# Define and provision a new VM by cloning the template and applying initialization
resource "proxmox_virtual_environment_vm" "vm" {
name = var.vm_name # VM name
node_name = var.node_name # Proxmox node to deploy the VM
tags = var.vm_tags # Optional VM tags for categorization
vm_id = var.vm_id
agent {
enabled = true # Enable the QEMU guest agent
}
stop_on_destroy = true # Ensure VM is stopped gracefully when destroyed
clone {
vm_id = data.proxmox_virtual_environment_vms.template.vms[0].vm_id # ID of the source template
node_name = data.proxmox_virtual_environment_vms.template.vms[0].node_name # Node of the source template
}
bios = var.vm_bios # BIOS type (e.g., seabios or ovmf)
machine = var.vm_machine # Machine type (e.g., q35)
cpu {
cores = var.vm_cpu # Number of CPU cores
type = "host" # Use host CPU type for best compatibility/performance
}
memory {
dedicated = var.vm_ram # RAM in MB
}
disk {
datastore_id = var.node_datastore # Datastore to hold the disk
interface = "scsi0" # Primary disk interface
size = 4 # Disk size in GB
}
initialization {
user_data_file_id = proxmox_virtual_environment_file.cloud_config.id # Link the cloud-init file
datastore_id = var.node_datastore
interface = "scsi1" # Separate interface for cloud-init
ip_config {
ipv4 {
address = var.vm_address # Static IPv4 address in CIDR notation
gateway = var.vm_gateway # Default gateway
}
}
}
network_device {
bridge = "vmbr0" # Use the default bridge
#vlan_id = var.vm_vlan # VLAN tagging if used
}
operating_system {
type = "l26" # Linux 2.6+ kernel
}
vga {
type = "std" # Standard VGA type
}
lifecycle {
ignore_changes = [ # Ignore the initialization section after first deployment for idempotency
initialization
]
}
}
# Output the assigned IP address of the VM after provisioning
#output "vm_ip" {
# value = proxmox_virtual_environment_vm.vm.ipv4_addresses[1][0] # Second network interface's first IP
# description = "VM IP"
#}
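If the output block above is uncommented, the address reported by the QEMU guest agent can be read back after an apply (a sketch):
```
tofu apply
tofu output vm_ip   # available once the guest agent inside the VM reports its addresses
```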

opentofu/provider.tf Normal file

@@ -0,0 +1,23 @@
# Define the required Terraform provider block
terraform {
required_providers {
proxmox = {
source = "bpg/proxmox" # Use the community Proxmox provider from the bpg namespace
}
}
}
# Configure the Proxmox provider with API and SSH access
provider "proxmox" {
endpoint = var.proxmox_endpoint # Proxmox API URL (e.g., https://proxmox.local:8006/api2/json)
api_token = var.proxmox_api_token # API token for authentication (should have appropriate permissions)
insecure = true # Skip TLS certificate verification (accepts self-signed or invalid certificates; set to true only in trusted/test environments)
# Optional SSH settings used for VM customization via SSH
ssh {
agent = true # Use the local SSH agent for authentication (instead of the private key file below)
# private_key = file("~/.ssh/id_ed25519") # Load SSH private key from the local file system
username = "root" # SSH username for connecting to the Proxmox host
}
}

View File

@@ -0,0 +1,8 @@
node_name = "px2" # Name of the Proxmox node where the VM will be deployed
vm_name = "infra4" # Desired name for the new virtual machine
vm_id = "2001" # Desired ID for the new virtual machine
vm_cpu = 1 # Number of CPU cores to allocate to the VM
vm_ram = 1024 # Amount of RAM in MB (1 GB)
#vm_vlan = 66 # VLAN ID for network segmentation
vm_address = "10.121.38.43/24"
vm_gateway = "10.121.38.254"

opentofu/token.txt Normal file

@@ -0,0 +1,10 @@
┌──────────────┬─────────────────────────────────────────────────────┐
│ key │ value │
╞══════════════╪═════════════════════════════════════════════════════╡
│ full-tokenid │ tofuer@pve!tofu │
├──────────────┼─────────────────────────────────────────────────────┤
│ info │ {"comment":"Tofu token","expire":"0","privsep":"0"} │
├──────────────┼─────────────────────────────────────────────────────┤
│ value │ 4b1234c0-1239-4563-789c-abcdef12eb7a │
└──────────────┴─────────────────────────────────────────────────────┘

opentofu/variables.tf Normal file

@@ -0,0 +1,99 @@
variable "proxmox_endpoint" {
description = "Proxmox URL endpoint"
type = string
}
variable "proxmox_api_token" {
description = "Proxmox API token"
type = string
sensitive = true
}
variable "node_name" {
description = "Proxmox host for the VM"
type = string
}
variable "node_datastore" {
description = "Datastore used for VM storage"
type = string
default = "local-lvm"
}
variable "vm_template" {
description = "Template of the VM"
type = string
#default = "ubuntu-cloud"
default = "Debian-12-Template"
}
variable "vm_name" {
description = "Hostname of the VM"
type = string
}
variable "vm_user" {
description = "Admin user of the VM"
type = string
default = "debian"
}
variable "vm_user_sshkey" {
description = "Admin user SSH key of the VM"
type = string
default = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDd97G/Uw3zlnhVByjpHZFw9FDa88phFUMtYfstTq7wYlUOJB2rdLPpU0bAjIpvpYmHOmBNseWYKaOT7EXNdxWPWJGuoen23tqdSzhnOV0LJz8zbCIA0Ykz/XOqRyJkq6qUw+L3atDxVC5pSSSY279yJtuQ2nmVld2KWDY4lnyZzZT5eQsrxCbT57hVMLCKcMKNb4QnYlLgnyHW8DyWHGG5GEWF9skFSPlmwY5s5H3OYg3u8ijuGjenjDTzLfRUzFltGJ2kBYbIn1iailArKpCiasmJyyja+YuYn3WwaNcl8Tpqa8eI52/LtOXDMwUKzvRJ6D6INEr/1duGYP/fQSEH root@ansible"
}
variable "vm_cpu" {
description = "Number of CPU cores of the VM"
type = number
default = 1
}
variable "vm_ram" {
description = "Number of RAM (MB) of the VM"
type = number
default = 1024
}
variable "vm_bios" {
description = "Type of BIOS used for the VM"
type = string
default = "ovmf"
}
variable "vm_machine" {
description = "Type of machine used for the VM"
type = string
default = "q35"
}
#variable "vm_vlan" {
# description = "VLAN of the VM"
# type = number
# #default = 66
#}
variable "vm_tags" {
description = "Tags for the VM"
type = list(any)
default = ["test"]
}
variable "vm_address" {
description = "IP v4 address for the VM - CIDR format"
type = string
# default = "q35"
}
variable "vm_gateway" {
description = "Gateway IP v4 address for the VM"
type = string
default = "10.121.38.254"
}
variable "vm_id" {
description = "VM identifier"
type = string
}

zabbix-docker/README.md Normal file

@@ -0,0 +1,22 @@
# Zabbix-docker
## Overview
This Vagrantfile:
* creates the **zabbix** VM, installs **docker** and writes a **compose.yml** file
* creates the **web1** VM, installs **apache2** as well as **zabbix-agent2** (active mode) and registers it with **zabbix**
## Procedure
```
vagrant up zabbix
vagrant up web1
```
* once logged in to zabbix (Admin/zabbix)
* add the host with its IP address 192.168.56.10 and the templates below (a quick agent check is sketched after this README excerpt):
  * Linux server
  * apache web server zabbix-agent 2
## Documentation
* see https://www.virtualizationhowto.com/2025/11/why-i-switched-to-zabbix-for-monitoring-my-docker-containers/
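A minimal way to verify the setup once both VMs are up (a sketch; it uses the container list on the zabbix VM and zabbix_agent2's built-in test mode on web1 as smoke checks):
```
# the three containers from compose.yml should be running on the zabbix VM
vagrant ssh zabbix -c "docker ps --format '{{.Names}}: {{.Status}}'"
# on web1, the agent should be active and able to resolve a test item key
vagrant ssh web1 -c "systemctl is-active zabbix-agent2"
vagrant ssh web1 -c "zabbix_agent2 -t agent.hostname"
```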

zabbix-docker/Vagrantfile vendored Normal file

@@ -0,0 +1,165 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
# All Vagrant configuration is done below. The "2" in Vagrant.configure
# configures the configuration version (we support older styles for
# backwards compatibility). Please don't change it unless you know what
# you're doing.
Vagrant.configure("2") do |config|
# The most common configuration options are documented and commented below.
# For a complete reference, please see the online documentation at
# https://docs.vagrantup.com.
# Every Vagrant development environment requires a box. You can search for
# boxes at https://vagrantcloud.com/search.
config.vm.define "zabbix" do |zabbix|
zabbix.vm.box = "debian/bookworm64"
zabbix.vm.hostname = "zabbix"
# Disable automatic box update checking. If you disable this, then
# boxes will only be checked for updates when the user runs
# `vagrant box outdated`. This is not recommended.
# config.vm.box_check_update = false
# Create a forwarded port mapping which allows access to a specific port
# within the machine from a port on the host machine. In the example below,
# accessing "localhost:8080" will access port 80 on the guest machine.
# NOTE: This will enable public access to the opened port
# config.vm.network "forwarded_port", guest: 80, host: 8080
# Create a forwarded port mapping which allows access to a specific port
# within the machine from a port on the host machine and only allow access
# via 127.0.0.1 to disable public access
# config.vm.network "forwarded_port", guest: 80, host: 8080, host_ip: "127.0.0.1"
# Create a private network, which allows host-only access to the machine
# using a specific IP.
# config.vm.network "private_network", ip: "192.168.33.10"
# zabbix.vm.network "private_network", ip: "192.168.56.10"
zabbix.vm.network "private_network", ip: "192.168.56.10"
# Create a public network, which generally matched to bridged network.
# Bridged networks make the machine appear as another physical device on
# your network.
#zabbix.vm.network "public_network"
# Share an additional folder to the guest VM. The first argument is
# the path on the host to the actual folder. The second argument is
# the path on the guest to mount the folder. And the optional third
# argument is a set of non-required options.
# config.vm.synced_folder "../data", "/vagrant_data"
# Disable the default share of the current code directory. Doing this
# provides improved isolation between the vagrant box and your host
# by making sure your Vagrantfile isn't accessible to the vagrant box.
# If you use this you may want to enable additional shared subfolders as
# shown above
# config.vm.synced_folder ".", "/vagrant", disabled: true
# Provider-specific configuration so you can fine-tune various
# backing providers for Vagrant. These expose provider-specific options.
# Example for VirtualBox:
#
zabbix.vm.provider "virtualbox" do |vb|
# # Display the VirtualBox GUI when booting the machine
# vb.gui = true
#
# # Customize the amount of memory on the VM:
vb.memory = "2048"
end
#
# View the documentation for the provider you are using for more
# information on available options.
# Enable provisioning with a shell script. Additional provisioners such as
# Ansible, Chef, Docker, Puppet and Salt are also available. Please see the
# documentation for more information about their specific syntax and use.
zabbix.vm.provision "shell", inline: <<-SHELL
timedatectl set-timezone Europe/Paris
apt-get update && apt upgrade -y
apt-get install -y wget curl git vim
if ! which docker ; then
curl -s -o getdocker.sh https://get.docker.com
bash getdocker.sh
gpasswd -a vagrant docker
fi
mkdir zabbix && cd zabbix
cat > compose.yml <<EOT
services:
mysql-server:
image: mysql:8.0
container_name: zabbix-mysql
restart: unless-stopped
environment:
MYSQL_DATABASE: zabbix
MYSQL_USER: zabbix
MYSQL_PASSWORD: zabbix_password
MYSQL_ROOT_PASSWORD: root_password
volumes:
- ./zabbix/mysql:/var/lib/mysql
command:
- mysqld
- --character-set-server=utf8mb4
- --collation-server=utf8mb4_bin
zabbix-server:
image: zabbix/zabbix-server-mysql:latest
container_name: zabbix-server
restart: unless-stopped
environment:
DB_SERVER_HOST: mysql-server
MYSQL_DATABASE: zabbix
MYSQL_USER: zabbix
MYSQL_PASSWORD: zabbix_password
MYSQL_ROOT_PASSWORD: root_password
ports:
- "10051:10051"
volumes:
- ./zabbix/server:/var/lib/zabbix
depends_on:
- mysql-server
zabbix-web:
image: zabbix/zabbix-web-nginx-mysql:latest
container_name: zabbix-web
restart: unless-stopped
environment:
DB_SERVER_HOST: mysql-server
MYSQL_DATABASE: zabbix
MYSQL_USER: zabbix
MYSQL_PASSWORD: zabbix_password
ZBX_SERVER_HOST: zabbix-server
PHP_TZ: Europe/Paris
ports:
- "8080:8080"
depends_on:
- mysql-server
- zabbix-server
EOT
docker compose up -d
SHELL
end
config.vm.define "web1" do |srv| #VM No'1
srv.vm.box = "debian/bookworm64" #Setting machine type
srv.vm.hostname = "web1" #Setting machine type
srv.vm.network "private_network", ip: "192.168.56.11"
srv.vm.provision "shell", inline: <<-SHELL
timedatectl set-timezone Europe/Paris
apt-get update
apt-get install -y wget curl vim apache2
wget https://repo.zabbix.com/zabbix/7.4/release/debian/pool/main/z/zabbix-release/zabbix-release_latest_7.4+debian12_all.deb
dpkg -i zabbix-release_latest_7.4+debian12_all.deb
apt update
apt install -y zabbix-agent2
echo "Server=192.168.56.10" >> /etc/zabbix/zabbix_agent2.conf
echo "ServerActive=192.168.56.10" >> /etc/zabbix/zabbix_agent2.conf
echo "Hostname=web1" >> /etc/zabbix/zabbix_agent2.conf
systemctl restart zabbix-agent2
SHELL
end
end

zabbix-docker/compose.yml Normal file

@@ -0,0 +1,53 @@
services:
mysql-server:
image: mariadb
container_name: zabbix-mysql
restart: unless-stopped
environment:
MYSQL_DATABASE: zabbix
MYSQL_USER: zabbix
MYSQL_PASSWORD: zabbix_password
MYSQL_ROOT_PASSWORD: root_password
volumes:
- ./zabbix/mysql:/var/lib/mysql
command: mysqld --character-set-server=utf8mb4 --collation-server=utf8mb4_bin
zabbix-server:
image: zabbix/zabbix-server-mysql:latest
container_name: zabbix-server
restart: unless-stopped
environment:
DB_SERVER_HOST: mysql-server
MYSQL_DATABASE: zabbix
MYSQL_USER: zabbix
MYSQL_PASSWORD: zabbix_password
MYSQL_ROOT_PASSWORD: root_password
ports:
- "10051:10051"
volumes:
- ./zabbix/server:/var/lib/zabbix
depends_on:
- mysql-server
zabbix-web:
image: zabbix/zabbix-web-nginx-mysql:latest
container_name: zabbix-web
restart: unless-stopped
environment:
DB_SERVER_HOST: mysql-server
MYSQL_DATABASE: zabbix
MYSQL_USER: zabbix
MYSQL_PASSWORD: zabbix_password
ZBX_SERVER_HOST: zabbix-server
PHP_TZ: Europe/Paris
ports:
- "8080:8080"
depends_on:
- mysql-server
- zabbix-server
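To run this compose file directly on a docker host (outside the Vagrant provisioning), a minimal sketch, launched from the zabbix-docker/ directory:
```
docker compose up -d
docker compose ps                 # mysql-server, zabbix-server and zabbix-web should be listed
curl -I http://localhost:8080     # the Zabbix web UI (default login Admin/zabbix)
```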