Compare commits


1 Commit

Author SHA1 Message Date
32329396a9 mkvm: normal mode for the -s option 2024-01-18 00:12:51 +01:00
107 changed files with 547 additions and 3654 deletions

View File

@ -1,6 +1,6 @@
# gsb2024
2024-01-19 11h45 ps
2024-01-17 18h04 ps
Environment and **ansible** playbooks for the **GSB 2024** project
@ -23,8 +23,8 @@ Prérequis :
* **r-ext**: routing, NAT
* **s-proxy**: **squid** proxy
* **s-itil**: GLPI server
* **s-backup**: slave DNS + s-win backup (SMB), Stork and Gotify
* **s-mon**: monitoring with **Nagios4/Zabbix**, notifications and journald
* **s-backup**: slave DNS + s-win backup (SMB)
* **s-mon**: monitoring with **Nagios4**, notifications and syslog
* **s-fog**: workstation deployment with **FOG**
* **s-win**: Windows Server 2019, AD, DNS, DHCP, file sharing
* **s-nxc**: NextCloud with **docker** behind the **traefik** reverse proxy and a self-signed certificate
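The YAML files that follow in this compare view read like **goss** health-check specifications for the machines listed above (the gsbstore script later in the diff fetches goss/dgoss). As a hedged illustration only, such a spec could be run on its host roughly as follows, assuming the binary is installed as `/usr/local/bin/goss` and the spec is saved as `goss.yaml`:

```shell
# Hypothetical validation run on one machine; path and file name are assumptions.
goss -g /root/goss.yaml validate --format documentation
```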

View File

@ -1,25 +0,0 @@
port:
tcp:22:
listening: true
ip:
- 0.0.0.0
tcp6:22:
listening: true
ip:
- '::'
service:
sshd:
enabled: true
running: true
user:
sshd:
exists: true
uid: 101
gid: 65534
groups:
- nogroup
home: /run/sshd
shell: /usr/sbin/nologin
process:
sshd:
running: true

View File

@ -1,87 +1,59 @@
#package:
# systemd-journal-remote:
# installed: true
file:
/etc/nginx/sites-enabled/default:
exists: false
contents: []
/etc/nginx/sites-enabled/glpi:
exists: true
mode: "0644"
owner: root
group: root
filetype: file
contents: []
/var/www/html/glpi:
exists: true
mode: "0755"
owner: www-data
group: www-data
filetype: directory
contents: []
/var/www/html/glpicli:
exists: true
mode: "0775"
owner: www-data
group: www-data
filetype: directory
contents: []
/var/www/html/glpicli/GLPI-Agent-1.7-x64.msi:
exists: true
mode: "0644"
owner: root
group: root
filetype: file
contents: []
port:
tcp:22:
listening: true
ip:
- 0.0.0.0
tcp:80:
listening: true
ip:
- 0.0.0.0
tcp:3306:
listening: true
ip:
- 127.0.0.1
tcp:9000:
listening: true
ip:
- 127.0.0.1
tcp:10050:
listening: true
ip:
- 0.0.0.0
/var/www/html/glpi:
exists: true
mode: "0755"
owner: www-data
group: www-data
filetype: directory
/var/www/html/glpicli:
exists: true
mode: "0775"
owner: www-data
group: www-data
filetype: directory
/var/www/html/glpi/plugins:
exists: true
mode: "0777"
filetype: directory
/var/www/html/glpicli/GLPI-Agent-1.7-x64.msi:
exists: true
#mode: "0777"
filetype: file
/var/www/html/index.nginx-debian.html:
exists: true
mode: "0775"
owner: www-data
group: www-data
filetype: file
service:
mariadb.service:
enabled: true
running: true
nginx:
enabled: true
running: true
php8.2-fpm.service:
enabled: true
running: true
ssh:
enabled: true
running: true
systemd-journal-upload:
enabled: true
running: true
zabbix-agent:
enabled: true
running: true
http:
http://s-itil.gsb.lan/:
status: 200
allow-insecure: false
no-follow-redirects: false
timeout: 5000
body: []
username: glpi
password: glpi
http://s-itil.gsb.lan/glpicli:
status: 200
allow-insecure: false
no-follow-redirects: false
timeout: 5000
body: []
mariadb:
enabled: true
running: true
nginx:
enabled: true
running: true
zabbix-agent:
enable: true
running: true
systemd-journal-upload.service:
enabled: true
running: true
port:
tcp:10050:
listening: true
ip:
- 0.0.0.0
tcp:10050:
listening: true
ip:
- '::'

View File

@ -1,93 +0,0 @@
file:
/etc/kea/kea-ctrl-agent.conf:
exists: true
mode: "0644"
owner: _kea
group: root
filetype: file
contents: []
/etc/kea/kea-dhcp4.conf:
exists: true
mode: "0644"
owner: _kea
group: root
filetype: file
contents: []
/tmp/kea4-ctrl-socket:
exists: true
mode: "0755"
size: 0
owner: _kea
group: _kea
filetype: socket
contains: []
contents: null
/usr/lib/x86_64-linux-gnu/kea:
exists: true
mode: "0755"
owner: root
group: root
filetype: directory
contents: []
package:
isc-kea-common:
installed: true
versions:
- 2.4.1-isc20231123184533
isc-kea-ctrl-agent:
installed: true
versions:
- 2.4.1-isc20231123184533
isc-kea-dhcp4:
installed: true
versions:
- 2.4.1-isc20231123184533
isc-kea-hooks:
installed: true
versions:
- 2.4.1-isc20231123184533
libmariadb3:
installed: true
versions:
- 1:10.11.4-1~deb12u1
mariadb-common:
installed: true
versions:
- 1:10.11.4-1~deb12u1
mysql-common:
installed: true
versions:
- 5.8+1.1.0
addr:
udp://172.16.64.254:67:
local-address: 127.0.0.1
reachable: true
timeout: 500
port:
tcp:8000:
listening: true
ip:
- 172.16.0.20
service:
isc-kea-ctrl-agent.service:
enabled: true
running: true
isc-kea-dhcp4-server.service:
enabled: true
running: true
interface:
enp0s3:
exists: true
addrs:
- 192.168.99.20/24
mtu: 1500
enp0s8:
exists: true
addrs:
- 172.16.0.20/24
mtu: 1500
enp0s9:
exists: true
addrs:
- 172.16.64.20/24
mtu: 1500

View File

@ -1,93 +0,0 @@
file:
/etc/kea/kea-ctrl-agent.conf:
exists: true
mode: "0644"
owner: _kea
group: root
filetype: file
contents: []
/etc/kea/kea-dhcp4.conf:
exists: true
mode: "0644"
owner: _kea
group: root
filetype: file
contents: []
/tmp/kea4-ctrl-socket:
exists: true
mode: "0755"
size: 0
owner: _kea
group: _kea
filetype: socket
contains: []
contents: null
/usr/lib/x86_64-linux-gnu/kea:
exists: true
mode: "0755"
owner: root
group: root
filetype: directory
contents: []
package:
isc-kea-common:
installed: true
versions:
- 2.4.1-isc20231123184533
isc-kea-ctrl-agent:
installed: true
versions:
- 2.4.1-isc20231123184533
isc-kea-dhcp4:
installed: true
versions:
- 2.4.1-isc20231123184533
isc-kea-hooks:
installed: true
versions:
- 2.4.1-isc20231123184533
libmariadb3:
installed: true
versions:
- 1:10.11.4-1~deb12u1
mariadb-common:
installed: true
versions:
- 1:10.11.4-1~deb12u1
mysql-common:
installed: true
versions:
- 5.8+1.1.0
addr:
udp://172.16.64.254:67:
local-address: 127.0.0.1
reachable: true
timeout: 500
port:
tcp:8000:
listening: true
ip:
- 172.16.0.21
service:
isc-kea-ctrl-agent.service:
enabled: true
running: true
isc-kea-dhcp4-server.service:
enabled: true
running: true
interface:
enp0s3:
exists: true
addrs:
- 192.168.99.21/24
mtu: 1500
enp0s8:
exists: true
addrs:
- 172.16.0.21/24
mtu: 1500
enp0s9:
exists: true
addrs:
- 172.16.64.21/24
mtu: 1500

View File

@ -1,38 +1,21 @@
addr:
tcp://192.168.102.1:80:
reachable: true
timeout: 500
tcp://192.168.102.2:80:
reachable: true
timeout: 500
service:
mariadb:
enabled: true
running: true
mysql:
enabled: true
running: true
user:
mysql:
exists: true
uid: 104
gid: 111
groups:
- mysql
home: /nonexistent
shell: /bin/false
group:
mysql:
exists: true
gid: 111
package:
mysql-server:
installed: true
versions:
- 5.5.54-0+deb8u1
command:
egrep "#bind-address" /etc/mysql/my.cnf:
exit-status: 0
stdout:
- "#bind-address\t\t= 127.0.0.1"
stderr: []
timeout: 10000
interface:
enp0s3:
exists: true
addrs:
- 192.168.99.154/24
mtu: 1500
enp0s8:
exists: true
addrs:
- 192.168.102.254/24
mtu: 1500
enp0s3:
exists: true
addrs:
- 192.168.99.13/24
enp0s8:
exists: true
addrs:
- 192.168.102.50/24

View File

@ -1,62 +1,63 @@
package:
apache2:
installed: true
versions:
- 2.4.57-2
nfs-common:
installed: true
versions:
- 1:2.6.2-4
apache2:
installed: true
versions:
- 2.4.10-10+deb8u7
php5:
installed: true
versions:
- 5.6.29+dfsg-0+deb8u1
port:
tcp6:80:
listening: true
ip:
- '::'
tcp:22:
listening: true
ip:
- 0.0.0.0
tcp6:22:
listening: true
ip:
- '::'
tcp6:80:
listening: true
ip:
- '::'
service:
apache2:
enabled: true
running: true
nfs-common:
enabled: false
running: false
apache2:
enabled: true
running: true
sshd:
enabled: true
running: true
user:
sshd:
exists: true
uid: 105
gid: 65534
groups:
- nogroup
home: /var/run/sshd
shell: /usr/sbin/nologin
command:
egrep 192.168.102.14:/export/www /etc/fstab:
exit-status: 0
stdout:
- 192.168.102.14:/export/www /var/www/html nfs _netdev rw 0 0
stderr: []
timeout: 10000
process:
apache2:
running: true
mount:
/var/www/html:
exists: true
opts:
- rw
- relatime
vfs-opts:
- rw
- vers=4.2
- rsize=131072
- wsize=131072
- namlen=255
- hard
- proto=tcp
- timeo=600
- retrans=2
- sec=sys
- clientaddr=192.168.102.1
- local_lock=none
- addr=192.168.102.253
source: 192.168.102.253:/home/wordpress
filesystem: nfs4
apache2:
running: true
sshd:
running: true
interface:
enp0s3:
exists: true
addrs:
- 192.168.99.101/24
mtu: 1500
enp0s8:
exists: true
addrs:
- 192.168.101.1/24
mtu: 1500
enp0s9:
exists: true
addrs:
- 192.168.102.1/24
mtu: 1500
enp0s3:
exists: true
addrs:
- 192.168.99.11/24
enp0s8:
exists: true
addrs:
- 192.168.101.1/24
enp0s9:
exists: true
addrs:
- 192.168.102.1/24

View File

@ -1,62 +1,63 @@
package:
apache2:
installed: true
versions:
- 2.4.57-2
nfs-common:
installed: true
versions:
- 1:2.6.2-4
apache2:
installed: true
versions:
- 2.4.10-10+deb8u7
php5:
installed: true
versions:
- 5.6.29+dfsg-0+deb8u1
port:
tcp6:80:
listening: true
ip:
- '::'
tcp:22:
listening: true
ip:
- 0.0.0.0
tcp6:22:
listening: true
ip:
- '::'
tcp6:80:
listening: true
ip:
- '::'
service:
apache2:
enabled: true
running: true
nfs-common:
enabled: false
running: false
apache2:
enabled: true
running: true
sshd:
enabled: true
running: true
user:
sshd:
exists: true
uid: 105
gid: 65534
groups:
- nogroup
home: /var/run/sshd
shell: /usr/sbin/nologin
command:
egrep 192.168.102.14:/export/www /etc/fstab:
exit-status: 0
stdout:
- 192.168.102.14:/export/www /var/www/html nfs _netdev rw 0 0
stderr: []
timeout: 10000
process:
apache2:
running: true
mount:
/var/www/html:
exists: true
opts:
- rw
- relatime
vfs-opts:
- rw
- vers=4.2
- rsize=131072
- wsize=131072
- namlen=255
- hard
- proto=tcp
- timeo=600
- retrans=2
- sec=sys
- clientaddr=192.168.102.2
- local_lock=none
- addr=192.168.102.253
source: 192.168.102.253:/home/wordpress
filesystem: nfs4
apache2:
running: true
sshd:
running: true
interface:
enp0s3:
exists: true
addrs:
- 192.168.99.102/24
mtu: 1500
enp0s8:
exists: true
addrs:
- 192.168.101.2/24
mtu: 1500
enp0s9:
exists: true
addrs:
- 192.168.102.2/24
mtu: 1500
enp0s3:
exists: true
addrs:
- 192.168.99.12/24
enp0s8:
exists: true
addrs:
- 192.168.101.2/24
enp0s9:
exists: true
addrs:
- 192.168.102.2/24

View File

@ -1,55 +1,28 @@
package:
haproxy:
installed: true
versions:
- 2.6.12-1+deb12u1
addr:
tcp://192.168.101.1:80:
reachable: true
timeout: 500
tcp://192.168.101.2:80:
reachable: true
timeout: 500
port:
tcp:80:
listening: true
ip:
- 192.168.100.10
tcp:80:
listening: true
ip:
- 192.168.100.11
service:
haproxy:
enabled: true
running: true
user:
haproxy:
exists: true
uid: 104
gid: 111
groups:
- haproxy
home: /var/lib/haproxy
shell: /usr/sbin/nologin
group:
haproxy:
exists: true
gid: 111
process:
haproxy:
running: true
haproxy:
enabled: true
running: true
sshd:
enabled: true
running: true
interface:
enp0s3:
exists: true
addrs:
- 192.168.99.100/24
mtu: 1500
enp0s8:
exists: true
addrs:
- 192.168.100.10/24
mtu: 1500
http:
http://192.168.100.10/:
status: 200
allow-insecure: false
no-follow-redirects: false
timeout: 5000
body: []
enp0s3:
exists: true
addrs:
- 192.168.99.100/24
mtu: 1500
enp0s8:
exists: true
addrs:
- 192.168.100.11/24
mtu: 1500
enp0s9:
exists: true
addrs:
- 192.168.101.254/24
mtu: 1500

View File

@ -1,62 +1,92 @@
file:
/etc/systemd/system/systemd-journal-remote.service:
exists: true
mode: "0644"
owner: root
group: root
filetype: file
contents: []
/var/log/journal/remote:
exists: true
mode: "0755"
owner: systemd-journal-remote
group: systemd-journal-remote
filetype: directory
contents: []
package:
apache2:
installed: true
versions:
- 2.4.57-2
mariadb-server:
installed: true
versions:
- 1:10.11.4-1~deb12u1
systemd-journal-remote:
installed: true
versions:
- 252.19-1~deb12u1
apache2:
installed: true
zabbix-server-mysql:
installed: true
zabbix-frontend-php:
installed: true
zabbix-apache-conf:
installed: true
zabbix-sql-scripts:
installed: true
zabbix-agent:
installed: true
mariadb-server:
installed: true
python3-pymysql:
installed: true
systemd-journal-remote:
installed: true
file:
/etc/systemd/system/systemd-journal-remote.service:
exist: true
mode: "0777"
filetype: directory
/var/log/journal/remote:
exist: true
mode: "0777"
filetype: directory
port:
tcp:80:
listening: true
ip:
- 0.0.0.0
tcp:3306:
listening: true
ip:
- 127.0.0.1
tcp:10050:
listening: true
ip:
- 0.0.0.0
tcp:10051:
listening: true
ip:
- 0.0.0.0
tcp:19532:
listening: true
ip:
- '*'
service:
apache2:
enabled: true
running: true
mariadb.service:
enabled: true
running: true
systemd-journal-remote.socket:
enabled: true
running: true
zabbix-agent:
enabled: true
running: true
zabbix-server:
enabled: true
running: true
apache2:
enabled: true
running: true
zabbix-server:
enabled: true
running: true
zabbix-agent:
enabled: true
running: true
systemd-journal-remote.socket:
enabled: true
running: true
command:
sysctl net.ipv4.ip_forward:
exit-status: 0
stdout:
- net.ipv4.ip_forward = 0
stderr: []
timeout: 10000
process:
apache2:
running: true
zabbix_server:
running: true
mariadb:
running: true
interface:
enp0s3:
exists: true
addrs:
- 192.168.99.8/24
mtu: 1500
enp0s8:
exists: true
addrs:
- 172.16.0.8/24
mtu: 1500
enp0s3:
exists: true
addrs:
- 192.168.99.8/24
enp0s8:
exists: true
addrs:
- 172.16.0.8/24
http:
http://s-mon.gsb.lan/zabbix:
status: 200
allow-insecure: false
no-follow-redirects: false
timeout: 5000
body: []
http://localhost/zabbix:
status: 401
allow-insecure: false
no-follow-redirects: false
timeout: 5000
body: []

View File

@ -1,55 +0,0 @@
file:
/home/wordpress:
exists: true
mode: "0755"
owner: www-data
group: www-data
filetype: directory
contents: []
package:
file:
installed: true
versions:
- 1:5.44-3
nfs-common:
installed: true
versions:
- 1:2.6.2-4
nfs-kernel-server:
installed: true
versions:
- 1:2.6.2-4
addr:
tcp://192.168.102.1:80:
reachable: true
timeout: 500
tcp://192.168.102.2:80:
reachable: true
timeout: 500
service:
nfs-common:
enabled: false
running: false
nfs-kernel-server:
enabled: true
running: true
nfs-mountd:
enabled: true
running: true
nfs-server:
enabled: true
running: true
nfs-utils:
enabled: true
running: false
interface:
enp0s3:
exists: true
addrs:
- 192.168.99.153/24
mtu: 1500
enp0s8:
exists: true
addrs:
- 192.168.102.253/24
mtu: 1500

View File

@ -1,145 +0,0 @@
file:
/root/nxc:
exists: true
mode: "0755"
#size: 4096
#owner: root
#group: root
filetype: directory
contains: []
/root/nxc/certs:
exists: true
mode: "0755"
#size: 4096
#owner: root
#group: root
filetype: directory
contains: []
/root/nxc/config:
exists: true
mode: "0755"
#size: 4096
#owner: root
#group: root
filetype: directory
contains: []
/root/nxc/config/dynamic.yml:
exists: true
mode: "0644"
#size: 415
#owner: root
#group: root
filetype: file
contains: []
/root/nxc/config/static.yml:
exists: true
mode: "0644"
#size: 452
#owner: root
#group: root
filetype: file
contains: []
/root/nxc/docker-compose.yml:
exists: true
mode: "0644"
#size: 2135
#owner: root
#group: root
filetype: file
contains: []
/root/nxc/nxc-debug.sh:
exists: true
mode: "0755"
#size: 64
#owner: root
#group: root
filetype: file
contains: []
/root/nxc/nxc-prune.sh:
exists: true
mode: "0755"
#size: 110
#owner: root
#group: root
filetype: file
contains: []
/root/nxc/nxc-start.sh:
exists: true
mode: "0755"
#size: 34
#owner: root
#group: root
filetype: file
contains: []
/root/nxc/nxc-stop.sh:
exists: true
mode: "0755"
#size: 32
#owner: root
#group: root
filetype: file
contains: []
/usr/local/bin/mkcert:
exists: true
mode: "0755"
#size: 4788866
#owner: root
#group: root
filetype: file
contains: []
#addr:
#tcp://s-nxc.gsb.lan:443:
#reachable: true
#timeout: 500
port:
tcp:22:
listening: true
ip:
- 0.0.0.0
tcp:80:
listening: true
ip: []
tcp:443:
listening: true
ip: []
#tcp:8081:
#listening: true
#ip:
#- 0.0.0.0
interface:
enp0s3:
exists: true
addrs:
- 192.168.99.7/24
mtu: 1500
enp0s8:
exists: true
addrs:
- 172.16.0.7/24
mtu: 1500
http:
https://s-nxc.gsb.lan:
status: 200
allow-insecure: true
no-follow-redirects: false
timeout: 5000
body:
- Nextcloud

View File

@ -11,7 +11,7 @@ GITPRJ=gsb2024
apt-get update
apt-get install -y lighttpd git
STOREREP="/var/www/html/gsbstore"
SRC="${SRC:-http://depl.sio.lan/gsbstore}"
GLPIREL=10.0.11
str="wget -nc -4 https://github.com/glpi-project/glpi/releases/download/${GLPIREL}/glpi-${GLPIREL}.tgz"
@ -39,7 +39,7 @@ str7="wget -nc -4 https://github.com/goss-org/goss/releases/latest/download/dgos
str8="wget -nc -4 'https://gestsup.fr/index.php?page=download&channel=stable&version=3.2.30&type=gestsup' -O gestsup_3.2.30.zip"
#METRICBEAT ET FILEBEAT
ELKREL=8.11.4
ELKREL=8.11.3
str81="wget -nc -4 https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-${ELKREL}-amd64.deb"
str82="wget -nc -4 https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-${ELKREL}-windows-x86_64.zip"
str83="wget -nc -4 https://artifacts.elastic.co/downloads/beats/metricbeat/metricbeat-${ELKREL}-windows-x86_64.zip"
@ -50,12 +50,6 @@ str84="wget -nc -4 https://artifacts.elastic.co/downloads/beats/metricbeat/metri
(cat <<EOT > "${STOREREP}/getall"
#!/bin/bash
if [[ -z "${SRC+x}" ]]; then
echo "erreur : variable SRC indefinie"
echo " SRC : URL serveur deploiement"
echo "export SRC=http://depl.sio.adm/gsbstore ; ./$0"
exit 1
fi
${str}
${str31}
@ -78,7 +72,6 @@ ${str81}
${str82}
${str83}
${str84}
wget -nc -4 "${SRC}/zabbix.sql.gz" -O zabbix.sql.gz
EOT
)
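For reference, a hedged sketch of how the generated **getall** script might be invoked on the deployment server; the paths reuse the STOREREP and SRC values above, but the exact usage is an assumption:

```shell
# Hypothetical invocation on s-adm (assumption).
export SRC=http://depl.sio.lan/gsbstore
cd /var/www/html/gsbstore && bash ./getall
```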

View File

@ -24,7 +24,6 @@
192.168.99.16 s-fog.gsb.adm
192.168.99.20 s-kea1.gsb.adm
192.168.99.21 s-kea2.gsb.adm
192.168.99.22 s-awx.gsb.adm
192.168.99.50 s-lb-bd.gsb.adm
192.168.99.101 s-lb-web1.gsb.adm
192.168.99.102 s-lb-web2.gsb.adm

View File

@ -23,7 +23,6 @@
192.168.99.14 s-nas.gsb.adm
192.168.99.20 s-kea1.gsb.adm
192.168.99.21 s-kea2.gsb.adm
192.168.99.22 s-awx.gsb.adm
192.168.99.50 s-lb-bd.gsb.adm
192.168.99.101 s-lb-web1.gsb.adm
192.168.99.102 s-lb-web2.gsb.adm

View File

@ -5,7 +5,7 @@
;
$TTL 604800
@ IN SOA s-infra.gsb.lan. root.s-infra.gsb.lan. (
2024011900 ; Serial
2024011500 ; Serial
7200 ; Refresh
86400 ; Retry
8419200 ; Expire
@ -16,11 +16,9 @@ $TTL 604800
@ IN A 127.0.0.1
@ IN AAAA ::1
s-infra IN A 172.16.0.1
s-backup IN A 172.16.0.4
s-proxy IN A 172.16.0.2
s-appli IN A 172.16.0.3
s-backup IN A 172.16.0.4
s-stork IN A 172.16.0.4
s-gotify IN A 172.16.0.4
s-win IN A 172.16.0.6
s-mess IN A 172.16.0.7
s-nxc IN A 172.16.0.7
@ -31,7 +29,6 @@ s-elk IN A 172.16.0.11
s-gestsup IN A 172.16.0.17
s-kea1 IN A 172.16.0.20
s-kea2 IN A 172.16.0.21
s-awx IN A 172.16.0.22
r-int IN A 172.16.0.254
r-int-lnk IN A 192.168.200.254
r-ext IN A 192.168.200.253

View File

@ -5,7 +5,7 @@
;
$TTL 604800
@ IN SOA s-infra.gsb.lan. root.s-infra.gsb.lan. (
2024011800 ; Serial
2024011500 ; Serial
7200 ; Refresh
86400 ; Retry
8419200 ; Expire
@ -23,7 +23,6 @@ $TTL 604800
9.0 IN PTR s-itil.gsb.lan.
20.0 IN PTR s-kea1.gsb.lan.
21.0 IN PTR s-kea2.gsb.lan.
22.0 IN PTR s-awx.gsb.lan.
101.1 IN PTR s-web1
101.2 IN PTR s-web2
100.10 IN PTR s-lb

Binary file not shown.

View File

@ -1,16 +1,16 @@
---
- name: on recupere getdocker
get_url:
url: http://s-adm.gsb.adm/gsbstore/getdocker.sh
dest: /usr/local/bin
- name: Supprime le fichier getdocker.sh si déjà présent
file:
state: absent
path: /tmp/getdocker.sh
- name: on verifie si docker est installe
stat:
path: /usr/bin/docker
#command: which docker
register: docker_present
- name: Télécharge le script d'installation de docker
uri:
url: 'https://get.docker.com'
method: GET
dest: /tmp/getdocker.sh
mode: a+x
register: result
- name: Execution du script getdocker si docker n'est pas deja installe
shell: bash /usr/local/bin/getdocker.sh
#when: docker_present.stdout.find('/usr/bin/docker') == -1
when: not docker_present.stat.exists
- name: Execution du script getdocker
shell: bash /tmp/getdocker.sh

View File

@ -1 +1 @@
BEATVER: "8.11.4"
BEATVER: "8.11.5"

View File

@ -8,11 +8,6 @@
apt:
deb: /tmp/filebeat-${BEATVEAR}-amd64.deb
- name: sorie pou debug
fail:
msg: "packet installe"
- name: Changement du fichier de conf
copy:
src: filebeat.yml

View File

@ -1,22 +1,9 @@
# The elk role
ELK Version 8.5.3
## Principle of the elk role
ELK 8.5.3
The purpose of this role is to install an ELK server that centralises logs and metrics to simplify the management of the GSB fleet.
The **elk** role installs **docker**; the tasks of this role are:
This role sets up an ELK server to centralise logs and metrics to simplify the management of the GSB fleet.
The principle of this role is to install docker; the tasks of this role are:
- check whether ELK is already installed,
- clone the **devianthony** repository from GitHub,
- change the configuration to switch to the 'basic' version,
- import a docker-compose file from GitHub,
- change the configuration to switch to the 'basic' version,
- start ELK with docker-compose
## Manual start
- from the **nxc** directory:
````shell
docker compose up setup
docker compose up -d
````

View File

@ -21,7 +21,7 @@
regexp: 'xpack.license.self_generated.type: trial'
replace: 'xpack.license.self_generated.type: basic'
# - name: Execution du fichier docker-compose.yml
# shell: docker compose pull
# args:
# chdir: /root/elk
- name: Execution du fichier docker-compose.yml
shell: docker compose up -d
args:
chdir: /root/elk

View File

@ -1,76 +1,6 @@
Ferm configuration
# [Ferm](http://ferm.foo-projects.org/)
Modifier l'execution d'iptables [plus d'info ici#!/bin/bash
set -u
set -e
# Version Site to Site
AddressAwg=10.0.0.1/32 # Adresse VPN Wireguard cote A
EndpointA=192.168.0.51 # Adresse extremite A
PortA=51820 # Port ecoute extremite A
NetworkA=192.168.1.0/24 # reseau cote A
NetworkC=192.168.200.0/24 #reseau cote A
NetworkD=172.16.0.0/24 #reseau cote A
AddressBwg=10.0.0.2/32 # Adresse VPN Wireguard cote B
EndpointB=192.168.0.52 # Adresse extremite B
PortB=51820 # Port ecoute extremite B
NetworkB=172.16.128.0/24 # reseau cote B
umask 077
wg genkey > endpoint-a.key
wg pubkey < endpoint-a.key > endpoint-a.pub
wg genkey > endpoint-b.key
wg pubkey < endpoint-b.key > endpoint-b.pub
PKA=$(cat endpoint-a.key)
pKA=$(cat endpoint-a.pub)
PKB=$(cat endpoint-b.key)
pKB=$(cat endpoint-b.pub)
cat <<FINI > wg0-a.conf
# local settings for Endpoint A
[Interface]
PrivateKey = $PKA
Address = $AddressAwg
ListenPort = $PortA
# IP forwarding
PreUp = sysctl -w net.ipv4.ip_forward=1
# remote settings for Endpoint B
[Peer]
PublicKey = $pKB
Endpoint = ${EndpointB}:$PortB
AllowedIPs = $AddressBwg, $NetworkB
FINI
cat <<FINI > wg0-b.conf
# local settings for Endpoint B
[Interface]
PrivateKey = $PKB
Address = $AddressBwg
ListenPort = $PortB
# IP forwarding
PreUp = sysctl -w net.ipv4.ip_forward=1
# remote settings for Endpoint A
[Peer]
PublicKey = $pKA
Endpoint = ${EndpointA}:$PortA
AllowedIPs = $AddressAwg, $NetworkA, $NetworkC, $NetworkD
FINI
echo "wg0-a.conf et wg0-b.conf sont generes ..."
echo "copier wg0-b.conf sur la machine b et renommer les fichiers de configuration ..."](https://wiki.debian.org/iptables)
Change how iptables is executed [more info here](https://wiki.debian.org/iptables)
```shell
update-alternatives --set iptables /usr/sbin/iptables-legacy
```

View File

@ -4,6 +4,7 @@
@def $DEV_PRIVATE = enp0s8;
@def $DEV_WORLD = enp0s9;
@def $DEV_WORLD = enp0s9;
@def $DEV_VPN= wg0;
@def $NET_PRIVATE = 172.16.0.0/24;
@ -31,7 +32,7 @@ table filter {
# well-known internet hosts
saddr ($NET_PRIVATE) proto tcp dport ssh ACCEPT;
# we provide DNS services for the internal net
# we provide DNS and SMTP services for the internal net
interface $DEV_PRIVATE saddr $NET_PRIVATE {
proto (udp tcp) dport domain ACCEPT;
proto udp dport bootps ACCEPT;

View File

@ -29,7 +29,7 @@ table filter {
# well-known internet hosts
saddr ($NET_PRIVATE) proto tcp dport ssh ACCEPT;
# we provide DNS services for the internal net
# we provide DNS and SMTP services for the internal net
interface $DEV_PRIVATE saddr $NET_PRIVATE {
proto (udp tcp) dport domain ACCEPT;
proto udp dport bootps ACCEPT;

View File

@ -1,50 +0,0 @@
---
- name: Mise a jour apt cache
apt:
update_cache: yes
- name: Creation /etc/gotify
ansible.builtin.file:
path: /etc/gotify
state: directory
mode: '0755'
- name: Creation /opt/gotify
ansible.builtin.file:
path: /opt/gotify
state: directory
mode: '0755'
- name: installation de gotify
get_url:
url: "https://github.com/gotify/server/releases/latest/download/gotify-linux-amd64.zip"
dest: "/tmp/gotify.zip"
- name: Extraction de Gotify
ansible.builtin.unarchive:
src: "/tmp/gotify.zip"
dest: "/opt/gotify"
become: yes
- name: Creation du fichier systemd
template:
src: "gotify.service.j2"
dest: "/etc/systemd/system/gotify.service"
become: yes
- name: Reload systemd
systemd:
daemon_reload: yes
- name: Creation du fichier conf gotify
template:
src: "config.yml.j2"
dest: "/etc/gotify/config.yml"
become: yes
- name: Demarage du gotify
systemd:
name: gotify
state: started
enabled: yes

View File

@ -1,4 +0,0 @@
server:
keepaliveperiodseconds: 0
listenaddr: "" # the address to bind on, leave empty to bind on all addresses
port: 8008

View File

@ -1,13 +0,0 @@
[Unit]
Description=Gotify Server
After=network.target
[Service]
Type=simple
User=root
ExecStart=/opt/gotify/gotify-linux-amd64
Restart=on-failure
[Install]
WantedBy=multi-user.target

View File

@ -1,21 +1,14 @@
# Kea role
***
Kea role: configuration of 2 KEA servers in high-availability mode.
Kea role for DHCP high availability
## Table of contents
1. [What does the Kea role do?]
2. [Installing and configuring kea]
3. [Remarks]
## What does the Kea role do?
The KEA role configures 2 kea servers (s-kea1 and s-kea2) in high-availability mode.
- The **s-kea1** server runs in **primary** mode and delivers the DHCP leases on the n-user network.
- The **s-kea2** server runs in **stand-by** mode; the DHCP service therefore fails over to **s-kea2** when **s-kea1** becomes unavailable.
It configures the kea servers in high-availability mode.
### Installing and configuring kea
The kea role installs the **kea dhcp4, hooks, admin** packages; once the packages are installed, it configures a kea server to hand out IP addresses on the n-user network and run in high availability.
### Remarks
Once the **s-kea** playbook has finished correctly and the **s-kea** machine has been rebooted, restart the **isc-kea-dhcp4.service** service so that the changes made to the network layer by the POST role are taken into account.
The kea role installs the kea dhcp4, hook and admin packages; once the packages are installed, the 2 kea servers are configured to hand out the n-user IP addresses and run in high availability.
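As a hedged complement to this description, the state of the HA pair could be checked through the Kea control agent once both servers are up; the address and port follow the kea-ctrl-agent configuration shown elsewhere in this diff, and the exact command is an assumption based on the Kea control API:

```shell
# Hypothetical HA health check against s-kea1's control agent (172.16.0.20:8000,
# as configured above); the reply should report the server state and role.
curl -s -X POST -H "Content-Type: application/json" \
     -d '{ "command": "ha-heartbeat", "service": [ "dhcp4" ] }' \
     http://172.16.0.20:8000/
```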

View File

@ -1,8 +1,8 @@
#variable kea
kea_ver: "2.4.1"
kea_dbname: ""
kaa_dbuser: ""
kea_dbpasswd: ""
kea_dhcp4_dir: "/etc/kea/kea-dhcp4.conf"
kea_ctrl_dir: "/etc/kea/kea-ctrl-agent.conf"
kea_ver: "2.4.1"
kea_dbname: ""
kea_dbuser: ""
kea_dbpasswd: ""
kea_dhcp4_dir: "/etc/kea/kea-dhcp4.conf"
kea_ctrl_dir: "/etc/kea/kea-ctrl-agent.conf"

View File

@ -1,66 +0,0 @@
// This is an example of a configuration for Control-Agent (CA) listening
// for incoming HTTP traffic. This is necessary for handling API commands,
// in particular lease update commands needed for HA setup.
{
"Control-agent":
{
// We need to specify where the agent should listen to incoming HTTP
// queries.
"http-host": "172.16.0.20",
// This specifies the port CA will listen on.
"http-port": 8000,
"control-sockets":
{
// This is how the Agent can communicate with the DHCPv4 server.
"dhcp4":
{
"comment": "socket to DHCPv4 server",
"socket-type": "unix",
"socket-name": "/tmp/kea4-ctrl-socket"
},
// Location of the DHCPv6 command channel socket.
# "dhcp6":
# {
# "socket-type": "unix",
# "socket-name": "/tmp/kea6-ctrl-socket"
# },
// Location of the D2 command channel socket.
# "d2":
# {
# "socket-type": "unix",
# "socket-name": "/tmp/kea-ddns-ctrl-socket",
# "user-context": { "in-use": false }
# }
},
// Similar to other Kea components, CA also uses logging.
"loggers": [
{
"name": "kea-ctrl-agent",
"output_options": [
{
"output": "stdout",
// Several additional parameters are possible in addition
// to the typical output. Flush determines whether logger
// flushes output to a file. Maxsize determines maximum
// filesize before the file is rotated. maxver
// specifies the maximum number of rotated files being
// kept.
"flush": true,
"maxsize": 204800,
"maxver": 4,
// We use pattern to specify custom log message layout
"pattern": "%d{%y.%m.%d %H:%M:%S.%q} %-5p [%c/%i] %m\n"
}
],
"severity": "INFO",
"debuglevel": 0 // debug level only applies when severity is set to DEBUG.
}
]
}
}

View File

@ -1,226 +0,0 @@
// This is an example configuration of the Kea DHCPv4 server 1:
//
// - uses High Availability hook library and Lease Commands hook library
// to enable High Availability function for the DHCP server. This config
// file is for the primary (the active) server.
// - uses memfile, which stores lease data in a local CSV file
// - it assumes a single /24 addressing over a link that is directly reachable
// (no DHCP relays)
// - there is a handful of IP reservations
//
// It is expected to run with a standby (the passive) server, which has a very similar
// configuration. The only difference is that "this-server-name" must be set to "server2" on the
// other server. Also, the interface configuration depends on the network settings of the
// particular machine.
{
"Dhcp4": {
// Add names of your network interfaces to listen on.
"interfaces-config": {
// The DHCPv4 server listens on this interface. When changing this to
// the actual name of your interface, make sure to also update the
// interface parameter in the subnet definition below.
"interfaces": [ "enp0s9" ]
},
// Control socket is required for communication between the Control
// Agent and the DHCP server. High Availability requires Control Agent
// to be running because lease updates are sent over the RESTful
// API between the HA peers.
"control-socket": {
"socket-type": "unix",
"socket-name": "/tmp/kea4-ctrl-socket"
},
// Use Memfile lease database backend to store leases in a CSV file.
// Depending on how Kea was compiled, it may also support SQL databases
// (MySQL and/or PostgreSQL). Those database backends require more
// parameters, like name, host and possibly user and password.
// There are dedicated examples for each backend. See Section 7.2.2 "Lease
// Storage" for details.
"lease-database": {
// Memfile is the simplest and easiest backend to use. It's an in-memory
// database with data being written to a CSV file. It is very similar to
// what ISC DHCP does.
"type": "memfile"
},
// Let's configure some global parameters. The home network is not very dynamic
// and there's no shortage of addresses, so no need to recycle aggressively.
"valid-lifetime": 43200, // leases will be valid for 12h
"renew-timer": 21600, // clients should renew every 6h
"rebind-timer": 32400, // clients should start looking for other servers after 9h
// Kea will clean up its database of expired leases once per hour. However, it
// will keep the leases in expired state for 2 days. This greatly increases the
// chances for returning devices to get the same address again. To guarantee that,
// use host reservation.
// If both "flush-reclaimed-timer-wait-time" and "hold-reclaimed-time" are
// not 0, when the client sends a release message the lease is expired
// instead of being deleted from lease storage.
"expired-leases-processing": {
"reclaim-timer-wait-time": 3600,
"hold-reclaimed-time": 172800,
"max-reclaim-leases": 0,
"max-reclaim-time": 0
},
// HA requires two hook libraries to be loaded: libdhcp_lease_cmds.so and
// libdhcp_ha.so. The former handles incoming lease updates from the HA peers.
// The latter implements high availability feature for Kea. Note the library name
// should be the same, but the path is OS specific.
"hooks-libraries": [
// The lease_cmds library must be loaded because HA makes use of it to
// deliver lease updates to the server as well as synchronize the
// lease database after failure.
{
"library": "/usr/local/lib/kea/hooks/libdhcp_lease_cmds.so"
},
{
// The HA hook library should be loaded.
"library": "/usr/local/lib/kea/hooks/libdhcp_ha.so",
"parameters": {
// Each server should have the same HA configuration, except for the
// "this-server-name" parameter.
"high-availability": [ {
// This parameter points to this server instance. The respective
// HA peers must have this parameter set to their own names.
"this-server-name": "s-kea1.gsb.lan",
// The HA mode is set to hot-standby. In this mode, the active server handles
// all the traffic. The standby takes over if the primary becomes unavailable.
"mode": "hot-standby",
// Heartbeat is to be sent every 10 seconds if no other control
// commands are transmitted.
"heartbeat-delay": 10000,
// Maximum time for partner's response to a heartbeat, after which
// failure detection is started. This is specified in milliseconds.
// If we don't hear from the partner in 60 seconds, it's time to
// start worrying.
"max-response-delay": 30000,
// The following parameters control how the server detects the
// partner's failure. The ACK delay sets the threshold for the
// 'secs' field of the received discovers. This is specified in
// milliseconds.
"max-ack-delay": 5000,
// This specifies the number of clients which send messages to
// the partner but appear to not receive any response.
"max-unacked-clients": 0,
// This specifies the maximum timeout (in milliseconds) for the server
// to complete sync. If you have a large deployment (high tens or
// hundreds of thousands of clients), you may need to increase it
// further. The default value is 60000ms (60 seconds).
"sync-timeout": 60000,
"peers": [
// This is the configuration of this server instance.
{
"name": "s-kea1.gsb.lan",
// This specifies the URL of this server instance. The
// Control Agent must run along with this DHCPv4 server
// instance and the "http-host" and "http-port" must be
// set to the corresponding values.
"url": "http://172.16.64.20:8000/",
// This server is primary. The other one must be
// secondary.
"role": "primary"
},
// This is the configuration of the secondary server.
{
"name": "s-kea2.gsb.lan",
// Specifies the URL on which the partner's control
// channel can be reached. The Control Agent is required
// to run on the partner's machine with "http-host" and
// "http-port" values set to the corresponding values.
"url": "http://172.16.64.21:8000/",
// The other server is secondary. This one must be
// primary.
"role": "standby"
}
]
} ]
}
}
],
// This example contains a single subnet declaration.
"subnet4": [
{
// Subnet prefix.
"subnet": "172.16.64.0/24",
// There are no relays in this network, so we need to tell Kea that this subnet
// is reachable directly via the specified interface.
"interface": "enp0s9",
// Specify a dynamic address pool.
"pools": [
{
"pool": "172.16.64.100-172.16.64.150"
}
],
// These are options that are subnet specific. In most cases, you need to define at
// least routers option, as without this option your clients will not be able to reach
// their default gateway and will not have Internet connectivity. If you have many
// subnets and they share the same options (e.g. DNS servers typically is the same
// everywhere), you may define options at the global scope, so you don't repeat them
// for every network.
"option-data": [
{
// For each IPv4 subnet you typically need to specify at least one router.
"name": "routers",
"data": "172.16.64.254"
},
{
// Using cloudflare or Quad9 is a reasonable option. Change this
// to your own DNS servers is you have them. Another popular
// choice is 8.8.8.8, owned by Google. Using third party DNS
// service raises some privacy concerns.
"name": "domain-name-servers",
"data": "172.16.0.1"
}
],
// Some devices should get a static address. Since the .100 - .199 range is dynamic,
// let's use the lower address space for this. There are many ways how reservation
// can be defined, but using MAC address (hw-address) is by far the most popular one.
// You can use client-id, duid and even custom defined flex-id that may use whatever
// parts of the packet you want to use as identifiers. Also, there are many more things
// you can specify in addition to just an IP address: extra options, next-server, hostname,
// assign device to client classes etc. See the Kea ARM, Section 8.3 for details.
// The reservations are subnet specific.
#"reservations": [
# {
# "hw-address": "1a:1b:1c:1d:1e:1f",
# "ip-address": "192.168.1.10"
# },
# {
# "client-id": "01:11:22:33:44:55:66",
# "ip-address": "192.168.1.11"
# }
#]
}
],
// fichier de logs
"loggers": [
{
// This section affects kea-dhcp4, which is the base logger for DHCPv4 component. It tells
// DHCPv4 server to write all log messages (on severity INFO or higher) to a file. The file
// will be rotated once it grows to 2MB and up to 4 files will be kept. The debuglevel
// (range 0 to 99) is used only when logging on DEBUG level.
"name": "kea-dhcp4",
"output_options": [
{
"output": "stdout",
"maxsize": 2048000,
"maxver": 4
}
],
"severity": "INFO",
"debuglevel": 0
}
]
}
}

View File

@ -1,12 +1,5 @@
---
- name: Restart isc-kea-dhcp4-server
ansible.builtin.service:
name: isc-kea-dhcp4-server.service
state: restarted
enabled: yes
- name: Restart isc-kea-ctrl-agent
ansible.builtin.service:
name: isc-kea-ctrl-agent.service
state: restarted
enabled: yes
- name: restart zabbix agent
service:
name: zabbix-agent
state: restarted
enabled: yes

View File

@ -1,43 +1,65 @@
---
- name: installation des dépendances
apt:
name:
- liblog4cplus-2.0.5
- libmariadb3
- libpq5
- mariadb-common
- mysql-common
state: present
- name: Preparation
ansible.builtin.shell: curl -1sLf 'https://dl.cloudsmith.io/public/isc/kea-2-4/setup.deb.sh' | sudo -E bash
- name: telechargemement du paquet isc-kea-common
get_url:
url: "https://dl.cloudsmith.io/public/isc/kea-2-4/deb/debian/pool/bookworm/main/i/is/isc-kea-common_2.4.1-isc20231123184533/isc-kea-common_2.4.1-isc20231123184533_amd64.deb"
dest: "/tmp"
- name: telechargement du paquet isc-kea-dhcp4
get_url:
url: "https://dl.cloudsmith.io/public/isc/kea-2-4/deb/debian/pool/bookworm/main/i/is/isc-kea-dhcp4_2.4.1-isc20231123184533/isc-kea-dhcp4_2.4.1-isc20231123184533_amd64.deb"
dest: "/tmp"
- name: Update apt
ansible.builtin.apt:
update_cache: yes
- name: telechargement du paquet isc-kea-ctrl-agent
get_url:
url: "https://dl.cloudsmith.io/public/isc/kea-2-4/deb/debian/pool/bookworm/main/i/is/isc-kea-ctrl-agent_2.4.1-isc20231123184533/isc-kea-ctrl-agent_2.4.1-isc20231123184533_amd64.deb"
dest: "/tmp"
#- name: Installation paquet isc-kea-common
# ansible.builtin.apt:
# deb: isc-kea-common
# state: present
- name: telechargement du paquet isc-kea-hooks
get_url:
url: "https://dl.cloudsmith.io/public/isc/kea-2-4/deb/debian/pool/bookworm/main/i/is/isc-kea-hooks_2.4.1-isc20231123184533/isc-kea-hooks_2.4.1-isc20231123184533_amd64.deb"
dest: "/tmp"
- name: Update apt
apt:
update_cache: yes
- name: Installation paquet isc-kea-common
apt:
deb: "/tmp/isc-kea-common_2.4.1-isc20231123184533_amd64.deb"
state: present
- name: Installation isc-kea-dhcp4
ansible.builtin.apt:
name: isc-kea-dhcp4-server
state: present
- name: Installation isc-kea-ctrl-agent
ansible.builtin.apt:
name: isc-kea-ctrl-agent
state: present
- name: Installation isc-kea-dhcp4
apt:
deb: "/tmp/isc-kea-dhcp4_2.4.1-isc20231123184533_amd64.deb"
state: present
- name: Installation isc-kea-hooks
ansible.builtin.apt:
name: isc-kea-hooks
state: present
- name: Installation isc-kea-ctrl-agent
apt:
deb: "/tmp/isc-kea-ctrl-agent_2.4.1-isc20231123184533_amd64.deb"
state: present
- name: Generation ---- du fichier de configuration kea-ctrl-agent
ansible.builtin.template:
src: kea-ctrl-agent.conf.j2
dest: /etc/kea/kea-ctrl-agent.conf
notify:
- Restart isc-kea-ctrl-agent
- name: Generation du fichier de configuration kea-dhcp4.conf
ansible.builtin.template:
src: kea-dhcp4.conf.j2
dest: /etc/kea/kea-dhcp4.conf
notify:
- Restart isc-kea-dhcp4-server
- name: Installation isc-kea-ctrl-agent
apt:
deb: "/tmp/isc-kea-ctrl-agent_2.4.1-isc20231123184533_amd64.deb"
state: present
- name: Installation isc-kea-hooks
apt:
deb: "/tmp/isc-kea-ctrl-agent_2.4.1-isc20231123184533_amd64.deb"
state: present
- name: Installation isc-kea-hooks
apt:
deb: "/tmp/isc-kea-ctrl-agent_2.4.1-isc20231123184533_amd64.deb"
state: present

View File

@ -1,32 +0,0 @@
{
"Control-agent":
{
"http-host": "{{ kea_ctrl_address_this }}",
"http-port": 8000,
"control-sockets":
{
"dhcp4":
{
"socket-type": "unix",
"socket-name": "/tmp/kea4-ctrl-socket"
},
},
"loggers": [
{
"name": "kea-ctrl-agent",
"output_options": [
{
"output": "stdout",
"flush": true,
"maxsize": 204800,
"maxver": 4,
{% raw %} "pattern": "%d{%y.%m.%d %H:%M:%S.%q} %-5p [%c/%i] %m\n", {% endraw %}
}
],
"severity": "INFO",
"debuglevel": 0
}
]
}
}

View File

@ -1,226 +0,0 @@
// This is an example configuration of the Kea DHCPv4 server 1:
//
// - uses High Availability hook library and Lease Commands hook library
// to enable High Availability function for the DHCP server. This config
// file is for the primary (the active) server.
// - uses memfile, which stores lease data in a local CSV file
// - it assumes a single /24 addressing over a link that is directly reachable
// (no DHCP relays)
// - there is a handful of IP reservations
//
// It is expected to run with a standby (the passive) server, which has a very similar
// configuration. The only difference is that "this-server-name" must be set to "server2" on the
// other server. Also, the interface configuration depends on the network settings of the
// particular machine.
{
"Dhcp4": {
// Add names of your network interfaces to listen on.
"interfaces-config": {
// The DHCPv4 server listens on this interface. When changing this to
// the actual name of your interface, make sure to also update the
// interface parameter in the subnet definition below.
"interfaces": ["{{ kea_dhcp_int }}"]
},
// Control socket is required for communication between the Control
// Agent and the DHCP server. High Availability requires Control Agent
// to be running because lease updates are sent over the RESTful
// API between the HA peers.
"control-socket": {
"socket-type": "unix",
"socket-name": "/tmp/kea4-ctrl-socket"
},
// Use Memfile lease database backend to store leases in a CSV file.
// Depending on how Kea was compiled, it may also support SQL databases
// (MySQL and/or PostgreSQL). Those database backends require more
// parameters, like name, host and possibly user and password.
// There are dedicated examples for each backend. See Section 7.2.2 "Lease
// Storage" for details.
"lease-database": {
// Memfile is the simplest and easiest backend to use. It's an in-memory
// database with data being written to a CSV file. It is very similar to
// what ISC DHCP does.
"type": "memfile"
},
// Let's configure some global parameters. The home network is not very dynamic
// and there's no shortage of addresses, so no need to recycle aggressively.
"valid-lifetime": 43200, // leases will be valid for 12h
"renew-timer": 21600, // clients should renew every 6h
"rebind-timer": 32400, // clients should start looking for other servers after 9h
// Kea will clean up its database of expired leases once per hour. However, it
// will keep the leases in expired state for 2 days. This greatly increases the
// chances for returning devices to get the same address again. To guarantee that,
// use host reservation.
// If both "flush-reclaimed-timer-wait-time" and "hold-reclaimed-time" are
// not 0, when the client sends a release message the lease is expired
// instead of being deleted from lease storage.
"expired-leases-processing": {
"reclaim-timer-wait-time": 3600,
"hold-reclaimed-time": 172800,
"max-reclaim-leases": 0,
"max-reclaim-time": 0
},
// HA requires two hook libraries to be loaded: libdhcp_lease_cmds.so and
// libdhcp_ha.so. The former handles incoming lease updates from the HA peers.
// The latter implements high availability feature for Kea. Note the library name
// should be the same, but the path is OS specific.
"hooks-libraries": [
// The lease_cmds library must be loaded because HA makes use of it to
// deliver lease updates to the server as well as synchronize the
// lease database after failure.
{
"library": "/usr/lib/x86_64-linux-gnu/kea/hooks/libdhcp_lease_cmds.so"
},
{
// The HA hook library should be loaded.
"library": "/usr/lib/x86_64-linux-gnu/kea/hooks/libdhcp_ha.so",
"parameters": {
// Each server should have the same HA configuration, except for the
// "this-server-name" parameter.
"high-availability": [ {
// This parameter points to this server instance. The respective
// HA peers must have this parameter set to their own names.
"this-server-name": "{{ kea_this_server }}",
// The HA mode is set to hot-standby. In this mode, the active server handles
// all the traffic. The standby takes over if the primary becomes unavailable.
"mode": "hot-standby",
// Heartbeat is to be sent every 10 seconds if no other control
// commands are transmitted.
"heartbeat-delay": 10000,
// Maximum time for partner's response to a heartbeat, after which
// failure detection is started. This is specified in milliseconds.
// If we don't hear from the partner in 60 seconds, it's time to
// start worrying.
"max-response-delay": 30000,
// The following parameters control how the server detects the
// partner's failure. The ACK delay sets the threshold for the
// 'secs' field of the received discovers. This is specified in
// milliseconds.
"max-ack-delay": 5000,
// This specifies the number of clients which send messages to
// the partner but appear to not receive any response.
"max-unacked-clients": 0,
// This specifies the maximum timeout (in milliseconds) for the server
// to complete sync. If you have a large deployment (high tens or
// hundreds of thousands of clients), you may need to increase it
// further. The default value is 60000ms (60 seconds).
"sync-timeout": 60000,
"peers": [
// This is the configuration of this server instance.
{
"name": "{{ kea_srv1 }}",
// This specifies the URL of this server instance. The
// Control Agent must run along with this DHCPv4 server
// instance and the "http-host" and "http-port" must be
// set to the corresponding values.
"url": "http://{{ kea_ctrl_address1 }}:8000/",
// This server is primary. The other one must be
// secondary.
"role": "primary"
},
// This is the configuration of the secondary server.
{
"name": "{{ kea_srv2 }}",
// Specifies the URL on which the partner's control
// channel can be reached. The Control Agent is required
// to run on the partner's machine with "http-host" and
// "http-port" values set to the corresponding values.
"url": "http://{{ kea_ctrl_address2 }}:8000/",
// The other server is secondary. This one must be
// primary.
"role": "standby"
}
]
} ]
}
}
],
// This example contains a single subnet declaration.
"subnet4": [
{
// Subnet prefix.
"subnet": "172.16.64.0/24",
// There are no relays in this network, so we need to tell Kea that this subnet
// is reachable directly via the specified interface.
"interface": "enp0s9",
// Specify a dynamic address pool.
"pools": [
{
"pool": "172.16.64.100-172.16.64.150"
}
],
// These are options that are subnet specific. In most cases, you need to define at
// least routers option, as without this option your clients will not be able to reach
// their default gateway and will not have Internet connectivity. If you have many
// subnets and they share the same options (e.g. DNS servers typically is the same
// everywhere), you may define options at the global scope, so you don't repeat them
// for every network.
"option-data": [
{
// For each IPv4 subnet you typically need to specify at least one router.
"name": "routers",
"data": "172.16.64.254"
},
{
// Using cloudflare or Quad9 is a reasonable option. Change this
// to your own DNS servers is you have them. Another popular
// choice is 8.8.8.8, owned by Google. Using third party DNS
// service raises some privacy concerns.
"name": "domain-name-servers",
"data": "172.16.0.1"
}
],
// Some devices should get a static address. Since the .100 - .199 range is dynamic,
// let's use the lower address space for this. There are many ways how reservation
// can be defined, but using MAC address (hw-address) is by far the most popular one.
// You can use client-id, duid and even custom defined flex-id that may use whatever
// parts of the packet you want to use as identifiers. Also, there are many more things
// you can specify in addition to just an IP address: extra options, next-server, hostname,
// assign device to client classes etc. See the Kea ARM, Section 8.3 for details.
// The reservations are subnet specific.
#"reservations": [
# {
# "hw-address": "1a:1b:1c:1d:1e:1f",
# "ip-address": "192.168.1.10"
# },
# {
# "client-id": "01:11:22:33:44:55:66",
# "ip-address": "192.168.1.11"
# }
#]
}
],
// fichier de logs
"loggers": [
{
// This section affects kea-dhcp4, which is the base logger for DHCPv4 component. It tells
// DHCPv4 server to write all log messages (on severity INFO or higher) to a file. The file
// will be rotated once it grows to 2MB and up to 4 files will be kept. The debuglevel
// (range 0 to 99) is used only when logging on DEBUG level.
"name": "kea-dhcp4",
"output_options": [
{
"output": "stdout",
"maxsize": 2048000,
"maxver": 4
}
],
"severity": "INFO",
"debuglevel": 0
}
]
}
}

View File

@ -1,10 +0,0 @@
# Role lb-bd
***
lb-bd role for setting up the database of the WordPress server.
## Table of contents
1. What does the lb-bd role do?
## What does the lb-bd role do?
This role installs the `mariadb-server` package, then creates and configures the database named **wordpressdb**, opens port 3306 and creates the MySQL user **wordpressuser** with the password **wordpresspasswd**.
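For illustration only, a rough manual equivalent of what this role automates; the database, user and password names are the ones quoted above, but the exact SQL statements are an assumption:

```shell
# Rough manual equivalent of the lb-bd role (assumption): create the
# wordpressdb database and the wordpressuser account.
mysql -u root <<'SQL'
CREATE DATABASE IF NOT EXISTS wordpressdb;
CREATE USER IF NOT EXISTS 'wordpressuser'@'%' IDENTIFIED BY 'wordpresspasswd';
GRANT ALL PRIVILEGES ON wordpressdb.* TO 'wordpressuser'@'%';
FLUSH PRIVILEGES;
SQL
```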

View File

@ -1,22 +0,0 @@
# lb-front role
***
lb-front role for load balancing the WordPress web servers with HAProxy
## Table of contents
1. What does the lb-front role do?
2. Server installation order.
## What does the lb-front role do?
The lb-front role installs `haproxy` for load balancing and configures the `/etc/haproxy/haproxy.cfg` file.
The configuration uses round-robin, an algorithm that balances the requests between s-lb-web1 and s-lb-web2.
The website is reachable at <http://s-lb.gsb.adm>.
## Server installation order.
1. The s-lb server with haproxy, which "initialises" the subnets in the DMZ.
2. The s-lb-bd server, which hosts the WordPress database used by the web servers.
3. The s-nas server, which stores the WordPress files and shares them with the web servers over NFS. It also uses the database hosted on s-lb-bd.
4. The s-web1 and s-web2 servers, which install Apache2 and PHP and serve the WordPress site.
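A quick, hedged way to observe the round-robin distribution described above from a client; it assumes each backend serves a page that identifies it, which may not hold for a stock WordPress install:

```shell
# Send ten requests through the load balancer; with round-robin they should
# alternate between s-lb-web1 and s-lb-web2 (the 'web[12]' marker is an assumption).
for i in $(seq 1 10); do
  curl -s http://s-lb.gsb.adm/ | grep -i -o 'web[12]' | head -n1
done
# Validate the HAProxy configuration before reloading the service:
haproxy -c -f /etc/haproxy/haproxy.cfg
```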

View File

@ -0,0 +1,23 @@
port:
tcp:80:
listening: true
ip:
- 192.168.100.11
service:
haproxy:
enabled: true
running: true
sshd:
enabled: true
running: true
interface:
enp0s8:
exists: true
addrs:
- 192.168.100.11/24
mtu: 1500
enp0s9:
exists: true
addrs:
- 192.168.101.254/24
mtu: 1500

View File

@ -41,7 +41,7 @@ frontend proxypublic
backend fermeweb
balance roundrobin
option httpclose
option httpchk HEAD / HTTP/1.0
#option httpchk HEAD / HTTP/1.0
server s-lb-web1 192.168.101.1:80 check
server s-lb-web2 192.168.101.2:80 check

View File

@ -14,7 +14,7 @@
backend fermeweb
balance roundrobin
option httpclose
option httpchk HEAD / HTTP/1.0
#option httpchk HEAD / HTTP/1.0
server s-lb-web1 192.168.101.1:80 check
server s-lb-web2 192.168.101.2:80 check

View File

@ -1,10 +1,3 @@
# lb-nfs-client role
***
lb-nfs-client role for NFS access on the lb-web1 and lb-web2 servers.
## Table of contents
1. What does the lb-nfs-client role do?
## What does the lb-nfs-client role do?
This role installs the `nfs-common` package and mounts the /home/wordpress directory from s-nas into /var/www/html/wordpress on the web servers.
## NFS share
This role installs nfs and mounts the /home/wordpress directory from s-nas into /var/www/html/wordpress on the web servers.
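A hedged sketch of the resulting mount; the server address comes from the goss and mount entries earlier in this diff (192.168.102.253), while the target path follows this README, so treat the exact paths as assumptions:

```shell
# Rough manual equivalent of the lb-nfs-client role (assumption).
apt-get install -y nfs-common
mkdir -p /var/www/html/wordpress
mount -t nfs4 192.168.102.253:/home/wordpress /var/www/html/wordpress
# Or persistently, via an /etc/fstab entry such as:
# 192.168.102.253:/home/wordpress /var/www/html/wordpress nfs _netdev rw 0 0
```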

View File

@ -1,17 +1,10 @@
# Role lb-nfs-server
***
lb-nfs-server role for setting up the share of the WordPress configuration files.
## Table of contents
1. What does the lb-nfs-server role do?
## What does the lb-nfs-server role do?
# Role s-nas-server
## Installation of nfs-server and setup of the /home/wordpress share
This role:
* installs the `nfs-server` package
* installs **nfs-server**
* copies the **exports** configuration file to export the **/home/wordpress** directory
* unpacks WordPress into **/home/wordpress**
* restarts the `nfs-server` service
* configures WordPress database access in the `wp-config.php` file
The **/home/wordpress** directory is exported over NFS to the **n-dmz-db** subnet
* restarts the **nfs-server** service
* unpacks wordpress
### Goal
The **/home/wordpress** directory is exported via **nfs** on the **n-dmz-db** network
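To verify the export described above, a minimal sketch; the server address (192.168.102.253) is the one used in the goss files of this diff:

```shell
# On s-nas: list the active exports; from a web server: query the export list.
exportfs -v
showmount -e 192.168.102.253   # should list /home/wordpress
```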

View File

@ -16,7 +16,7 @@
- name: 20 - decompresse wordpress
unarchive:
src: http://s-adm.gsb.adm/gsbstore/wordpress-6.4.2-fr_FR.tar.gz
src: https://fr.wordpress.org/latest-fr_FR.tar.gz
dest: /home/
remote_src: yes

View File

@ -1,12 +1,3 @@
# lb-web role
***
lb-web role for serving and using the website.
## Table of contents
1. What does the lb-web role do?
## What does the lb-web role do?
This role installs the packages required by the website (`apache2`, `php` and `mariadb-client`), which let the web servers reach the WordPress database.
The website is reachable at http://s-lb.gsb.adm.
## Downloading and configuring WordPress
This role downloads wordpress from s-adm and then configures the wp-config.php file for the GSB setup.
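A hedged sketch of the download-and-configure step this role performs; the URL and archive name follow the role defaults shown just below, while the sed edits and the database host are assumptions about how wp-config.php ends up being filled in:

```shell
# Rough manual equivalent of the lb-web role (assumption).
cd /tmp
wget -nc http://s-adm.gsb.adm/gsbstore/wordpress-6.4.2-fr_FR.tar.gz
tar -xzf wordpress-6.4.2-fr_FR.tar.gz -C /var/www/html
cp /var/www/html/wordpress/wp-config-sample.php /var/www/html/wordpress/wp-config.php
sed -i 's/database_name_here/wordpressdb/; s/username_here/wordpressuser/; s/password_here/wordpresspasswd/' \
    /var/www/html/wordpress/wp-config.php
sed -i 's/localhost/192.168.102.50/' /var/www/html/wordpress/wp-config.php   # DB host (assumption)
```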

View File

@ -1,2 +1,2 @@
depl_url: "http://s-adm.gsb.adm/gsbstore/"
depl_wordpress: "wordpress-6.4.2-fr_FR.tar.gz"
depl_wordpress: "wordpress-6.1.1-fr_FR.tar.gz"

View File

@ -1,16 +1,8 @@
# Installing Nextcloud and the Traefik reverse proxy
## How the Nextcloud installation works
To get Nextcloud and Traefik working, docker must be set up. First, run the **getall** script on **s-adm**. Then, from the **/nxc** directory on **s-nxc**, launch **docker-compose.yaml**. Finally, add LDAP authentication to Nextcloud using the **s-win** AD.
Nextcloud and Traefik run on docker. For this playbook to work, docker must be installed.
# <p align="center">Installation procedure</p>
***
## 1. Docker installation
See: https://gitea.lyc-lecastel.fr/gsb/gsb2024/src/branch/main/roles/docker
## 2. How the s-nxc playbook works
## 1.
The playbook creates the **nxc** directory in root's home directory.
@ -18,11 +10,11 @@ Les fichiers "nextcloud.yml" et "traefik.yml" y seront copiés depuis le répert
Finally, the **certs** and **config** directories are created inside the nxc directory.
### 2.1 Copying the files
## 2. Copying the files
The playbook copies the files placed in "files" and puts them in the right directories.
The playbook copies the files placed in "files" and puts them in the right directories.
### 2.2 Generating the certificate
## 3. Generating the certificate
The playbook creates an **x509** certificate with **mkcert**, a tool for generating self-signed certificates. To do so, it downloads **mkcert** from **s-adm** (use the **getall** script).
@ -33,7 +25,7 @@ Pour créer le certificat, le playbook exécute les commandes (lancé depuis nxc
/usr/local/bin/mkcert -install # Installe mkcert
/usr/local/bin/mkcert -key-file key.pem -cert-file cert.pem "hôte.domaine.local" "*.domaine.local" #Crée le certificat le DNS spécifié
```
## 3. Launch
## 4. Launch
The playbook runs the "docker-compose" files, namely nextcloud.yml and traefik.yml, which start the two **docker** stacks.
@ -45,28 +37,22 @@ ATTENTION : Après avoir relancé la VM, executez le script "nxc-start.sh" afin
Once the script has finished, the site is available here: https://s-nxc.gsb.lan
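Once up, a minimal hedged sanity check from **s-nxc** itself; the directory and URL follow this README, and the expectation that `docker compose ps` lists the services there is an assumption:

```shell
# Hypothetical post-install check on s-nxc.
cd /root/nxc
docker compose ps                  # the Nextcloud/Traefik services should be running
curl -k -I https://s-nxc.gsb.lan/  # -k accepts the self-signed certificate
```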
## 4. Ajout authentification LDAP
## 5. Ajout authentification LDAP
Pour ajouter l'authentification LDAP au Nextcloud, depuis **n-user** il faut :
* Une fois l'installation de Nextcloud terminé, cliquez sur le profil et "Application"
Pour ajouter l'authentification LDAP au Nextcloud, il faut :
* Une fois l'installation de Nextcloud terminé, cliquez sur le profil et Application
* Dans vos applications, descendre et activer "LDAP user and group backend"
* Puis cliquer sur le profil, puis "Paramètres d'administration" et dans "Administration" cliquer sur "Intégration LDAP/AD"
* Puis cliquer sur le profil, puis Paramètres d'administration et dans Administration cliquer sur Intégration LDAP/AD
* Une fois sur la page d'intégration LDAP/AD :
* In Host, enter:
> **ldap://s-win.gsb.lan**
* Click on "Detect Port" (port 389 should normally appear)
> ldap://s-win.gsb.lan
* Click on Detect Port (port 389 should normally appear)
* In User DN, enter:
> **CN=nextcloud,CN=Users,DC=gsb,DC=lan**
> CN=nextcloud,CN=Users,DC=GSB,DC=LAN
* Password:
> **Azerty1+**
* And in "One Base DN per line":
> **DC=gsb,DC=lan**
* Click on "Detect Base DN" (it should normally appear automatically)
* Once the configuration is done, click on "Continue", then click Continue 3 more times
* When you reach "Groups", you can log out of the Admin account and log in with an account that is in the AD.
## Contributors
- LG
- CH
> Azerty1+
* And in One Base DN per line:
> DC=GSB,DC=LAN
* Once the configuration checks out OK
* Once the configuration is finished, click Continue 3 times
* When you reach Groups, you can log out of the Admin account and log in with an account that is in the AD.
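The same settings can also be pushed from the command line with Nextcloud's occ tool, which may be handier for automation. This is only a sketch: the config ID (s01), the compose service name (app) and the values below mirror the GUI steps above and should be adapted if the deployment differs.

```bash
cd /root/nxc
# Enable the LDAP backend application
docker compose exec -u www-data app php occ app:enable user_ldap
# Create an empty LDAP configuration (normally gets the ID s01) and fill it in
docker compose exec -u www-data app php occ ldap:create-empty-config
docker compose exec -u www-data app php occ ldap:set-config s01 ldapHost "ldap://s-win.gsb.lan"
docker compose exec -u www-data app php occ ldap:set-config s01 ldapPort 389
docker compose exec -u www-data app php occ ldap:set-config s01 ldapAgentName "CN=nextcloud,CN=Users,DC=gsb,DC=lan"
docker compose exec -u www-data app php occ ldap:set-config s01 ldapAgentPassword "Azerty1+"
docker compose exec -u www-data app php occ ldap:set-config s01 ldapBase "DC=gsb,DC=lan"
docker compose exec -u www-data app php occ ldap:set-config s01 ldapConfigurationActive 1
```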

View File

@ -53,8 +53,8 @@ services:
image: nextcloud
container_name: app
restart: always
#ports:
#- 8081:80
ports:
- 8081:80
#links:
depends_on:
- db

View File

@ -1,22 +0,0 @@
#!/bin/bash
# Mettre le serveur NextCloud en mode maintenance
docker compose exec -u www-data app php occ maintenance:mode --on
# Extraire les dossiers de sauvegarde
cd /root/nxc
# Copie locale de la sauvegarde
rsync -Aavx nextcloud/ nextcloud-dirbkp/
# Base de données MySQL/MariaDB
docker compose exec db mysqldump -u nextcloud -pAzerty1+ nextcloud > nextcloud-sqlbkp.bak
# Sortir du mode maintenance
docker compose exec -u www-data app php occ maintenance:mode --off
# création d'une archive
tar cvfz nxc.tgz nextcloud-sqlbkp.bak nextcloud-dirbkp
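For completeness, restoring from such a backup is roughly the reverse of the script above. This is only a sketch, reusing the same container names, credentials and paths as the script; test it before relying on it.

```bash
cd /root/nxc
# Put Nextcloud into maintenance mode while restoring
docker compose exec -u www-data app php occ maintenance:mode --on
# Restore the data directory from the local copy
rsync -Aavx nextcloud-dirbkp/ nextcloud/
# Restore the MySQL/MariaDB database dump
docker compose exec -T db mysql -u nextcloud -pAzerty1+ nextcloud < nextcloud-sqlbkp.bak
# Leave maintenance mode
docker compose exec -u www-data app php occ maintenance:mode --off
```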

View File

@ -21,11 +21,11 @@
- name: Copie de dynamic.yml
copy:
src: dynamic.yml
src: dynamic.yml
dest: /root/nxc/config
- name: Copie de docker-compose.yml
copy:
copy:
src: docker-compose.yml
dest: /root/nxc
@ -69,14 +69,8 @@
args:
chdir: /root/nxc
- name: vérification si le réseau proxy existe
command: docker network ls --filter name=proxy
register: net_proxy
- name: création du réseau proxy
- name: Creation reseau docker proxy
command: docker network create proxy
# when: net_proxy.stdout.find('proxy') == -1
when: "'proxy' not in net_proxy.stdout"
#- name: Démarrage du docker-compose...
#command: /bin/bash docker-compose up -d

View File

@ -1,14 +0,0 @@
# Kea role
***
Kea role for DHCP high availability
## Table of contents
1. [What does the Kea role do?]
## What does the Kea role do?
It configures the Kea servers in high-availability mode.
### Installing and configuring Kea
The Kea role installs the kea dhcp4, hook and admin packages; once they are installed, the two Kea servers are configured so that they hand out the IP addresses for n-user and run in high availability.

View File

@ -1,8 +0,0 @@
#variable kea
kea_ver: "2.4.1"
kea_dbname: ""
kaa_dbuser: ""
kea_dbpasswd: ""
kea_dhcp4_dir: "/etc/kea/kea-dhcp4.conf"
kea_ctrl_dir: "/etc/kea/kea-ctrl-agent.conf"

View File

@ -1,66 +0,0 @@
// This is an example of a configuration for Control-Agent (CA) listening
// for incoming HTTP traffic. This is necessary for handling API commands,
// in particular lease update commands needed for HA setup.
{
"Control-agent":
{
// We need to specify where the agent should listen to incoming HTTP
// queries.
"http-host": "172.16.64.20",
// This specifies the port CA will listen on.
"http-port": 8000,
"control-sockets":
{
// This is how the Agent can communicate with the DHCPv4 server.
"dhcp4":
{
"comment": "socket to DHCPv4 server",
"socket-type": "unix",
"socket-name": "/tmp/kea4-ctrl-socket"
},
// Location of the DHCPv6 command channel socket.
# "dhcp6":
# {
# "socket-type": "unix",
# "socket-name": "/tmp/kea6-ctrl-socket"
# },
// Location of the D2 command channel socket.
# "d2":
# {
# "socket-type": "unix",
# "socket-name": "/tmp/kea-ddns-ctrl-socket",
# "user-context": { "in-use": false }
# }
},
// Similar to other Kea components, CA also uses logging.
"loggers": [
{
"name": "kea-ctrl-agent",
"output_options": [
{
"output": "stdout",
// Several additional parameters are possible in addition
// to the typical output. Flush determines whether logger
// flushes output to a file. Maxsize determines maximum
// filesize before the file is rotated. maxver
// specifies the maximum number of rotated files being
// kept.
"flush": true,
"maxsize": 204800,
"maxver": 4,
// We use pattern to specify custom log message layout
"pattern": "%d{%y.%m.%d %H:%M:%S.%q} %-5p [%c/%i] %m\n"
}
],
"severity": "INFO",
"debuglevel": 0 // debug level only applies when severity is set to DEBUG.
}
]
}
}

View File

@ -1,226 +0,0 @@
// This is an example configuration of the Kea DHCPv4 server 1:
//
// - uses High Availability hook library and Lease Commands hook library
// to enable High Availability function for the DHCP server. This config
// file is for the primary (the active) server.
// - uses memfile, which stores lease data in a local CSV file
// - it assumes a single /24 addressing over a link that is directly reachable
// (no DHCP relays)
// - there is a handful of IP reservations
//
// It is expected to run with a standby (the passive) server, which has a very similar
// configuration. The only difference is that "this-server-name" must be set to "server2" on the
// other server. Also, the interface configuration depends on the network settings of the
// particular machine.
{
"Dhcp4": {
// Add names of your network interfaces to listen on.
"interfaces-config": {
// The DHCPv4 server listens on this interface. When changing this to
// the actual name of your interface, make sure to also update the
// interface parameter in the subnet definition below.
"interfaces": [ "enp0s9" ]
},
// Control socket is required for communication between the Control
// Agent and the DHCP server. High Availability requires Control Agent
// to be running because lease updates are sent over the RESTful
// API between the HA peers.
"control-socket": {
"socket-type": "unix",
"socket-name": "/tmp/kea4-ctrl-socket"
},
// Use Memfile lease database backend to store leases in a CSV file.
// Depending on how Kea was compiled, it may also support SQL databases
// (MySQL and/or PostgreSQL). Those database backends require more
// parameters, like name, host and possibly user and password.
// There are dedicated examples for each backend. See Section 7.2.2 "Lease
// Storage" for details.
"lease-database": {
// Memfile is the simplest and easiest backend to use. It's an in-memory
// database with data being written to a CSV file. It is very similar to
// what ISC DHCP does.
"type": "memfile"
},
// Let's configure some global parameters. The home network is not very dynamic
// and there's no shortage of addresses, so no need to recycle aggressively.
"valid-lifetime": 43200, // leases will be valid for 12h
"renew-timer": 21600, // clients should renew every 6h
"rebind-timer": 32400, // clients should start looking for other servers after 9h
// Kea will clean up its database of expired leases once per hour. However, it
// will keep the leases in expired state for 2 days. This greatly increases the
// chances for returning devices to get the same address again. To guarantee that,
// use host reservation.
// If both "flush-reclaimed-timer-wait-time" and "hold-reclaimed-time" are
// not 0, when the client sends a release message the lease is expired
// instead of being deleted from lease storage.
"expired-leases-processing": {
"reclaim-timer-wait-time": 3600,
"hold-reclaimed-time": 172800,
"max-reclaim-leases": 0,
"max-reclaim-time": 0
},
// HA requires two hook libraries to be loaded: libdhcp_lease_cmds.so and
// libdhcp_ha.so. The former handles incoming lease updates from the HA peers.
// The latter implements high availability feature for Kea. Note the library name
// should be the same, but the path is OS specific.
"hooks-libraries": [
// The lease_cmds library must be loaded because HA makes use of it to
// deliver lease updates to the server as well as synchronize the
// lease database after failure.
{
"library": "/usr/local/lib/kea/hooks/libdhcp_lease_cmds.so"
},
{
// The HA hook library should be loaded.
"library": "/usr/local/lib/kea/hooks/libdhcp_ha.so",
"parameters": {
// Each server should have the same HA configuration, except for the
// "this-server-name" parameter.
"high-availability": [ {
// This parameter points to this server instance. The respective
// HA peers must have this parameter set to their own names.
"this-server-name": "s-kea1.gsb.lan",
// The HA mode is set to hot-standby. In this mode, the active server handles
// all the traffic. The standby takes over if the primary becomes unavailable.
"mode": "hot-standby",
// Heartbeat is to be sent every 10 seconds if no other control
// commands are transmitted.
"heartbeat-delay": 10000,
// Maximum time for partner's response to a heartbeat, after which
// failure detection is started. This is specified in milliseconds.
// If we don't hear from the partner in 60 seconds, it's time to
// start worrying.
"max-response-delay": 30000,
// The following parameters control how the server detects the
// partner's failure. The ACK delay sets the threshold for the
// 'secs' field of the received discovers. This is specified in
// milliseconds.
"max-ack-delay": 5000,
// This specifies the number of clients which send messages to
// the partner but appear to not receive any response.
"max-unacked-clients": 0,
// This specifies the maximum timeout (in milliseconds) for the server
// to complete sync. If you have a large deployment (high tens or
// hundreds of thousands of clients), you may need to increase it
// further. The default value is 60000ms (60 seconds).
"sync-timeout": 60000,
"peers": [
// This is the configuration of this server instance.
{
"name": "s-kea1.gsb.lan",
// This specifies the URL of this server instance. The
// Control Agent must run along with this DHCPv4 server
// instance and the "http-host" and "http-port" must be
// set to the corresponding values.
"url": "http://172.16.64.20:8000/",
// This server is primary. The other one must be
// secondary.
"role": "primary"
},
// This is the configuration of the secondary server.
{
"name": "s-kea2.gsb.lan",
// Specifies the URL on which the partner's control
// channel can be reached. The Control Agent is required
// to run on the partner's machine with "http-host" and
// "http-port" values set to the corresponding values.
"url": "http://172.16.64.21:8000/",
// The other server is secondary. This one must be
// primary.
"role": "standby"
}
]
} ]
}
}
],
// This example contains a single subnet declaration.
"subnet4": [
{
// Subnet prefix.
"subnet": "172.16.64.0/24",
// There are no relays in this network, so we need to tell Kea that this subnet
// is reachable directly via the specified interface.
"interface": "enp0s9",
// Specify a dynamic address pool.
"pools": [
{
"pool": "172.16.64.100-172.16.64.150"
}
],
// These are options that are subnet specific. In most cases, you need to define at
// least routers option, as without this option your clients will not be able to reach
// their default gateway and will not have Internet connectivity. If you have many
// subnets and they share the same options (e.g. DNS servers typically is the same
// everywhere), you may define options at the global scope, so you don't repeat them
// for every network.
"option-data": [
{
// For each IPv4 subnet you typically need to specify at least one router.
"name": "routers",
"data": "172.16.64.254"
},
{
// Using cloudflare or Quad9 is a reasonable option. Change this
// to your own DNS servers is you have them. Another popular
// choice is 8.8.8.8, owned by Google. Using third party DNS
// service raises some privacy concerns.
"name": "domain-name-servers",
"data": "172.16.0.1"
}
],
// Some devices should get a static address. Since the .100 - .199 range is dynamic,
// let's use the lower address space for this. There are many ways how reservation
// can be defined, but using MAC address (hw-address) is by far the most popular one.
// You can use client-id, duid and even custom defined flex-id that may use whatever
// parts of the packet you want to use as identifiers. Also, there are many more things
// you can specify in addition to just an IP address: extra options, next-server, hostname,
// assign device to client classes etc. See the Kea ARM, Section 8.3 for details.
// The reservations are subnet specific.
#"reservations": [
# {
# "hw-address": "1a:1b:1c:1d:1e:1f",
# "ip-address": "192.168.1.10"
# },
# {
# "client-id": "01:11:22:33:44:55:66",
# "ip-address": "192.168.1.11"
# }
#]
}
],
// fichier de logs
"loggers": [
{
// This section affects kea-dhcp4, which is the base logger for DHCPv4 component. It tells
// DHCPv4 server to write all log messages (on severity INFO or higher) to a file. The file
// will be rotated once it grows to 2MB and up to 4 files will be kept. The debuglevel
// (range 0 to 99) is used only when logging on DEBUG level.
"name": "kea-dhcp4",
"output_options": [
{
"output": "stdout",
"maxsize": 2048000,
"maxver": 4
}
],
"severity": "INFO",
"debuglevel": 0
}
]
}
}

View File

@ -1,18 +0,0 @@
---
- name: restart isc-kea-dhcp4-server
service:
name: isc-kea-dhcp4-server.service
state: restarted
enabled: yes
- name: restart isc-kea-ctrl-agent
service:
name: isc-kea-ctrl-agent.service
state: restarted
enabled: yes
- name: restart mariadb-server
service:
name: mariadb-server
state: restarted
enabled: yes

View File

@ -1,75 +0,0 @@
---
- name: installation des dépendances
apt:
name:
- liblog4cplus-2.0.5
- libmariadb3
- libpq5
- mariadb-common
- mysql-common
state: present
- name: telechargemement du paquet isc-kea-common
get_url:
url: "https://dl.cloudsmith.io/public/isc/kea-2-4/deb/debian/pool/bookworm/main/i/is/isc-kea-common_2.4.1-isc20231123184533/isc-kea-common_2.4.1-isc20231123184533_amd64.deb"
dest: "/tmp"
- name: telechargement du paquet isc-kea-dhcp4
get_url:
url: "https://dl.cloudsmith.io/public/isc/kea-2-4/deb/debian/pool/bookworm/main/i/is/isc-kea-dhcp4_2.4.1-isc20231123184533/isc-kea-dhcp4_2.4.1-isc20231123184533_amd64.deb"
dest: "/tmp"
- name: telechargement du paquet isc-kea-ctrl-agent
get_url:
url: "https://dl.cloudsmith.io/public/isc/kea-2-4/deb/debian/pool/bookworm/main/i/is/isc-kea-ctrl-agent_2.4.1-isc20231123184533/isc-kea-ctrl-agent_2.4.1-isc20231123184533_amd64.deb"
dest: "/tmp"
- name: telechargement du paquet isc-kea-hooks
get_url:
url: "https://dl.cloudsmith.io/public/isc/kea-2-4/deb/debian/pool/bookworm/main/i/is/isc-kea-hooks_2.4.1-isc20231123184533/isc-kea-hooks_2.4.1-isc20231123184533_amd64.deb"
dest: "/tmp"
- name: Update apt
apt:
update_cache: yes
- name: Installation paquet isc-kea-common
apt:
deb: "/tmp/isc-kea-common_2.4.1-isc20231123184533_amd64.deb"
state: present
- name: Installation isc-kea-dhcp4
apt:
deb: "/tmp/isc-kea-dhcp4_2.4.1-isc20231123184533_amd64.deb"
state: present
- name: Installation isc-kea-ctrl-agent
apt:
deb: "/tmp/isc-kea-ctrl-agent_2.4.1-isc20231123184533_amd64.deb"
state: present
- name: Installation isc-kea-hooks
apt:
deb: "/tmp/isc-kea-hooks_2.4.1-isc20231123184533_amd64.deb"
state: present
- name: Copie du repertoire des hooks dans le repertoire /usr/local/bin/kea/hooks
copy:
src: /usr/lib/x86_64-linux-gnu/kea/
dest: /usr/local/lib/kea/
- name: Copie du fichier de configuration kea-dhcp4.conf
copy:
src: kea-dhcp4.conf
dest: /etc/kea/kea-dhcp4.conf
notify:
- restart isc-kea-dhcp4-server
- name: Copie du fichier de configuration kea-ctrl-agent
copy:
src: kea-ctrl-agent.conf
dest: /etc/kea/kea-ctrl-agent.conf
notify:
- restart isc-kea-ctrl-agent

View File

@ -1,14 +0,0 @@
# Kea role
***
Kea role for DHCP high availability
## Table of contents
1. [What does the Kea role do?]
## What does the Kea role do?
It configures the Kea servers in high-availability mode.
### Installing and configuring Kea
The Kea role installs the kea dhcp4, hook and admin packages; once they are installed, the two Kea servers are configured so that they hand out the IP addresses for n-user and run in high availability.

View File

@ -1,8 +0,0 @@
#variable kea
kea_ver: "2.4.1"
kea_dbname: ""
kaa_dbuser: ""
kea_dbpasswd: ""
kea_dhcp4_dir: "/etc/kea/kea-dhcp4.conf"
kea_ctrl_dir: "/etc/kea/kea-ctrl-agent.conf"

View File

@ -1,66 +0,0 @@
// This is an example of a configuration for Control-Agent (CA) listening
// for incoming HTTP traffic. This is necessary for handling API commands,
// in particular lease update commands needed for HA setup.
{
"Control-agent":
{
// We need to specify where the agent should listen to incoming HTTP
// queries.
"http-host": "172.16.64.21",
// This specifies the port CA will listen on.
"http-port": 8000,
"control-sockets":
{
// This is how the Agent can communicate with the DHCPv4 server.
"dhcp4":
{
"comment": "socket to DHCPv4 server",
"socket-type": "unix",
"socket-name": "/tmp/kea4-ctrl-socket"
},
// Location of the DHCPv6 command channel socket.
# "dhcp6":
# {
# "socket-type": "unix",
# "socket-name": "/tmp/kea6-ctrl-socket"
# },
// Location of the D2 command channel socket.
# "d2":
# {
# "socket-type": "unix",
# "socket-name": "/tmp/kea-ddns-ctrl-socket",
# "user-context": { "in-use": false }
# }
},
// Similar to other Kea components, CA also uses logging.
"loggers": [
{
"name": "kea-ctrl-agent",
"output_options": [
{
"output": "stdout",
// Several additional parameters are possible in addition
// to the typical output. Flush determines whether logger
// flushes output to a file. Maxsize determines maximum
// filesize before the file is rotated. maxver
// specifies the maximum number of rotated files being
// kept.
"flush": true,
"maxsize": 204800,
"maxver": 4,
// We use pattern to specify custom log message layout
"pattern": "%d{%y.%m.%d %H:%M:%S.%q} %-5p [%c/%i] %m\n"
}
],
"severity": "INFO",
"debuglevel": 0 // debug level only applies when severity is set to DEBUG.
}
]
}
}

View File

@ -1,226 +0,0 @@
// This is an example configuration of the Kea DHCPv4 server 1:
//
// - uses High Availability hook library and Lease Commands hook library
// to enable High Availability function for the DHCP server. This config
// file is for the primary (the active) server.
// - uses memfile, which stores lease data in a local CSV file
// - it assumes a single /24 addressing over a link that is directly reachable
// (no DHCP relays)
// - there is a handful of IP reservations
//
// It is expected to run with a standby (the passive) server, which has a very similar
// configuration. The only difference is that "this-server-name" must be set to "server2" on the
// other server. Also, the interface configuration depends on the network settings of the
// particular machine.
{
"Dhcp4": {
// Add names of your network interfaces to listen on.
"interfaces-config": {
// The DHCPv4 server listens on this interface. When changing this to
// the actual name of your interface, make sure to also update the
// interface parameter in the subnet definition below.
"interfaces": [ "enp0s9" ]
},
// Control socket is required for communication between the Control
// Agent and the DHCP server. High Availability requires Control Agent
// to be running because lease updates are sent over the RESTful
// API between the HA peers.
"control-socket": {
"socket-type": "unix",
"socket-name": "/tmp/kea4-ctrl-socket"
},
// Use Memfile lease database backend to store leases in a CSV file.
// Depending on how Kea was compiled, it may also support SQL databases
// (MySQL and/or PostgreSQL). Those database backends require more
// parameters, like name, host and possibly user and password.
// There are dedicated examples for each backend. See Section 7.2.2 "Lease
// Storage" for details.
"lease-database": {
// Memfile is the simplest and easiest backend to use. It's an in-memory
// database with data being written to a CSV file. It is very similar to
// what ISC DHCP does.
"type": "memfile"
},
// Let's configure some global parameters. The home network is not very dynamic
// and there's no shortage of addresses, so no need to recycle aggressively.
"valid-lifetime": 43200, // leases will be valid for 12h
"renew-timer": 21600, // clients should renew every 6h
"rebind-timer": 32400, // clients should start looking for other servers after 9h
// Kea will clean up its database of expired leases once per hour. However, it
// will keep the leases in expired state for 2 days. This greatly increases the
// chances for returning devices to get the same address again. To guarantee that,
// use host reservation.
// If both "flush-reclaimed-timer-wait-time" and "hold-reclaimed-time" are
// not 0, when the client sends a release message the lease is expired
// instead of being deleted from lease storage.
"expired-leases-processing": {
"reclaim-timer-wait-time": 3600,
"hold-reclaimed-time": 172800,
"max-reclaim-leases": 0,
"max-reclaim-time": 0
},
// HA requires two hook libraries to be loaded: libdhcp_lease_cmds.so and
// libdhcp_ha.so. The former handles incoming lease updates from the HA peers.
// The latter implements high availability feature for Kea. Note the library name
// should be the same, but the path is OS specific.
"hooks-libraries": [
// The lease_cmds library must be loaded because HA makes use of it to
// deliver lease updates to the server as well as synchronize the
// lease database after failure.
{
"library": "/usr/local/lib/kea/hooks/libdhcp_lease_cmds.so"
},
{
// The HA hook library should be loaded.
"library": "/usr/local/lib/kea/hooks/libdhcp_ha.so",
"parameters": {
// Each server should have the same HA configuration, except for the
// "this-server-name" parameter.
"high-availability": [ {
// This parameter points to this server instance. The respective
// HA peers must have this parameter set to their own names.
"this-server-name": "s-kea2.gsb.lan",
// The HA mode is set to hot-standby. In this mode, the active server handles
// all the traffic. The standby takes over if the primary becomes unavailable.
"mode": "hot-standby",
// Heartbeat is to be sent every 10 seconds if no other control
// commands are transmitted.
"heartbeat-delay": 10000,
// Maximum time for partner's response to a heartbeat, after which
// failure detection is started. This is specified in milliseconds.
// If we don't hear from the partner in 60 seconds, it's time to
// start worrying.
"max-response-delay": 30000,
// The following parameters control how the server detects the
// partner's failure. The ACK delay sets the threshold for the
// 'secs' field of the received discovers. This is specified in
// milliseconds.
"max-ack-delay": 5000,
// This specifies the number of clients which send messages to
// the partner but appear to not receive any response.
"max-unacked-clients": 0,
// This specifies the maximum timeout (in milliseconds) for the server
// to complete sync. If you have a large deployment (high tens or
// hundreds of thousands of clients), you may need to increase it
// further. The default value is 60000ms (60 seconds).
"sync-timeout": 60000,
"peers": [
// This is the configuration of this server instance.
{
"name": "s-kea1.gsb.lan",
// This specifies the URL of this server instance. The
// Control Agent must run along with this DHCPv4 server
// instance and the "http-host" and "http-port" must be
// set to the corresponding values.
"url": "http://172.16.64.20:8000/",
// This server is primary. The other one must be
// secondary.
"role": "primary"
},
// This is the configuration of the secondary server.
{
"name": "s-kea2.gsb.lan",
// Specifies the URL on which the partner's control
// channel can be reached. The Control Agent is required
// to run on the partner's machine with "http-host" and
// "http-port" values set to the corresponding values.
"url": "http://172.16.64.21:8000/",
// The other server is secondary. This one must be
// primary.
"role": "standby"
}
]
} ]
}
}
],
// This example contains a single subnet declaration.
"subnet4": [
{
// Subnet prefix.
"subnet": "172.16.64.0/24",
// There are no relays in this network, so we need to tell Kea that this subnet
// is reachable directly via the specified interface.
"interface": "enp0s9",
// Specify a dynamic address pool.
"pools": [
{
"pool": "172.16.64.100-172.16.64.150"
}
],
// These are options that are subnet specific. In most cases, you need to define at
// least routers option, as without this option your clients will not be able to reach
// their default gateway and will not have Internet connectivity. If you have many
// subnets and they share the same options (e.g. DNS servers typically is the same
// everywhere), you may define options at the global scope, so you don't repeat them
// for every network.
"option-data": [
{
// For each IPv4 subnet you typically need to specify at least one router.
"name": "routers",
"data": "172.16.64.254"
},
{
// Using cloudflare or Quad9 is a reasonable option. Change this
// to your own DNS servers is you have them. Another popular
// choice is 8.8.8.8, owned by Google. Using third party DNS
// service raises some privacy concerns.
"name": "domain-name-servers",
"data": "172.16.0.1"
}
],
// Some devices should get a static address. Since the .100 - .199 range is dynamic,
// let's use the lower address space for this. There are many ways how reservation
// can be defined, but using MAC address (hw-address) is by far the most popular one.
// You can use client-id, duid and even custom defined flex-id that may use whatever
// parts of the packet you want to use as identifiers. Also, there are many more things
// you can specify in addition to just an IP address: extra options, next-server, hostname,
// assign device to client classes etc. See the Kea ARM, Section 8.3 for details.
// The reservations are subnet specific.
#"reservations": [
# {
# "hw-address": "1a:1b:1c:1d:1e:1f",
# "ip-address": "192.168.1.10"
# },
# {
# "client-id": "01:11:22:33:44:55:66",
# "ip-address": "192.168.1.11"
# }
#]
}
],
// fichier de logs
"loggers": [
{
// This section affects kea-dhcp4, which is the base logger for DHCPv4 component. It tells
// DHCPv4 server to write all log messages (on severity INFO or higher) to a file. The file
// will be rotated once it grows to 2MB and up to 4 files will be kept. The debuglevel
// (range 0 to 99) is used only when logging on DEBUG level.
"name": "kea-dhcp4",
"output_options": [
{
"output": "stdout",
"maxsize": 2048000,
"maxver": 4
}
],
"severity": "INFO",
"debuglevel": 0
}
]
}
}

View File

@ -1,18 +0,0 @@
---
- name: restart isc-kea-dhcp4-server
service:
name: isc-kea-dhcp4-server.service
state: restarted
enabled: yes
- name: restart isc-kea-ctrl-agent
service:
name: isc-kea-ctrl-agent.service
state: restarted
enabled: yes
- name: restart mariadb-server
service:
name: mariadb-server
state: restarted
enabled: yes

View File

@ -1,75 +0,0 @@
---
- name: installation des dépendances
apt:
name:
- liblog4cplus-2.0.5
- libmariadb3
- libpq5
- mariadb-common
- mysql-common
state: present
- name: telechargemement du paquet isc-kea-common
get_url:
url: "https://dl.cloudsmith.io/public/isc/kea-2-4/deb/debian/pool/bookworm/main/i/is/isc-kea-common_2.4.1-isc20231123184533/isc-kea-common_2.4.1-isc20231123184533_amd64.deb"
dest: "/tmp"
- name: telechargement du paquet isc-kea-dhcp4
get_url:
url: "https://dl.cloudsmith.io/public/isc/kea-2-4/deb/debian/pool/bookworm/main/i/is/isc-kea-dhcp4_2.4.1-isc20231123184533/isc-kea-dhcp4_2.4.1-isc20231123184533_amd64.deb"
dest: "/tmp"
- name: telechargement du paquet isc-kea-ctrl-agent
get_url:
url: "https://dl.cloudsmith.io/public/isc/kea-2-4/deb/debian/pool/bookworm/main/i/is/isc-kea-ctrl-agent_2.4.1-isc20231123184533/isc-kea-ctrl-agent_2.4.1-isc20231123184533_amd64.deb"
dest: "/tmp"
- name: telechargement du paquet isc-kea-hooks
get_url:
url: "https://dl.cloudsmith.io/public/isc/kea-2-4/deb/debian/pool/bookworm/main/i/is/isc-kea-hooks_2.4.1-isc20231123184533/isc-kea-hooks_2.4.1-isc20231123184533_amd64.deb"
dest: "/tmp"
- name: Update apt
apt:
update_cache: yes
- name: Installation paquet isc-kea-common
apt:
deb: "/tmp/isc-kea-common_2.4.1-isc20231123184533_amd64.deb"
state: present
- name: Installation isc-kea-dhcp4
apt:
deb: "/tmp/isc-kea-dhcp4_2.4.1-isc20231123184533_amd64.deb"
state: present
- name: Installation isc-kea-ctrl-agent
apt:
deb: "/tmp/isc-kea-ctrl-agent_2.4.1-isc20231123184533_amd64.deb"
state: present
- name: Installation isc-kea-hooks
apt:
deb: "/tmp/isc-kea-hooks_2.4.1-isc20231123184533_amd64.deb"
state: present
- name: Copie du repertoire des hooks dans le repertoire /usr/local/bin/kea/hooks
copy:
src: /usr/lib/x86_64-linux-gnu/kea/
dest: /usr/local/lib/kea/
- name: Copie du fichier de configuration kea-dhcp4.conf
copy:
src: kea-dhcp4.conf
dest: /etc/kea/kea-dhcp4.conf
notify:
- restart isc-kea-dhcp4-server
- name: Copie du fichier de configuration kea-ctrl-agent
copy:
src: kea-ctrl-agent.conf
dest: /etc/kea/kea-ctrl-agent.conf
notify:
- restart isc-kea-ctrl-agent

View File

@ -7,15 +7,15 @@ iface lo inet loopback
# carte n-adm
allow-hotplug enp0s3
iface enp0s3 inet static
address 192.168.99.102/24
address 192.168.99.101/24
# Réseau n-dmz-lb
allow-hotplug enp0s8
iface enp0s8 inet static
address 192.168.101.2/24
address 192.168.101.1/24
# réseau n-dmz-db
allow-hotplug enp0s9
iface enp0s9 inet static
address 192.168.102.2/24
address 192.168.102.1/24
post-up mount -o rw 192.168.102.253:/home/wordpress /var/www/html

View File

@ -1,23 +0,0 @@
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
#source /etc/network/interfaces.d/*
# The loopback network interface
auto lo
iface lo inet loopback
# cote n-adm
allow-hotplug enp0s3
iface enp0s3 inet static
address 192.168.99.22/24
gateway 192.168.99.99
# Cote n-infra
allow-hotplug enp0s8
iface enp0s8 inet static
address 172.16.0.22/24
up ip route add 172.16.64.0/24 via 172.16.0.254
up ip route add 172.16.128.0/24 via 172.16.0.254
up ip route add 192.168.0.0/16 via 172.16.0.254
up ip route add 192.168.200.0/24 via 172.16.0.254

View File

@ -1,10 +0,0 @@
#!/bin/bash
# fetch the Nextcloud archive onto s-backup
BACKUP=/home/backup/s-nxc
# Prepare the destination directory (create it if it does not already exist)
[[ -e "${BACKUP}" ]] || mkdir -p "${BACKUP}"
# Copy the nxc.tgz archive from s-nxc to s-backup
scp -i ~/.ssh/id_rsa_sbackup root@s-nxc.gsb.adm:/root/nxc/nxc.tgz "${BACKUP}/"

View File

@ -14,14 +14,6 @@
group: root
mode: '0755'
- name: copie script backupnxc dans /usr/local/bin
copy:
src: backupnxc.sh
dest: /usr/local/bin
owner: root
group: root
mode: '0755'
- name: crontab backupsmb ( commentee par defaut )
cron:
name: backupsmb

View File

@ -1 +0,0 @@
### Public and private key generation ###

View File

@ -1,20 +0,0 @@
---
- name: on genere une cle privee pour s-backup
openssh_keypair:
path: /root/id_rsa_sbackup
type: rsa
state: present
- name: copie cle publique dans gsbstore
copy:
src: /root/id_rsa_sbackup.pub
dest: /var/www/html/gsbstore
mode: 0644
remote_src: yes
- name: copie cle privee dans gsbstore
copy:
src: /root/id_rsa_sbackup
dest: /var/www/html/gsbstore
mode: 0600
remote_src: yes

View File

@ -1,13 +0,0 @@
---
- name: creation .ssh
file:
path: ~/.ssh
state: directory
mode: 0700
- name: recuperation de la cle privee generee par s-adm
get_url:
url: http://s-adm.gsb.adm/gsbstore/id_rsa_sbackup
dest: /root/.ssh/id_rsa_sbackup
mode: 0600

View File

@ -1,6 +0,0 @@
---
- name: recuperation de la cle publique generee par s-adm
ansible.posix.authorized_key:
user: root
state: present
key: http://s-adm.gsb.adm/gsbstore/id_rsa_sbackup.pub

View File

@ -1,21 +0,0 @@
# Kea role
***
Kea role: configuration of 2 KEA servers in high-availability mode.
## Table of contents
1. [What does the Kea role do?]
2. [Installing and configuring Kea]
3. [Remarks]
## What does the Kea role do?
The KEA role configures the two Kea servers (s-kea1 and s-kea2) in high-availability mode.
- The **s-kea1** server runs in **primary** mode and delivers the DHCP leases on the n-user network.
- The **s-kea2** server runs in **stand-by** mode; the DHCP service therefore fails over to **s-kea2** if the **s-kea1** server becomes unavailable.
### Installing and configuring Kea
The Kea role installs the **kea dhcp4, hooks, admin** packages; once they are installed, it configures a Kea server so that it distributes IP addresses on the n-user network and runs in high availability.
### Remarks ###
Once the **s-kea** playbook has completed successfully and the **s-kea** machine has been rebooted, restart the **isc-kea-dhcp4.service** service so that the changes made to the network layer by the POST role are taken into account.
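Once both peers are up, the HA state can be checked through the control agent's REST API. A minimal sketch, assuming the agent listens on port 8000 as in the kea-ctrl-agent.conf shipped with this role (use 172.16.64.21 for the standby):

```bash
# Query the HA status of the dhcp4 service on s-kea1
curl -s -X POST -H "Content-Type: application/json" \
     -d '{ "command": "status-get", "service": [ "dhcp4" ] }' \
     http://172.16.64.20:8000/
```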

View File

@ -1,7 +0,0 @@
---
- name: Restart isc-stork-agent
ansible.builtin.service:
name: isc-stork-agent.service
state: restarted
enabled: yes

View File

@ -1,21 +0,0 @@
---
- name: Preparation
ansible.builtin.shell: curl -1sLf 'https://dl.cloudsmith.io/public/isc/stork/cfg/setup/bash.deb.sh' | sudo bash
- name: Update apt
ansible.builtin.apt:
update_cache: yes
- name: Installation isc-stork-agent
ansible.builtin.apt:
name: isc-stork-agent
state: present
- name: Generation du fichier de configuration agent.env
ansible.builtin.template:
src: agent.env.j2
dest: /etc/stork/agent.env
notify:
- Restart isc-stork-agent

View File

@ -1,45 +0,0 @@
### the IP or hostname to listen on for incoming Stork server connections
STORK_AGENT_HOST={{ stork_host }}
### the TCP port to listen on for incoming Stork server connections
STORK_AGENT_PORT={{ stork_port }}
### listen for commands from the Stork server only, but not for Prometheus requests
# STORK_AGENT_LISTEN_STORK_ONLY=true
### listen for Prometheus requests only, but not for commands from the Stork server
# STORK_AGENT_LISTEN_PROMETHEUS_ONLY=true
### settings for exporting stats to Prometheus
### the IP or hostname on which the agent exports Kea statistics to Prometheus
# STORK_AGENT_PROMETHEUS_KEA_EXPORTER_ADDRESS=
### the port on which the agent exports Kea statistics to Prometheus
# STORK_AGENT_PROMETHEUS_KEA_EXPORTER_PORT=
### how often the agent collects stats from Kea, in seconds
# STORK_AGENT_PROMETHEUS_KEA_EXPORTER_INTERVAL=
## enable or disable collecting per-subnet stats from Kea
# STORK_AGENT_PROMETHEUS_KEA_EXPORTER_PER_SUBNET_STATS=true
### the IP or hostname on which the agent exports BIND 9 statistics to Prometheus
# STORK_AGENT_PROMETHEUS_BIND9_EXPORTER_ADDRESS=
### the port on which the agent exports BIND 9 statistics to Prometheus
# STORK_AGENT_PROMETHEUS_BIND9_EXPORTER_PORT=
### how often the agent collects stats from BIND 9, in seconds
# STORK_AGENT_PROMETHEUS_BIND9_EXPORTER_INTERVAL=
### Stork Server URL used by the agent to send REST commands to the server during agent registration
STORK_AGENT_SERVER_URL=http://s-backup.gsb.lan:8080/
### skip TLS certificate verification when the Stork Agent connects
### to Kea over TLS and Kea uses self-signed certificates
# STORK_AGENT_SKIP_TLS_CERT_VERIFICATION=true
### Logging parameters
### Set logging level. Supported values are: DEBUG, INFO, WARN, ERROR
# STORK_LOG_LEVEL=DEBUG
### disable output colorization
# CLICOLOR=false
### path to the hook directory
# STORK_AGENT_HOOK_DIRECTORY=

View File

@ -1,21 +0,0 @@
# Kea role
***
Kea role: configuration of 2 KEA servers in high-availability mode.
## Table of contents
1. [What does the Kea role do?]
2. [Installing and configuring Kea]
3. [Remarks]
## What does the Kea role do?
The KEA role configures the two Kea servers (s-kea1 and s-kea2) in high-availability mode.
- The **s-kea1** server runs in **primary** mode and delivers the DHCP leases on the n-user network.
- The **s-kea2** server runs in **stand-by** mode; the DHCP service therefore fails over to **s-kea2** if the **s-kea1** server becomes unavailable.
### Installing and configuring Kea
The Kea role installs the **kea dhcp4, hooks, admin** packages; once they are installed, it configures a Kea server so that it distributes IP addresses on the n-user network and runs in high availability.
### Remarks ###
Once the **s-kea** playbook has completed successfully and the **s-kea** machine has been rebooted, restart the **isc-kea-dhcp4.service** service so that the changes made to the network layer by the POST role are taken into account.

View File

@ -1,8 +0,0 @@
#variable kea
kea_ver: "2.4.1"
kea_dbname: ""
kaa_dbuser: ""
kea_dbpasswd: ""
kea_dhcp4_dir: "/etc/kea/kea-dhcp4.conf"
kea_ctrl_dir: "/etc/kea/kea-ctrl-agent.conf"

View File

@ -1,6 +0,0 @@
---
- name: Restart isc-stork-server.service
ansible.builtin.service:
name: isc-stork-server.service
state: restarted
enabled: yes

View File

@ -1,31 +0,0 @@
---
- name: Preparation
ansible.builtin.shell: curl -1sLf 'https://dl.cloudsmith.io/public/isc/stork/cfg/setup/bash.deb.sh' | sudo bash
- name: Update apt
ansible.builtin.apt:
update_cache: yes
#- name: Installation paquet isc-kea-common
# ansible.builtin.apt:
# deb: isc-kea-common
# state: present
- name: Installation isc-stork-server postgresql
ansible.builtin.apt:
pkg:
- isc-stork-server
- postgresql-15
- name: lancer la commande de création de la base de donnees stork
ansible.builtin.shell: su postgres --command "stork-tool db-create --db-name {{ stork_db_name }} --db-user {{ stork_db_user }} --db-password {{ stork_db_passwd }}"
- name: Generation du fichier de configuration server.env
ansible.builtin.template:
src: server.env.j2
dest: /etc/stork/server.env
notify:
- Restart isc-stork-server.service

View File

@ -1,52 +0,0 @@
### database settings
### the address of a PostgreSQL database
STORK_DATABASE_HOST=localhost
### the port of a PostgreSQL database
STORK_DATABASE_PORT=5432
### the name of a database
STORK_DATABASE_NAME={{ stork_db_name }}
### the username for connecting to the database
STORK_DATABASE_USER_NAME={{ stork_db_user }}
### the SSL mode for connecting to the database
### possible values: disable, require, verify-ca, or verify-full
# STORK_DATABASE_SSLMODE=
### the location of the SSL certificate used by the server to connect to the database
# STORK_DATABASE_SSLCERT=
### the location of the SSL key used by the server to connect to the database
# STORK_DATABASE_SSLKEY=
### the location of the root certificate file used to verify the database server's certificate
# STORK_DATABASE_SSLROOTCERT=
### the password for the username connecting to the database
### empty password is set to avoid prompting a user for database password
STORK_DATABASE_PASSWORD={{stork_db_passwd }}
### REST API settings
### the IP address on which the server listens
# STORK_REST_HOST=
### the port number on which the server listens
# STORK_REST_PORT=
### the file with a certificate to use for secure connections
# STORK_REST_TLS_CERTIFICATE=
### the file with a private key to use for secure connections
# STORK_REST_TLS_PRIVATE_KEY=
### the certificate authority file used for mutual TLS authentication
# STORK_REST_TLS_CA_CERTIFICATE=
### the directory with static files served in the UI
STORK_REST_STATIC_FILES_DIR=/usr/share/stork/www
### the base URL of the UI - to be used only if the UI is served from a subdirectory
# STORK_REST_BASE_URL=
### enable Prometheus /metrics HTTP endpoint for exporting metrics from
### the server to Prometheus. It is recommended to secure this endpoint
### (e.g. using HTTP proxy).
# STORK_SERVER_ENABLE_METRICS=true
### Logging parameters
### Set logging level. Supported values are: DEBUG, INFO, WARN, ERROR
# STORK_LOG_LEVEL=DEBUG
### disable output colorization
# CLICOLOR=false
### path to the hook directory
# STORK_SERVER_HOOK_DIRECTORY=

View File

@ -1,21 +1,13 @@
## **Overview of the VPN installation:**
The installation proceeds in three distinct phases. First, it starts with the **r-vp1** playbook. Then, in a second step, the r-vp2 playbook is deployed. Finally, the last phase sets up our filtering with **ferm**.
## **Wireguard directory layout:**
The wireguard-r directory corresponds to r-vp1,
and wireguard-l to r-vp2.
# <p align="center">Installation procedure </p>
of **r-vp1** and copying the wg0-b.conf file.
***
## On **r-vp1**:
Wait for the installation to finish. Then start an HTTP server with python3 so that the wg0-b.conf file can be retrieved on **r-vp2**.
Run the playbook: *ansible-playbook -i localhost, -c local* r-vp1.yml on **r-vp1**
Wait for the installation to finish. Then run the r-vp1-post.sh script.
### 🛠️ Run the r-vp1-post.sh script
### 🛠️ Run the script
```bash
cd /tools/ansible/gsb2023/Scripts
```
@ -24,10 +16,7 @@ bash r-vp1-post.sh
```
## On **r-vp2**:
Run the playbook: *ansible-playbook -i localhost, -c local* r-vp2.yml on **r-vp2**
Then run the r-vp2-post.sh script to retrieve the configuration file and bring up the wg0 interface.
Run the r-vp2-post.sh script to retrieve the configuration file and bring up the wg0 interface.
### 🛠️ Run the script
```bash
cd /tools/ansible/gsb2023/Scripts
@ -37,11 +26,7 @@ bash r-vp2-post.sh
```
## Finishing up
Finally, reboot the machines.
reboot the machines
```bash
reboot
```
Now go to the ferm role directory:
*gsb2024/roles/fw-ferm*
*Modification: jm*

View File

@ -4,21 +4,11 @@ Rôle du Zabbix client pour la supervision des différentes machines en active
## Table of contents
1. [What does the Zabbix role do?]
2. [Installing and configuring Zabbix-agent]
3. [Windows part]
## What does the Zabbix role do?
It configures the Zabbix agents in active mode towards the server.
It configures the Zabbix agents in active mode towards the server defined in defaults.
### Installing and configuring Zabbix-agent
The Zabbix-cli role installs Zabbix-agent on the Debian servers. The parameters can be adjusted in the 'defaults' file. This is an active-mode configuration, which means that on the server side it is enough to declare the hosts with their name and OS type and, in our case, to state on the Zabbix server that they are virtual machines.
### Windows part!
Zabbix-agent works the same way as on Linux. However, when you are on the Zabbix web site to install the agent, make sure to pick the classic Zabbix-agent rather than version 2, which needs more resources for little extra supervision.
Regarding the configuration during the installation of the Zabbix agent, it will ask you to enter information such as, for example, the 'server IP'. You do not have to provide it, since everything can be changed later.
The configuration file is the same as the one used on Linux. If you performed the default installation of the Zabbix agent, you will find the configuration files in the C:\Program Files\Zabbix Agent directory, and the configuration file is named "zabbix_agentd.conf".
Before doing any configuration after installing Zabbix Agent, remember to open the Task Manager, then Services. At the very bottom you will find 'Zabbix Agent' running. Stop it, and you will then be able to edit the configuration without any problem.
To switch Zabbix Agent to active mode, simply change the 'Server' value to 127.0.0.1 and the 'ServerActive' value to the IP address of your Zabbix server, in our case 172.16.0.8. Do not forget to change the 'Hostname' value, as it is the one you will have to enter in the hosts of the Zabbix server for supervision data to come in. Also remember to restart the service once Zabbix Agent is configured.
The Zabbix-cli role installs zabbix-agent on the servers; zabbix-agent is our server-side supervision tool.

View File

@ -1,3 +0,0 @@
SERVER: "127.0.0.1"
SERVERACTIVE: "192.168.99.8"
TOKENAPI: "f72473b7e5402a5247773e456f3709dcdd5e41792360108fc3451bbfeed8eafe"

View File

@ -17,22 +17,20 @@
name: zabbix-agent
state: present
- name: Mise en place du fichier conf zabbix agent (active)
template:
src: zabbix_agentd.conf.j2
dest: /etc/zabbix/zabbix_agentd.conf
- name: Enable Zabbix agent service
service:
name: zabbix-agent
state: restarted
enabled: yes
- name: mise en place script hostcreate
template:
src: hostcreate.sh.j2
dest: /tmp/hostcreate.sh
#- name: lancement script hostcreate
#command: bash /tmp/hostcreate.sh
- name: Replace Zabbix agent config
replace:
path: /etc/zabbix/zabbix_agentd.conf
regexp: '{{ item.regexp }}'
replace: '{{ item.replace }}'
backup: true
loop:
- { regexp: '^(Server\s*=\s*).*$', replace: 'Server = 127.0.0.1' }
- { regexp: '^(ServerActive\s*=\s*).*$', replace: 'ServerActive = 192.168.99.8' }
- { regexp: '^(Hostname\s*=\s*).*$', replace: 'Hostname = {{ ansible_hostname }}' }
- { regexp: '^(Include\s*=\s*).*$', replace: 'Include = /etc/zabbix/zabbix_agentd.d/*.conf' }

View File

@ -1 +0,0 @@
curl -X POST -H "Content-Type: application/json" -d '{ "jsonrpc":"2.0","method":"host.create","params": {"host": "{{ ansible_hostname }}","groups": [{"groupid": "6"}],"templates": [{"templateid": "10343"}],"inventory_mode": 0,"inventory": {"type": 0}},"auth": "{{ TOKENAPI }}","id": 1}' http://{{ SERVERACTIVE }}/zabbix/api_jsonrpc.php

View File

@ -1,554 +0,0 @@
# This is a configuration file for Zabbix agent daemon (Unix)
# To get more information about Zabbix, visit http://www.zabbix.com
############ GENERAL PARAMETERS #################
### Option: PidFile
# Name of PID file.
#
# Mandatory: no
# Default:
# PidFile=/tmp/zabbix_agentd.pid
PidFile=/run/zabbix/zabbix_agentd.pid
### Option: LogType
# Specifies where log messages are written to:
# system - syslog
# file - file specified with LogFile parameter
# console - standard output
#
# Mandatory: no
# Default:
# LogType=file
### Option: LogFile
# Log file name for LogType 'file' parameter.
#
# Mandatory: yes, if LogType is set to file, otherwise no
# Default:
# LogFile=
LogFile=/var/log/zabbix/zabbix_agentd.log
### Option: LogFileSize
# Maximum size of log file in MB.
# 0 - disable automatic log rotation.
#
# Mandatory: no
# Range: 0-1024
# Default:
# LogFileSize=1
LogFileSize=0
### Option: DebugLevel
# Specifies debug level:
# 0 - basic information about starting and stopping of Zabbix processes
# 1 - critical information
# 2 - error information
# 3 - warnings
# 4 - for debugging (produces lots of information)
# 5 - extended debugging (produces even more information)
#
# Mandatory: no
# Range: 0-5
# Default:
# DebugLevel=3
### Option: SourceIP
# Source IP address for outgoing connections.
#
# Mandatory: no
# Default:
# SourceIP=
### Option: AllowKey
# Allow execution of item keys matching pattern.
# Multiple keys matching rules may be defined in combination with DenyKey.
# Key pattern is wildcard expression, which support "*" character to match any number of any characters in certain position. It might be used in both key name and key arguments.
# Parameters are processed one by one according their appearance order.
# If no AllowKey or DenyKey rules defined, all keys are allowed.
#
# Mandatory: no
### Option: DenyKey
# Deny execution of items keys matching pattern.
# Multiple keys matching rules may be defined in combination with AllowKey.
# Key pattern is wildcard expression, which support "*" character to match any number of any characters in certain position. It might be used in both key name and key arguments.
# Parameters are processed one by one according their appearance order.
# If no AllowKey or DenyKey rules defined, all keys are allowed.
# Unless another system.run[*] rule is specified DenyKey=system.run[*] is added by default.
#
# Mandatory: no
# Default:
# DenyKey=system.run[*]
### Option: EnableRemoteCommands - Deprecated, use AllowKey=system.run[*] or DenyKey=system.run[*] instead
# Internal alias for AllowKey/DenyKey parameters depending on value:
# 0 - DenyKey=system.run[*]
# 1 - AllowKey=system.run[*]
#
# Mandatory: no
### Option: LogRemoteCommands
# Enable logging of executed shell commands as warnings.
# 0 - disabled
# 1 - enabled
#
# Mandatory: no
# Default:
# LogRemoteCommands=0
##### Passive checks related
### Option: Server
# List of comma delimited IP addresses, optionally in CIDR notation, or DNS names of Zabbix servers and Zabbix proxies.
# Incoming connections will be accepted only from the hosts listed here.
# If IPv6 support is enabled then '127.0.0.1', '::127.0.0.1', '::ffff:127.0.0.1' are treated equally
# and '::/0' will allow any IPv4 or IPv6 address.
# '0.0.0.0/0' can be used to allow any IPv4 address.
# Example: Server=127.0.0.1,192.168.1.0/24,::1,2001:db8::/32,zabbix.example.com
#
# Mandatory: yes, if StartAgents is not explicitly set to 0
# Default:
# Server=
Server = {{ SERVER }}
### Option: ListenPort
# Agent will listen on this port for connections from the server.
#
# Mandatory: no
# Range: 1024-32767
# Default:
# ListenPort=10050
### Option: ListenIP
# List of comma delimited IP addresses that the agent should listen on.
# First IP address is sent to Zabbix server if connecting to it to retrieve list of active checks.
#
# Mandatory: no
# Default:
# ListenIP=0.0.0.0
### Option: StartAgents
# Number of pre-forked instances of zabbix_agentd that process passive checks.
# If set to 0, disables passive checks and the agent will not listen on any TCP port.
#
# Mandatory: no
# Range: 0-100
# Default:
# StartAgents=3
##### Active checks related
### Option: ServerActive
# Zabbix server/proxy address or cluster configuration to get active checks from.
# Server/proxy address is IP address or DNS name and optional port separated by colon.
# Cluster configuration is one or more server addresses separated by semicolon.
# Multiple Zabbix servers/clusters and Zabbix proxies can be specified, separated by comma.
# More than one Zabbix proxy should not be specified from each Zabbix server/cluster.
# If Zabbix proxy is specified then Zabbix server/cluster for that proxy should not be specified.
# Multiple comma-delimited addresses can be provided to use several independent Zabbix servers in parallel. Spaces are allowed.
# If port is not specified, default port is used.
# IPv6 addresses must be enclosed in square brackets if port for that host is specified.
# If port is not specified, square brackets for IPv6 addresses are optional.
# If this parameter is not specified, active checks are disabled.
# Example for Zabbix proxy:
# ServerActive=127.0.0.1:10051
# Example for multiple servers:
# ServerActive=127.0.0.1:20051,zabbix.domain,[::1]:30051,::1,[12fc::1]
# Example for high availability:
# ServerActive=zabbix.cluster.node1;zabbix.cluster.node2:20051;zabbix.cluster.node3
# Example for high availability with two clusters and one server:
# ServerActive=zabbix.cluster.node1;zabbix.cluster.node2:20051,zabbix.cluster2.node1;zabbix.cluster2.node2,zabbix.domain
#
# Mandatory: no
# Default:
# ServerActive=
ServerActive = {{ SERVERACTIVE }}
### Option: Hostname
# List of comma delimited unique, case sensitive hostnames.
# Required for active checks and must match hostnames as configured on the server.
# Value is acquired from HostnameItem if undefined.
#
# Mandatory: no
# Default:
# Hostname=
Hostname = {{ ansible_hostname }}
### Option: HostnameItem
# Item used for generating Hostname if it is undefined. Ignored if Hostname is defined.
# Does not support UserParameters or aliases.
#
# Mandatory: no
# Default:
# HostnameItem=system.hostname
### Option: HostMetadata
# Optional parameter that defines host metadata.
# Host metadata is used at host auto-registration process.
# An agent will issue an error and not start if the value is over limit of 2034 bytes.
# If not defined, value will be acquired from HostMetadataItem.
#
# Mandatory: no
# Range: 0-2034 bytes
# Default:
# HostMetadata=
### Option: HostMetadataItem
# Optional parameter that defines an item used for getting host metadata.
# Host metadata is used at host auto-registration process.
# During an auto-registration request an agent will log a warning message if
# the value returned by specified item is over limit of 65535 characters.
# This option is only used when HostMetadata is not defined.
#
# Mandatory: no
# Default:
# HostMetadataItem=
### Option: HostInterface
# Optional parameter that defines host interface.
# Host interface is used at host auto-registration process.
# An agent will issue an error and not start if the value is over limit of 255 characters.
# If not defined, value will be acquired from HostInterfaceItem.
#
# Mandatory: no
# Range: 0-255 characters
# Default:
# HostInterface=
### Option: HostInterfaceItem
# Optional parameter that defines an item used for getting host interface.
# Host interface is used at host auto-registration process.
# During an auto-registration request an agent will log a warning message if
# the value returned by specified item is over limit of 255 characters.
# This option is only used when HostInterface is not defined.
#
# Mandatory: no
# Default:
# HostInterfaceItem=
### Option: RefreshActiveChecks
# How often list of active checks is refreshed, in seconds.
#
# Mandatory: no
# Range: 1-86400
# Default:
# RefreshActiveChecks=5
### Option: BufferSend
# Do not keep data longer than N seconds in buffer.
#
# Mandatory: no
# Range: 1-3600
# Default:
# BufferSend=5
### Option: BufferSize
# Maximum number of values in a memory buffer. The agent will send
# all collected data to Zabbix Server or Proxy if the buffer is full.
#
# Mandatory: no
# Range: 2-65535
# Default:
# BufferSize=100
### Option: MaxLinesPerSecond
# Maximum number of new lines the agent will send per second to Zabbix Server
# or Proxy processing 'log' and 'logrt' active checks.
# The provided value will be overridden by the parameter 'maxlines',
# provided in 'log' or 'logrt' item keys.
#
# Mandatory: no
# Range: 1-1000
# Default:
# MaxLinesPerSecond=20
### Option: HeartbeatFrequency
# Frequency of heartbeat messages in seconds.
# Used for monitoring availability of active checks.
# 0 - heartbeat messages disabled.
#
# Mandatory: no
# Range: 0-3600
# Default: 60
# HeartbeatFrequency=
############ ADVANCED PARAMETERS #################
### Option: Alias
# Sets an alias for an item key. It can be used to substitute long and complex item key with a smaller and simpler one.
# Multiple Alias parameters may be present. Multiple parameters with the same Alias key are not allowed.
# Different Alias keys may reference the same item key.
# For example, to retrieve the ID of user 'zabbix':
# Alias=zabbix.userid:vfs.file.regexp[/etc/passwd,^zabbix:.:([0-9]+),,,,\1]
# Now shorthand key zabbix.userid may be used to retrieve data.
# Aliases can be used in HostMetadataItem but not in HostnameItem parameters.
#
# Mandatory: no
# Range:
# Default:
### Option: Timeout
# Spend no more than Timeout seconds on processing
#
# Mandatory: no
# Range: 1-30
# Default:
# Timeout=3
### Option: AllowRoot
# Allow the agent to run as 'root'. If disabled and the agent is started by 'root', the agent
# will try to switch to the user specified by the User configuration option instead.
# Has no effect if started under a regular user.
# 0 - do not allow
# 1 - allow
#
# Mandatory: no
# Default:
# AllowRoot=0
### Option: User
# Drop privileges to a specific, existing user on the system.
# Only has effect if run as 'root' and AllowRoot is disabled.
#
# Mandatory: no
# Default:
# User=zabbix
# NOTE: This option is overriden by settings in systemd service file!
### Option: Include
# You may include individual files or all files in a directory in the configuration file.
# Installing Zabbix will create include directory in /usr/local/etc, unless modified during the compile time.
#
# Mandatory: no
# Default:
# Include=
Include = /etc/zabbix/zabbix_agentd.d/*.conf
# Include=/usr/local/etc/zabbix_agentd.userparams.conf
# Include=/usr/local/etc/zabbix_agentd.conf.d/
# Include=/usr/local/etc/zabbix_agentd.conf.d/*.conf
####### USER-DEFINED MONITORED PARAMETERS #######
### Option: UnsafeUserParameters
# Allow all characters to be passed in arguments to user-defined parameters.
# The following characters are not allowed:
# \ ' " ` * ? [ ] { } ~ $ ! & ; ( ) < > | # @
# Additionally, newline characters are not allowed.
# 0 - do not allow
# 1 - allow
#
# Mandatory: no
# Range: 0-1
# Default:
# UnsafeUserParameters=0
### Option: UserParameter
# User-defined parameter to monitor. There can be several user-defined parameters.
# Format: UserParameter=<key>,<shell command>
# See 'zabbix_agentd' directory for examples.
#
# Mandatory: no
# Default:
# UserParameter=
### Option: UserParameterDir
# Directory to execute UserParameter commands from. Only one entry is allowed.
# When executing UserParameter commands the agent will change the working directory to the one
# specified in the UserParameterDir option.
# This way UserParameter commands can be specified using the relative ./ prefix.
#
# Mandatory: no
# Default:
# UserParameterDir=
####### LOADABLE MODULES #######
### Option: LoadModulePath
# Full path to location of agent modules.
# Default depends on compilation options.
# To see the default path run command "zabbix_agentd --help".
#
# Mandatory: no
# Default:
# LoadModulePath=${libdir}/modules
### Option: LoadModule
# Module to load at agent startup. Modules are used to extend functionality of the agent.
# Formats:
# LoadModule=<module.so>
# LoadModule=<path/module.so>
# LoadModule=</abs_path/module.so>
# Either the module must be located in directory specified by LoadModulePath or the path must precede the module name.
# If the preceding path is absolute (starts with '/') then LoadModulePath is ignored.
# It is allowed to include multiple LoadModule parameters.
#
# Mandatory: no
# Default:
# LoadModule=
####### TLS-RELATED PARAMETERS #######
### Option: TLSConnect
# How the agent should connect to server or proxy. Used for active checks.
# Only one value can be specified:
# unencrypted - connect without encryption
# psk - connect using TLS and a pre-shared key
# cert - connect using TLS and a certificate
#
# Mandatory: yes, if TLS certificate or PSK parameters are defined (even for 'unencrypted' connection)
# Default:
# TLSConnect=unencrypted
### Option: TLSAccept
# What incoming connections to accept.
# Multiple values can be specified, separated by comma:
# unencrypted - accept connections without encryption
# psk - accept connections secured with TLS and a pre-shared key
# cert - accept connections secured with TLS and a certificate
#
# Mandatory: yes, if TLS certificate or PSK parameters are defined (even for 'unencrypted' connection)
# Default:
# TLSAccept=unencrypted
### Option: TLSCAFile
# Full pathname of a file containing the top-level CA(s) certificates for
# peer certificate verification.
#
# Mandatory: no
# Default:
# TLSCAFile=
### Option: TLSCRLFile
# Full pathname of a file containing revoked certificates.
#
# Mandatory: no
# Default:
# TLSCRLFile=
### Option: TLSServerCertIssuer
# Allowed server certificate issuer.
#
# Mandatory: no
# Default:
# TLSServerCertIssuer=
### Option: TLSServerCertSubject
# Allowed server certificate subject.
#
# Mandatory: no
# Default:
# TLSServerCertSubject=
### Option: TLSCertFile
# Full pathname of a file containing the agent certificate or certificate chain.
#
# Mandatory: no
# Default:
# TLSCertFile=
### Option: TLSKeyFile
# Full pathname of a file containing the agent private key.
#
# Mandatory: no
# Default:
# TLSKeyFile=
### Option: TLSPSKIdentity
# Unique, case sensitive string used to identify the pre-shared key.
#
# Mandatory: no
# Default:
# TLSPSKIdentity=
### Option: TLSPSKFile
# Full pathname of a file containing the pre-shared key.
#
# Mandatory: no
# Default:
# TLSPSKFile=
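#	Illustrative PSK setup (commented out, not part of the original file); the
#	identity string and key path below are assumptions:
#	TLSConnect=psk
#	TLSAccept=psk
#	TLSPSKIdentity=PSK-s-mon-agent
#	TLSPSKFile=/etc/zabbix/zabbix_agentd.psk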
####### For advanced users - TLS ciphersuite selection criteria #######
### Option: TLSCipherCert13
# Cipher string for OpenSSL 1.1.1 or newer in TLS 1.3.
# Override the default ciphersuite selection criteria for certificate-based encryption.
#
# Mandatory: no
# Default:
# TLSCipherCert13=
### Option: TLSCipherCert
# GnuTLS priority string or OpenSSL (TLS 1.2) cipher string.
# Override the default ciphersuite selection criteria for certificate-based encryption.
# Example for GnuTLS:
# NONE:+VERS-TLS1.2:+ECDHE-RSA:+RSA:+AES-128-GCM:+AES-128-CBC:+AEAD:+SHA256:+SHA1:+CURVE-ALL:+COMP-NULL:+SIGN-ALL:+CTYPE-X.509
# Example for OpenSSL:
# EECDH+aRSA+AES128:RSA+aRSA+AES128
#
# Mandatory: no
# Default:
# TLSCipherCert=
### Option: TLSCipherPSK13
# Cipher string for OpenSSL 1.1.1 or newer in TLS 1.3.
# Override the default ciphersuite selection criteria for PSK-based encryption.
# Example:
# TLS_CHACHA20_POLY1305_SHA256:TLS_AES_128_GCM_SHA256
#
# Mandatory: no
# Default:
# TLSCipherPSK13=
### Option: TLSCipherPSK
# GnuTLS priority string or OpenSSL (TLS 1.2) cipher string.
# Override the default ciphersuite selection criteria for PSK-based encryption.
# Example for GnuTLS:
# NONE:+VERS-TLS1.2:+ECDHE-PSK:+PSK:+AES-128-GCM:+AES-128-CBC:+AEAD:+SHA256:+SHA1:+CURVE-ALL:+COMP-NULL:+SIGN-ALL
# Example for OpenSSL:
# kECDHEPSK+AES128:kPSK+AES128
#
# Mandatory: no
# Default:
# TLSCipherPSK=
### Option: TLSCipherAll13
# Cipher string for OpenSSL 1.1.1 or newer in TLS 1.3.
# Override the default ciphersuite selection criteria for certificate- and PSK-based encryption.
# Example:
# TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256:TLS_AES_128_GCM_SHA256
#
# Mandatory: no
# Default:
# TLSCipherAll13=
### Option: TLSCipherAll
# GnuTLS priority string or OpenSSL (TLS 1.2) cipher string.
# Override the default ciphersuite selection criteria for certificate- and PSK-based encryption.
# Example for GnuTLS:
# NONE:+VERS-TLS1.2:+ECDHE-RSA:+RSA:+ECDHE-PSK:+PSK:+AES-128-GCM:+AES-128-CBC:+AEAD:+SHA256:+SHA1:+CURVE-ALL:+COMP-NULL:+SIGN-ALL:+CTYPE-X.509
# Example for OpenSSL:
# EECDH+aRSA+AES128:RSA+aRSA+AES128:kECDHEPSK+AES128:kPSK+AES128
#
# Mandatory: no
# Default:
# TLSCipherAll=
####### For advanced users - TCP-related fine-tuning parameters #######
### Option: ListenBacklog
# The maximum number of pending connections in the queue. This parameter is passed to
# listen() function as argument 'backlog' (see "man listen").
#
# Mandatory: no
# Range: 0 - INT_MAX (depends on system, too large values may be silently truncated to implementation-specified maximum)
# Default: SOMAXCONN (hard-coded constant, depends on system)
# ListenBacklog=
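#	A quick sanity check after editing this file (suggested usage, not part of
#	the original configuration): test a built-in key with
#	"zabbix_agentd -t agent.ping", then "systemctl restart zabbix-agent".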

View File

@ -29,7 +29,15 @@
name: mariadb
state: started
- name: 6. Creer un utilisateur et lui attribuer tous les droits
- name: 6. Créer la base de données
community.mysql.mysql_db:
name: zabbix
encoding: utf8mb4
collation: utf8mb4_bin
state: present
login_unix_socket: /var/run/mysqld/mysqld.sock
- name: 7. Creer un utilisateur et lui attribuer tous les droits
community.mysql.mysql_user:
name: zabbix
password: password
@ -37,52 +45,50 @@
state: present
login_unix_socket: /var/run/mysqld/mysqld.sock
- name: 7. Modifier la variable trust function creators pour importer la base données
- name: 8. Modifier une variable pour importer un schema
community.mysql.mysql_variables:
variable: log_bin_trust_function_creators
value: 1
mode: global
login_unix_socket: /var/run/mysqld/mysqld.sock
- name: 8. Récupérer la base de données
get_url:
url: http://s-adm.gsb.adm/gsbstore/zabbix.sql.gz
dest: /tmp
- name: 9. Importer la base de données
- name: 9. Importer le schema initial
community.mysql.mysql_db:
state: import
name: zabbix
encoding: utf8mb4
target: /tmp/zabbix.sql.gz
login_user: zabbix
login_password: password
target: /usr/share/zabbix-sql-scripts/mysql/server.sql.gz
login_unix_socket: /var/run/mysqld/mysqld.sock
- name: 10. Remettre a zero la variable trust function creators
- name: 10. Modifier la variable pour le schema
community.mysql.mysql_variables:
variable: log_bin_trust_function_creators
value: 0
mode: global
login_unix_socket: /var/run/mysqld/mysqld.sock
- name: 11. Lancer le service zabbix-server
- name: 11. Configurer le mdp de la db
replace:
path: /etc/zabbix/zabbix_server.conf
regexp: '^# DBPassword='
replace: 'DBPassword=password'
- name: 12. Lancer le service zabbix-server
service:
name: zabbix-server
state: restarted
enabled: yes
- name: 12. Lancer le service zabbix-agent
- name: 13. Lancer le service zabbix-agent
service:
name: zabbix-agent
state: restarted
enabled: yes
- name: 13. Lancer le service apache2
- name: 14. Lancer le service apache2
service:
name: apache2
state: restarted
enabled: yes
- name: 14. Gotify
copy:
src: gotify.sh
dest: /usr/lib/zabbix/alertscripts
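As a complement to these tasks, a minimal goss sketch in the style of the project's other check files could confirm that the server side is up; the assumption that zabbix-server listens on its default port 10051 on all interfaces is mine, not taken from the repository:
service:
  zabbix-server:
    enabled: true
    running: true
  zabbix-agent:
    enabled: true
    running: true
port:
  tcp:10051:
    listening: true
    ip:
      - 0.0.0.0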

View File

@ -7,9 +7,8 @@
- s-ssh
- dnsmasq
- squid
- ssh-backup-key-gen
# - local-store
- zabbix-cli
# #- zabbix-cli
## - syslog-cli
- post
# - goss

View File

@ -8,7 +8,7 @@
- appli
- ssh-cli
# - syslog-cli
- zabbix-cli
#- zabbix-cli
- ssl-apache
- post

View File

@ -1,20 +1,14 @@
---
- hosts: localhost
connection: local
vars:
stork_db_user: "stork-server"
stork_db_passwd: "Azerty1+"
stork_db_name: "stork"
roles:
- base
- goss
- zabbix-cli
- gotify
- stork-server
- ssh-cli
#- syslog-cli
# - proxy3
#- zabbix-cli
# - ssh-cli
# - syslog-cli
- smb-backup
- dns-slave
- post
- ssh-backup-key-private

View File

@ -4,7 +4,7 @@
# include: config.yml
roles:
- base
- zabbix-cli
#- zabbix-cli
- goss
- dns-master
- webautoconf

View File

@ -1,24 +0,0 @@
---
- hosts: localhost
connection: local
vars:
kea_this_server: "s-kea1"
kea_srv1: "s-kea1"
kea_srv2: "s-kea2"
kea_ctrl_address_this: "172.16.0.20"
kea_ctrl_address1: "172.16.0.20"
kea_ctrl_address2: "172.16.0.21"
kea_dhcp_int: "enp0s9"
stork_host: "s-kea1.gsb.lan"
stork_port: "8081"
roles:
- base
- goss
- ssh-cli
- kea
- stork-agent
- zabbix-cli
- journald-snd
- snmp-agent
- post

View File

@ -1,24 +0,0 @@
---
- hosts: localhost
connection: local
vars:
kea_this_server: "s-kea2"
kea_srv1: "s-kea1"
kea_srv2: "s-kea2"
kea_ctrl_address_this: "172.16.0.21"
kea_ctrl_address1: "172.16.0.20"
kea_ctrl_address2: "172.16.0.21"
kea_dhcp_int: "enp0s9"
stork_host: "s-kea2.gsb.lan"
stork_port: "8081"
roles:
- base
- goss
- ssh-cli
- kea
- stork-agent
- zabbix-cli
- journald-snd
- snmp-agent
- post

View File

@ -9,5 +9,5 @@
- goss
- lb-bd
- post
#- zabbix-cli
- snmp-agent
- ssh-cli

View File

@ -4,9 +4,8 @@
roles:
- base
- goss
- post-lb
- lb-web
# - zabbix-cli
- snmp-agent
- ssh-cli

View File

@ -4,9 +4,8 @@
roles:
- base
- goss
- post-lb
- lb-web
# - zabbix-cli
- snmp-agent
- ssh-cli

View File

@ -6,7 +6,7 @@
- base
- goss
- lb-front
#- zabbix-cli
- snmp-agent
- ssh-cli
- post

View File

@ -7,5 +7,5 @@
- docker-nextcloud
- ssh-cli
# - syslog-cli
- zabbix-cli
- snmp-agent
- post

View File

@ -9,8 +9,7 @@
roles:
- base
- goss
#- zabbix-cli
- snmp-agent
- lb-nfs-server
- ssh-cli
# - syslog-cli

View File

@ -1,55 +0,0 @@
command:
ls -l .:
exit-status: 0
stdout:
- total 200
- -rwxr-xr-x 1 root root 232 15 janv. 17:38 agoss
- -rw-r--r-- 1 root root 212 15 janv. 17:38 changelog
- drwxr-xr-x 3 root root 4096 15 janv. 17:38 doc
- drwxr-xr-x 2 root root 4096 19 janv. 10:50 goss
- -rwxr-xr-x 1 root root 209 15 janv. 17:38 gsbchk
- -rwxr-xr-x 1 root root 7174 15 janv. 17:38 gsbstart
- -rwxr-xr-x 1 root root 728 15 janv. 17:38 gsbstartl
- -rw-r--r-- 1 root root 289 15 janv. 17:38 lisezmoi.txt
- drwxr-xr-x 2 root root 4096 15 janv. 17:38 old
- drwxr-xr-x 2 root root 4096 19 janv. 09:16 pre
- -rw-r--r-- 1 root root 477 19 janv. 09:16 pull-config
- -rw-r--r-- 1 root root 5070 19 janv. 09:16 README.md
- -rw-r--r-- 1 root root 141 15 janv. 17:38 r-ext.yml
- -rw-r--r-- 1 root root 151 15 janv. 17:38 r-int.yml
- drwxr-xr-x 55 root root 4096 19 janv. 09:16 roles
- -rw-r--r-- 1 root root 177 15 janv. 17:38 r-vp1-fw.yml
- -rw-r--r-- 1 root root 259 15 janv. 17:38 r-vp1.yml
- -rw-r--r-- 1 root root 173 15 janv. 17:38 r-vp2-fw.yml
- -rw-r--r-- 1 root root 305 15 janv. 17:38 r-vp2.yml
- -rw-r--r-- 1 root root 181 19 janv. 09:16 s-adm.yml
- -rw-r--r-- 1 root root 119 15 janv. 17:38 s-agence.yml
- -rw-r--r-- 1 root root 166 19 janv. 09:16 s-appli.yml
- -rw-r--r-- 1 root root 182 19 janv. 09:16 s-backup.yml
- drwxr-xr-x 3 root root 4096 19 janv. 09:16 scripts
- -rw-r--r-- 1 root root 213 15 janv. 17:38 s-docker.yml
- -rw-r--r-- 1 root root 144 15 janv. 17:38 s-elk.yml
- -rw-r--r-- 1 root root 178 19 janv. 09:16 s-fog-post.yml
- -rw-r--r-- 1 root root 162 19 janv. 09:16 s-fog.yml
- -rw-r--r-- 1 root root 199 19 janv. 09:16 s-infra.yml
- -rw-r--r-- 1 root root 351 15 janv. 17:38 s-itil.yml
- -rw-r--r-- 1 root root 185 19 janv. 09:16 s-kea1.yml
- -rw-r--r-- 1 root root 174 19 janv. 09:16 s-kea2.yml
- -rw-r--r-- 1 root root 131 19 janv. 09:16 s-lb-bd.yml
- -rw-r--r-- 1 root root 127 19 janv. 09:16 s-lb-web1.yml
- -rw-r--r-- 1 root root 127 19 janv. 09:16 s-lb-web2.yml
- -rw-r--r-- 1 root root 145 19 janv. 09:16 s-lb.yml
- -rw-r--r-- 1 root root 148 19 janv. 09:16 s-mess.yml
- -rw-r--r-- 1 root root 241 19 janv. 09:16 s-mon.yml
- -rw-r--r-- 1 root root 290 19 janv. 09:16 s-nas.yml
- -rw-r--r-- 1 root root 156 15 janv. 17:38 s-nxc.yml
- -rw-r--r-- 1 root root 140 15 janv. 17:38 s-peertube.yml
- -rw-r--r-- 1 root root 148 19 janv. 09:16 s-proxy.yml
- -rw-r--r-- 1 root root 161 15 janv. 17:38 s-test.yml
- drwxr-xr-x 3 root root 4096 15 janv. 17:38 sv
- drwxr-xr-x 2 root root 4096 15 janv. 17:38 tests
- drwxr-xr-x 2 root root 4096 15 janv. 17:38 vagrant
- drwxr-xr-x 2 root root 4096 15 janv. 17:38 windows
- drwxr-xr-x 7 root root 4096 19 janv. 09:16 wireguard
stderr: []
timeout: 10000

Some files were not shown because too many files have changed in this diff.