Compare commits
57 Commits
v0.0.4z-jm
...
v0.0.6p-jc
| SHA1 |
|---|
| bfdca163f7 |
| cb1b315819 |
| c086bcdc7f |
| 1134ca261d |
| b0d81dc69c |
| 331b8b0fb6 |
| 4025f996dc |
| a1ee9c6207 |
| a1442e534d |
| e78ef5948b |
| 298f105805 |
| d88745e741 |
| fffcb22db8 |
| abb8c15028 |
| 73b4560dd9 |
| 91d8b57029 |
| 37bbbad9dd |
| 84215f502b |
| 2606cd19b0 |
| b27ce2a372 |
| 18ce1f65ad |
| 116b84d230 |
| c92a7654d3 |
| 02c7f3dffd |
| 5a8558d701 |
| 7d6b15844a |
| 2653221559 |
| 3100ba51e2 |
| bbe58dbb01 |
| 7124d8aaff |
| 0afa2c3596 |
| 38602033b3 |
| 1c1993021b |
| b146170467 |
| df9d3c6c1c |
| d75f4ffb3f |
| eaf75de89e |
| 02fc23d224 |
| bdc71bbb3c |
| 308504062e |
| c3ad470fd1 |
| 2d3067d67b |
| 7d885b08b8 |
| d88044350a |
| ca6d1d2e09 |
| 1a2c349969 |
| 3a18a3bd9a |
| 239480a12b |
| f66774efe1 |
| b57b0763e9 |
| 79279fc3a1 |
| 54ef5103ca |
| a87853372c |
| 378a20f02a |
| 21ee40ab59 |
| d393b1eebe |
| bff32cd191 |
@@ -1,6 +1,6 @@
# gsb2024

2024-01-17 18h04 ps
2024-01-19 11h45 ps

Environnement et playbooks **ansible** pour le projet **GSB 2024**

@@ -23,8 +23,8 @@ Prérequis :
* **r-ext** : routage, NAT
* **s-proxy** : proxy **squid**
* **s-itil** : serveur GLPI
* **s-backup** : DNS esclave + sauvegarde s-win (SMB)
* **s-mon** : supervision avec **Nagios4**, notifications et syslog
* **s-backup** : DNS esclave + sauvegarde s-win (SMB), Stork et Gotify
* **s-mon** : supervision avec **Nagios4/Zabbix**, notifications et journald
* **s-fog** : deploiement postes de travail avec **FOG**
* **s-win** : Windows Server 2019, AD, DNS, DHCP, partage fichiers
* **s-nxc** : NextCloud avec **docker** via proxy inverse **traefik** et certificat auto-signé
25 goss.yaml Normal file
@@ -0,0 +1,25 @@
port:
  tcp:22:
    listening: true
    ip:
    - 0.0.0.0
  tcp6:22:
    listening: true
    ip:
    - '::'
service:
  sshd:
    enabled: true
    running: true
user:
  sshd:
    exists: true
    uid: 101
    gid: 65534
    groups:
    - nogroup
    home: /run/sshd
    shell: /usr/sbin/nologin
process:
  sshd:
    running: true
93 goss/s-kea1.yaml Normal file
@@ -0,0 +1,93 @@
file:
  /etc/kea/kea-ctrl-agent.conf:
    exists: true
    mode: "0644"
    owner: _kea
    group: root
    filetype: file
    contents: []
  /etc/kea/kea-dhcp4.conf:
    exists: true
    mode: "0644"
    owner: _kea
    group: root
    filetype: file
    contents: []
  /tmp/kea4-ctrl-socket:
    exists: true
    mode: "0755"
    size: 0
    owner: _kea
    group: _kea
    filetype: socket
    contains: []
    contents: null
  /usr/lib/x86_64-linux-gnu/kea:
    exists: true
    mode: "0755"
    owner: root
    group: root
    filetype: directory
    contents: []
package:
  isc-kea-common:
    installed: true
    versions:
    - 2.4.1-isc20231123184533
  isc-kea-ctrl-agent:
    installed: true
    versions:
    - 2.4.1-isc20231123184533
  isc-kea-dhcp4:
    installed: true
    versions:
    - 2.4.1-isc20231123184533
  isc-kea-hooks:
    installed: true
    versions:
    - 2.4.1-isc20231123184533
  libmariadb3:
    installed: true
    versions:
    - 1:10.11.4-1~deb12u1
  mariadb-common:
    installed: true
    versions:
    - 1:10.11.4-1~deb12u1
  mysql-common:
    installed: true
    versions:
    - 5.8+1.1.0
addr:
  udp://172.16.64.254:67:
    local-address: 127.0.0.1
    reachable: true
    timeout: 500
port:
  tcp:8000:
    listening: true
    ip:
    - 172.16.0.20
service:
  isc-kea-ctrl-agent.service:
    enabled: true
    running: true
  isc-kea-dhcp4-server.service:
    enabled: true
    running: true
interface:
  enp0s3:
    exists: true
    addrs:
    - 192.168.99.20/24
    mtu: 1500
  enp0s8:
    exists: true
    addrs:
    - 172.16.0.20/24
    mtu: 1500
  enp0s9:
    exists: true
    addrs:
    - 172.16.64.20/24
    mtu: 1500
93 goss/s-kea2.yaml Normal file
@@ -0,0 +1,93 @@
file:
  /etc/kea/kea-ctrl-agent.conf:
    exists: true
    mode: "0644"
    owner: _kea
    group: root
    filetype: file
    contents: []
  /etc/kea/kea-dhcp4.conf:
    exists: true
    mode: "0644"
    owner: _kea
    group: root
    filetype: file
    contents: []
  /tmp/kea4-ctrl-socket:
    exists: true
    mode: "0755"
    size: 0
    owner: _kea
    group: _kea
    filetype: socket
    contains: []
    contents: null
  /usr/lib/x86_64-linux-gnu/kea:
    exists: true
    mode: "0755"
    owner: root
    group: root
    filetype: directory
    contents: []
package:
  isc-kea-common:
    installed: true
    versions:
    - 2.4.1-isc20231123184533
  isc-kea-ctrl-agent:
    installed: true
    versions:
    - 2.4.1-isc20231123184533
  isc-kea-dhcp4:
    installed: true
    versions:
    - 2.4.1-isc20231123184533
  isc-kea-hooks:
    installed: true
    versions:
    - 2.4.1-isc20231123184533
  libmariadb3:
    installed: true
    versions:
    - 1:10.11.4-1~deb12u1
  mariadb-common:
    installed: true
    versions:
    - 1:10.11.4-1~deb12u1
  mysql-common:
    installed: true
    versions:
    - 5.8+1.1.0
addr:
  udp://172.16.64.254:67:
    local-address: 127.0.0.1
    reachable: true
    timeout: 500
port:
  tcp:8000:
    listening: true
    ip:
    - 172.16.0.21
service:
  isc-kea-ctrl-agent.service:
    enabled: true
    running: true
  isc-kea-dhcp4-server.service:
    enabled: true
    running: true
interface:
  enp0s3:
    exists: true
    addrs:
    - 192.168.99.21/24
    mtu: 1500
  enp0s8:
    exists: true
    addrs:
    - 172.16.0.21/24
    mtu: 1500
  enp0s9:
    exists: true
    addrs:
    - 172.16.64.21/24
    mtu: 1500
@@ -1,21 +1,38 @@
package:
  mysql-server:
    installed: true
    versions:
    - 5.5.54-0+deb8u1
command:
  egrep "#bind-address" /etc/mysql/my.cnf:
    exit-status: 0
    stdout:
    - "#bind-address\t\t= 127.0.0.1"
    stderr: []
    timeout: 10000
addr:
  tcp://192.168.102.1:80:
    reachable: true
    timeout: 500
  tcp://192.168.102.2:80:
    reachable: true
    timeout: 500
service:
  mariadb:
    enabled: true
    running: true
  mysql:
    enabled: true
    running: true
user:
  mysql:
    exists: true
    uid: 104
    gid: 111
    groups:
    - mysql
    home: /nonexistent
    shell: /bin/false
group:
  mysql:
    exists: true
    gid: 111
interface:
  enp0s3:
    exists: true
    addrs:
    - 192.168.99.13/24
  enp0s8:
    exists: true
    addrs:
    - 192.168.102.50/24
  enp0s3:
    exists: true
    addrs:
    - 192.168.99.154/24
    mtu: 1500
  enp0s8:
    exists: true
    addrs:
    - 192.168.102.254/24
    mtu: 1500
@@ -1,63 +1,62 @@
package:
  apache2:
    installed: true
    versions:
    - 2.4.10-10+deb8u7
  php5:
    installed: true
    versions:
    - 5.6.29+dfsg-0+deb8u1
  apache2:
    installed: true
    versions:
    - 2.4.57-2
  nfs-common:
    installed: true
    versions:
    - 1:2.6.2-4
port:
  tcp:22:
    listening: true
    ip:
    - 0.0.0.0
  tcp6:22:
    listening: true
    ip:
    - '::'
  tcp6:80:
    listening: true
    ip:
    - '::'
  tcp6:80:
    listening: true
    ip:
    - '::'
service:
  apache2:
    enabled: true
    running: true
  sshd:
    enabled: true
    running: true
user:
  sshd:
    exists: true
    uid: 105
    gid: 65534
    groups:
    - nogroup
    home: /var/run/sshd
    shell: /usr/sbin/nologin
command:
  egrep 192.168.102.14:/export/www /etc/fstab:
    exit-status: 0
    stdout:
    - 192.168.102.14:/export/www /var/www/html nfs _netdev rw 0 0
    stderr: []
    timeout: 10000
  apache2:
    enabled: true
    running: true
  nfs-common:
    enabled: false
    running: false
process:
  apache2:
    running: true
  sshd:
    running: true
  apache2:
    running: true
mount:
  /var/www/html:
    exists: true
    opts:
    - rw
    - relatime
    vfs-opts:
    - rw
    - vers=4.2
    - rsize=131072
    - wsize=131072
    - namlen=255
    - hard
    - proto=tcp
    - timeo=600
    - retrans=2
    - sec=sys
    - clientaddr=192.168.102.1
    - local_lock=none
    - addr=192.168.102.253
    source: 192.168.102.253:/home/wordpress
    filesystem: nfs4
interface:
  enp0s3:
    exists: true
    addrs:
    - 192.168.99.11/24
  enp0s8:
    exists: true
    addrs:
    - 192.168.101.1/24
  enp0s9:
    exists: true
    addrs:
    - 192.168.102.1/24
  enp0s3:
    exists: true
    addrs:
    - 192.168.99.101/24
    mtu: 1500
  enp0s8:
    exists: true
    addrs:
    - 192.168.101.1/24
    mtu: 1500
  enp0s9:
    exists: true
    addrs:
    - 192.168.102.1/24
    mtu: 1500
@@ -1,63 +1,62 @@
package:
  apache2:
    installed: true
    versions:
    - 2.4.10-10+deb8u7
  php5:
    installed: true
    versions:
    - 5.6.29+dfsg-0+deb8u1
  apache2:
    installed: true
    versions:
    - 2.4.57-2
  nfs-common:
    installed: true
    versions:
    - 1:2.6.2-4
port:
  tcp:22:
    listening: true
    ip:
    - 0.0.0.0
  tcp6:22:
    listening: true
    ip:
    - '::'
  tcp6:80:
    listening: true
    ip:
    - '::'
  tcp6:80:
    listening: true
    ip:
    - '::'
service:
  apache2:
    enabled: true
    running: true
  sshd:
    enabled: true
    running: true
user:
  sshd:
    exists: true
    uid: 105
    gid: 65534
    groups:
    - nogroup
    home: /var/run/sshd
    shell: /usr/sbin/nologin
command:
  egrep 192.168.102.14:/export/www /etc/fstab:
    exit-status: 0
    stdout:
    - 192.168.102.14:/export/www /var/www/html nfs _netdev rw 0 0
    stderr: []
    timeout: 10000
  apache2:
    enabled: true
    running: true
  nfs-common:
    enabled: false
    running: false
process:
  apache2:
    running: true
  sshd:
    running: true
  apache2:
    running: true
mount:
  /var/www/html:
    exists: true
    opts:
    - rw
    - relatime
    vfs-opts:
    - rw
    - vers=4.2
    - rsize=131072
    - wsize=131072
    - namlen=255
    - hard
    - proto=tcp
    - timeo=600
    - retrans=2
    - sec=sys
    - clientaddr=192.168.102.2
    - local_lock=none
    - addr=192.168.102.253
    source: 192.168.102.253:/home/wordpress
    filesystem: nfs4
interface:
  enp0s3:
    exists: true
    addrs:
    - 192.168.99.12/24
  enp0s8:
    exists: true
    addrs:
    - 192.168.101.2/24
  enp0s9:
    exists: true
    addrs:
    - 192.168.102.2/24
  enp0s3:
    exists: true
    addrs:
    - 192.168.99.102/24
    mtu: 1500
  enp0s8:
    exists: true
    addrs:
    - 192.168.101.2/24
    mtu: 1500
  enp0s9:
    exists: true
    addrs:
    - 192.168.102.2/24
    mtu: 1500
@@ -1,28 +1,55 @@
package:
  haproxy:
    installed: true
    versions:
    - 2.6.12-1+deb12u1
addr:
  tcp://192.168.101.1:80:
    reachable: true
    timeout: 500
  tcp://192.168.101.2:80:
    reachable: true
    timeout: 500
port:
  tcp:80:
    listening: true
    ip:
    - 192.168.100.11
  tcp:80:
    listening: true
    ip:
    - 192.168.100.10
service:
  haproxy:
    enabled: true
    running: true
  sshd:
    enabled: true
    running: true
  haproxy:
    enabled: true
    running: true
user:
  haproxy:
    exists: true
    uid: 104
    gid: 111
    groups:
    - haproxy
    home: /var/lib/haproxy
    shell: /usr/sbin/nologin
group:
  haproxy:
    exists: true
    gid: 111
process:
  haproxy:
    running: true
interface:
  enp0s3:
    exists: true
    addrs:
    - 192.168.99.100/24
    mtu: 1500
  enp0s8:
    exists: true
    addrs:
    - 192.168.100.11/24
    mtu: 1500
  enp0s9:
    exists: true
    addrs:
    - 192.168.101.254/24
    mtu: 1500
  enp0s3:
    exists: true
    addrs:
    - 192.168.99.100/24
    mtu: 1500
  enp0s8:
    exists: true
    addrs:
    - 192.168.100.10/24
    mtu: 1500
http:
  http://192.168.100.10/:
    status: 200
    allow-insecure: false
    no-follow-redirects: false
    timeout: 5000
    body: []
146 goss/s-mon.yaml
@@ -1,92 +1,62 @@
package:
  apache2:
    installed: true
  zabbix-server-mysql:
    installed: true
  zabbix-frontend-php:
    installed: true
  zabbix-apache-conf:
    installed: true
  zabbix-sql-scripts:
    installed: true
  zabbix-agent:
    installed: true
  mariadb-server:
    installed: true
  python3-pymysql:
    installed: true
  systemd-journal-remote:
    installed: true
file:
  /etc/systemd/system/systemd-journal-remote.service:
    exist: true
    mode: "0777"
    filetype: directory
  /var/log/journal/remote:
    exist: true
    mode: "0777"
    filetype: directory
port:
  tcp:80:
    listening: true
    ip:
    - 0.0.0.0
  tcp:3306:
    listening: true
    ip:
    - 127.0.0.1
  tcp:10050:
    listening: true
    ip:
    - 0.0.0.0
  tcp:10051:
    listening: true
    ip:
    - 0.0.0.0
  tcp:19532:
    listening: true
    ip:
    - '*'
  /etc/systemd/system/systemd-journal-remote.service:
    exists: true
    mode: "0644"
    owner: root
    group: root
    filetype: file
    contents: []
  /var/log/journal/remote:
    exists: true
    mode: "0755"
    owner: systemd-journal-remote
    group: systemd-journal-remote
    filetype: directory
    contents: []
package:
  apache2:
    installed: true
    versions:
    - 2.4.57-2
  mariadb-server:
    installed: true
    versions:
    - 1:10.11.4-1~deb12u1
  systemd-journal-remote:
    installed: true
    versions:
    - 252.19-1~deb12u1
service:
  apache2:
    enabled: true
    running: true
  zabbix-server:
    enabled: true
    running: true
  zabbix-agent:
    enabled: true
    running: true
  systemd-journal-remote.socket:
    enabled: true
    running: true
command:
  sysctl net.ipv4.ip_forward:
    exit-status: 0
    stdout:
    - net.ipv4.ip_forward = 0
    stderr: []
    timeout: 10000
process:
  apache2:
    running: true
  zabbix_server:
    running: true
  mariadb:
    running: true
  apache2:
    enabled: true
    running: true
  mariadb.service:
    enabled: true
    running: true
  systemd-journal-remote.socket:
    enabled: true
    running: true
  zabbix-agent:
    enabled: true
    running: true
  zabbix-server:
    enabled: true
    running: true
interface:
  enp0s3:
    exists: true
    addrs:
    - 192.168.99.8/24
  enp0s8:
    exists: true
    addrs:
    - 172.16.0.8/24
  enp0s3:
    exists: true
    addrs:
    - 192.168.99.8/24
    mtu: 1500
  enp0s8:
    exists: true
    addrs:
    - 172.16.0.8/24
    mtu: 1500
http:
  http://localhost/zabbix:
    status: 401
    allow-insecure: false
    no-follow-redirects: false
    timeout: 5000
    body: []
  http://s-mon.gsb.lan/zabbix:
    status: 200
    allow-insecure: false
    no-follow-redirects: false
    timeout: 5000
    body: []
55 goss/s-nas.yaml Normal file
@@ -0,0 +1,55 @@
file:
  /home/wordpress:
    exists: true
    mode: "0755"
    owner: www-data
    group: www-data
    filetype: directory
    contents: []
package:
  file:
    installed: true
    versions:
    - 1:5.44-3
  nfs-common:
    installed: true
    versions:
    - 1:2.6.2-4
  nfs-kernel-server:
    installed: true
    versions:
    - 1:2.6.2-4
addr:
  tcp://192.168.102.1:80:
    reachable: true
    timeout: 500
  tcp://192.168.102.2:80:
    reachable: true
    timeout: 500
service:
  nfs-common:
    enabled: false
    running: false
  nfs-kernel-server:
    enabled: true
    running: true
  nfs-mountd:
    enabled: true
    running: true
  nfs-server:
    enabled: true
    running: true
  nfs-utils:
    enabled: true
    running: false
interface:
  enp0s3:
    exists: true
    addrs:
    - 192.168.99.153/24
    mtu: 1500
  enp0s8:
    exists: true
    addrs:
    - 192.168.102.253/24
    mtu: 1500
145 goss/s-nxc.yaml Normal file
@@ -0,0 +1,145 @@
file:
  /root/nxc:
    exists: true
    mode: "0755"
    #size: 4096
    #owner: root
    #group: root
    filetype: directory
    contains: []

  /root/nxc/certs:
    exists: true
    mode: "0755"
    #size: 4096
    #owner: root
    #group: root
    filetype: directory
    contains: []

  /root/nxc/config:
    exists: true
    mode: "0755"
    #size: 4096
    #owner: root
    #group: root
    filetype: directory
    contains: []

  /root/nxc/config/dynamic.yml:
    exists: true
    mode: "0644"
    #size: 415
    #owner: root
    #group: root
    filetype: file
    contains: []

  /root/nxc/config/static.yml:
    exists: true
    mode: "0644"
    #size: 452
    #owner: root
    #group: root
    filetype: file
    contains: []

  /root/nxc/docker-compose.yml:
    exists: true
    mode: "0644"
    #size: 2135
    #owner: root
    #group: root
    filetype: file
    contains: []

  /root/nxc/nxc-debug.sh:
    exists: true
    mode: "0755"
    #size: 64
    #owner: root
    #group: root
    filetype: file
    contains: []

  /root/nxc/nxc-prune.sh:
    exists: true
    mode: "0755"
    #size: 110
    #owner: root
    #group: root
    filetype: file
    contains: []

  /root/nxc/nxc-start.sh:
    exists: true
    mode: "0755"
    #size: 34
    #owner: root
    #group: root
    filetype: file
    contains: []

  /root/nxc/nxc-stop.sh:
    exists: true
    mode: "0755"
    #size: 32
    #owner: root
    #group: root
    filetype: file
    contains: []

  /usr/local/bin/mkcert:
    exists: true
    mode: "0755"
    #size: 4788866
    #owner: root
    #group: root
    filetype: file
    contains: []

#addr:
#  tcp://s-nxc.gsb.lan:443:
#    reachable: true
#    timeout: 500

port:
  tcp:22:
    listening: true
    ip:
    - 0.0.0.0

  tcp:80:
    listening: true
    ip: []

  tcp:443:
    listening: true
    ip: []

  #tcp:8081:
  #  listening: true
  #  ip:
  #  - 0.0.0.0

interface:
  enp0s3:
    exists: true
    addrs:
    - 192.168.99.7/24
    mtu: 1500

  enp0s8:
    exists: true
    addrs:
    - 172.16.0.7/24
    mtu: 1500

http:
  https://s-nxc.gsb.lan:
    status: 200
    allow-insecure: true
    no-follow-redirects: false
    timeout: 5000
    body:
    - Nextcloud
@@ -11,7 +11,7 @@ GITPRJ=gsb2024
apt-get update
apt-get install -y lighttpd git
STOREREP="/var/www/html/gsbstore"

SRC="${SRC:-http://depl.sio.lan/gsbstore}"

GLPIREL=10.0.11
str="wget -nc -4 https://github.com/glpi-project/glpi/releases/download/${GLPIREL}/glpi-${GLPIREL}.tgz"
@@ -50,6 +50,12 @@ str84="wget -nc -4 https://artifacts.elastic.co/downloads/beats/metricbeat/metri

(cat <<EOT > "${STOREREP}/getall"
#!/bin/bash
if [[ -z "${SRC+x}" ]]; then
  echo "erreur : variable SRC indefinie"
  echo " SRC : URL serveur deploiement"
  echo "export SRC=http://depl.sio.adm/gsbstore ; ./$0"
  exit 1
fi

${str}
${str31}
@@ -72,6 +78,7 @@ ${str81}
${str82}
${str83}
${str84}
wget -nc -4 "${SRC}/zabbix.sql.gz" -O zabbix.sql.gz

EOT
)
@@ -5,7 +5,7 @@
;
$TTL 604800
@ IN SOA s-infra.gsb.lan. root.s-infra.gsb.lan. (
        2024011800 ; Serial
        2024011900 ; Serial
        7200       ; Refresh
        86400      ; Retry
        8419200    ; Expire
@@ -16,9 +16,11 @@ $TTL 604800
@        IN A    127.0.0.1
@        IN AAAA ::1
s-infra  IN A    172.16.0.1
s-backup IN A    172.16.0.4
s-proxy  IN A    172.16.0.2
s-appli  IN A    172.16.0.3
s-backup IN A    172.16.0.4
s-stork  IN A    172.16.0.4
s-gotify IN A    172.16.0.4
s-win    IN A    172.16.0.6
s-mess   IN A    172.16.0.7
s-nxc    IN A    172.16.0.7
BIN roles/docker/tasks/glpi-10.0.11.tgz Normal file
Binary file not shown.
@@ -7,7 +7,7 @@
- name: on verifie si docker est installe
  stat:
    path: /usr/bin/docker
  # command: which docker
  #command: which docker
  register: docker_present

- name: Execution du script getdocker si docker n'est pas deja installe
@@ -1,9 +1,22 @@
## Principe du rôle elk
ELK 8.5.3
# Le rôle elk
ELK Version 8.5.3

Ce rôle permet de créer un serveur ELK pour centraliser les logs et de des métriques pour simplifier la gestion du parc informatique GSB.
Le principe de ce rôle est d'installer docker, les différentes tâches de ce rôle sont de :

Ce rôle a pour but d'installer un serveur ELK pour centraliser les logs et les métriques pour simplifier la gestion du parc informatique GSB.


Le rôle **elk** installe **docker**, les différentes tâches de ce rôle sont de :
- Vérifier si ELK est déjà installé,
- Importation un docker-compose depuis github,
- Changement la configuration pour passer en version 'basic'
- clonage du depot **devianthony** depuis github,
- Changement de la configuration pour passer en version 'basic'
- Lancement d'ELK avec docker-compose

## Lancement manuel
- depuis le répertoire **nxc** :
````shell
docker compose up setup
docker compose up -d
````

@@ -21,7 +21,7 @@
    regexp: 'xpack.license.self_generated.type: trial'
    replace: 'xpack.license.self_generated.type: basic'

- name: Execution du fichier docker-compose.yml
  shell: docker compose up -d
  args:
    chdir: /root/elk
# - name: Execution du fichier docker-compose.yml
#   shell: docker compose pull
#   args:
#     chdir: /root/elk
@@ -1,6 +1,76 @@
Configuration de ferm

# [Ferm](http://ferm.foo-projects.org/)

Modifier l'execution d'iptables [plus d'info ici](https://wiki.debian.org/iptables)
Modifier l'execution d'iptables [plus d'info ici#!/bin/bash
set -u
set -e
# Version Site to Site

AddressAwg=10.0.0.1/32 # Adresse VPN Wireguard cote A
EndpointA=192.168.0.51 # Adresse extremite A
PortA=51820 # Port ecoute extremite A
NetworkA=192.168.1.0/24 # reseau cote A
NetworkC=192.168.200.0/24 #reseau cote A
NetworkD=172.16.0.0/24 #reseau cote A

AddressBwg=10.0.0.2/32 # Adresse VPN Wireguard cote B
EndpointB=192.168.0.52 # Adresse extremite B
PortB=51820 # Port ecoute extremite B
NetworkB=172.16.128.0/24 # reseau cote B

umask 077
wg genkey > endpoint-a.key
wg pubkey < endpoint-a.key > endpoint-a.pub

wg genkey > endpoint-b.key
wg pubkey < endpoint-b.key > endpoint-b.pub


PKA=$(cat endpoint-a.key)
pKA=$(cat endpoint-a.pub)
PKB=$(cat endpoint-b.key)
pKB=$(cat endpoint-b.pub)

cat <<FINI > wg0-a.conf
# local settings for Endpoint A
[Interface]
PrivateKey = $PKA
Address = $AddressAwg
ListenPort = $PortA

# IP forwarding
PreUp = sysctl -w net.ipv4.ip_forward=1

# remote settings for Endpoint B
[Peer]
PublicKey = $pKB
Endpoint = ${EndpointB}:$PortB
AllowedIPs = $AddressBwg, $NetworkB

FINI


cat <<FINI > wg0-b.conf
# local settings for Endpoint B
[Interface]
PrivateKey = $PKB
Address = $AddressBwg
ListenPort = $PortB

# IP forwarding
PreUp = sysctl -w net.ipv4.ip_forward=1

# remote settings for Endpoint A
[Peer]
PublicKey = $pKA
Endpoint = ${EndpointA}:$PortA
AllowedIPs = $AddressAwg, $NetworkA, $NetworkC, $NetworkD

FINI

echo "wg0-a.conf et wg0-b.conf sont generes ..."
echo "copier wg0-b.conf sur la machine b et renommer les fichiers de configuration ..."](https://wiki.debian.org/iptables)
```shell
update-alternatives --set iptables /usr/sbin/iptables-legacy
```
50 roles/gotify/tasks/main.yml Normal file
@@ -0,0 +1,50 @@
---
- name: Mise a jour apt cache
  apt:
    update_cache: yes

- name: Creation /etc/gotify
  ansible.builtin.file:
    path: /etc/gotify
    state: directory
    mode: '0755'

- name: Creation /opt/gotify
  ansible.builtin.file:
    path: /opt/gotify
    state: directory
    mode: '0755'

- name: installation de gotify
  get_url:
    url: "https://github.com/gotify/server/releases/latest/download/gotify-linux-amd64.zip"
    dest: "/tmp/gotify.zip"

- name: Extraction de Gotify
  ansible.builtin.unarchive:
    src: "/tmp/gotify.zip"
    dest: "/opt/gotify"
  become: yes

- name: Creation du fichier systemd
  template:
    src: "gotify.service.j2"
    dest: "/etc/systemd/system/gotify.service"
  become: yes

- name: Reload systemd
  systemd:
    daemon_reload: yes

- name: Creation du fichier conf gotify
  template:
    src: "config.yml.j2"
    dest: "/etc/gotify/config.yml"
  become: yes

- name: Demarage du gotify
  systemd:
    name: gotify
    state: started
    enabled: yes
4 roles/gotify/templates/config.yml.j2 Normal file
@@ -0,0 +1,4 @@
server:
  keepaliveperiodseconds: 0
  listenaddr: "" # the address to bind on, leave empty to bind on all addresses
  port: 8008
13 roles/gotify/templates/gotify.service.j2 Normal file
@@ -0,0 +1,13 @@
[Unit]
Description=Gotify Server
After=network.target

[Service]
Type=simple
User=root
ExecStart=/opt/gotify/gotify-linux-amd64
Restart=on-failure

[Install]
WantedBy=multi-user.target
Binary file not shown.
@@ -1,66 +0,0 @@
// This is an example of a configuration for Control-Agent (CA) listening
// for incoming HTTP traffic. This is necessary for handling API commands,
// in particular lease update commands needed for HA setup.
{
    "Control-agent":
    {
        // We need to specify where the agent should listen to incoming HTTP
        // queries.
        "http-host": "172.16.64.1",

        // This specifies the port CA will listen on.
        "http-port": 8000,

        "control-sockets":
        {
            // This is how the Agent can communicate with the DHCPv4 server.
            "dhcp4":
            {
                "comment": "socket to DHCPv4 server",
                "socket-type": "unix",
                "socket-name": "/tm/kea4-ctrl-socket"
            },

            // Location of the DHCPv6 command channel socket.
            # "dhcp6":
            # {
            #     "socket-type": "unix",
            #     "socket-name": "/tmp/kea6-ctrl-socket"
            # },

            // Location of the D2 command channel socket.
            # "d2":
            # {
            #     "socket-type": "unix",
            #     "socket-name": "/tmp/kea-ddns-ctrl-socket",
            #     "user-context": { "in-use": false }
            # }
        },

        // Similar to other Kea components, CA also uses logging.
        "loggers": [
        {
            "name": "kea-ctrl-agent",
            "output_options": [
                {
                    "output": "stdout",

                    // Several additional parameters are possible in addition
                    // to the typical output. Flush determines whether logger
                    // flushes output to a file. Maxsize determines maximum
                    // filesize before the file is rotated. maxver
                    // specifies the maximum number of rotated files being
                    // kept.
                    "flush": true,
                    "maxsize": 204800,
                    "maxver": 4,
                    // We use pattern to specify custom log message layout
                    "pattern": "%d{%y.%m.%d %H:%M:%S.%q} %-5p [%c/%i] %m\n"
                }
            ],
            "severity": "INFO",
            "debuglevel": 0 // debug level only applies when severity is set to DEBUG.
        }
        ]
    }
}
Binary file not shown.
21 roles/kea/README.md Normal file
@@ -0,0 +1,21 @@
# Rôle Kea
***
Rôle Kea : configuration de 2 serveurs KEA en mode haute disponibilité.

## Tables des matières
1. [Que fait le rôle Kea ?]
2. [Installation et configuration de kea]
3. [Remarques]


## Que fait le rôle Kea ?
Le rôle Kea permet de configurer 2 serveurs kea (s-kea1 et s-kea2) en mode haute disponibilité.
- Le serveur **s-kea1** sera en mode **primary** : il délivrera les baux DHCP sur le réseau n-user.
- Le serveur **s-kea2** sera en mode **stand-by** : le service DHCP basculera donc sur **s-kea2** en cas d'indisponibilité du serveur **s-kea1**.

### Installation et configuration de kea

Le rôle kea installe les paquets **kea dhcp4, hooks, admin**. Une fois les paquets installés, il configure un serveur kea pour qu'il distribue les IP sur le réseau n-user et soit en haute disponibilité.

### Remarques ###
Une fois le playbook **s-kea** correctement terminé et la machine **s-kea** redémarrée, redémarrer le service **isc-kea-dhcp4.service** afin de prendre en compte les modifications effectuées sur la couche réseau par le rôle POST.
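Exemple indicatif (esquisse hors dépôt) : une fois les deux serveurs configurés, l'état de la haute disponibilité peut se vérifier en interrogeant l'agent de contrôle Kea ; l'adresse et le port sont supposés repris de la configuration (ici 172.16.0.20:8000).

```shell
# Esquisse : interroger l'agent de contrôle pour connaître l'état du serveur DHCPv4 (dont l'état HA)
curl -s -X POST http://172.16.0.20:8000/ \
  -H 'Content-Type: application/json' \
  -d '{ "command": "status-get", "service": [ "dhcp4" ] }'
```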
@@ -6,7 +6,7 @@
{
// We need to specify where the agent should listen to incoming HTTP
// queries.
"http-host": "172.16.64.1",
"http-host": "172.16.0.20",

// This specifies the port CA will listen on.
"http-port": 8000,
@@ -18,7 +18,7 @@
{
"comment": "socket to DHCPv4 server",
"socket-type": "unix",
"socket-name": "/tm/kea4-ctrl-socket"
"socket-name": "/tmp/kea4-ctrl-socket"
},

// Location of the DHCPv6 command channel socket.
12 roles/kea/handlers/main.yml Normal file
@@ -0,0 +1,12 @@
---
- name: Restart isc-kea-dhcp4-server
  ansible.builtin.service:
    name: isc-kea-dhcp4-server.service
    state: restarted
    enabled: yes

- name: Restart isc-kea-ctrl-agent
  ansible.builtin.service:
    name: isc-kea-ctrl-agent.service
    state: restarted
    enabled: yes
43 roles/kea/tasks/main.yml Normal file
@@ -0,0 +1,43 @@
---

- name: Preparation
  ansible.builtin.shell: curl -1sLf 'https://dl.cloudsmith.io/public/isc/kea-2-4/setup.deb.sh' | sudo -E bash

- name: Update apt
  ansible.builtin.apt:
    update_cache: yes

#- name: Installation paquet isc-kea-common
#  ansible.builtin.apt:
#    deb: isc-kea-common
#    state: present

- name: Installation isc-kea-dhcp4
  ansible.builtin.apt:
    name: isc-kea-dhcp4-server
    state: present

- name: Installation isc-kea-ctrl-agent
  ansible.builtin.apt:
    name: isc-kea-ctrl-agent
    state: present

- name: Installation isc-kea-hooks
  ansible.builtin.apt:
    name: isc-kea-hooks
    state: present

- name: Generation ---- du fichier de configuration kea-ctrl-agent
  ansible.builtin.template:
    src: kea-ctrl-agent.conf.j2
    dest: /etc/kea/kea-ctrl-agent.conf
  notify:
    - Restart isc-kea-ctrl-agent

- name: Generation du fichier de configuration kea-dhcp4.conf
  ansible.builtin.template:
    src: kea-dhcp4.conf.j2
    dest: /etc/kea/kea-dhcp4.conf
  notify:
    - Restart isc-kea-dhcp4-server
32 roles/kea/templates/kea-ctrl-agent.conf.j2 Normal file
@@ -0,0 +1,32 @@
{
    "Control-agent":
    {
        "http-host": "{{ kea_ctrl_address_this }}",
        "http-port": 8000,
        "control-sockets":
        {
            "dhcp4":
            {
                "socket-type": "unix",
                "socket-name": "/tmp/kea4-ctrl-socket"
            },
        },

        "loggers": [
        {
            "name": "kea-ctrl-agent",
            "output_options": [
                {
                    "output": "stdout",
                    "flush": true,
                    "maxsize": 204800,
                    "maxver": 4,
                    {% raw %} "pattern": "%d{%y.%m.%d %H:%M:%S.%q} %-5p [%c/%i] %m\n", {% endraw %}
                }
            ],
            "severity": "INFO",
            "debuglevel": 0
        }
        ]
    }
}
@@ -22,7 +22,7 @@
// The DHCPv4 server listens on this interface. When changing this to
// the actual name of your interface, make sure to also update the
// interface parameter in the subnet definition below.
"interfaces": [ "enp0s8" ]
"interfaces": ["{{ kea_dhcp_int }}"]
},

// Control socket is required for communication between the Control
@@ -76,19 +76,19 @@
// deliver lease updates to the server as well as synchronize the
// lease database after failure.
{
"library": "/usr/local/lib/kea/hooks/libdhcp_lease_cmds.so"
"library": "/usr/lib/x86_64-linux-gnu/kea/hooks/libdhcp_lease_cmds.so"
},

{
// The HA hook library should be loaded.
"library": "/usr/local/lib/kea/hooks/libdhcp_ha.so",
"library": "/usr/lib/x86_64-linux-gnu/kea/hooks/libdhcp_ha.so",
"parameters": {
// Each server should have the same HA configuration, except for the
// "this-server-name" parameter.
"high-availability": [ {
// This parameter points to this server instance. The respective
// HA peers must have this parameter set to their own names.
"this-server-name": "kea1",
"this-server-name": "{{ kea_this_server }}",
// The HA mode is set to hot-standby. In this mode, the active server handles
// all the traffic. The standby takes over if the primary becomes unavailable.
"mode": "hot-standby",
@@ -116,24 +116,24 @@
"peers": [
// This is the configuration of this server instance.
{
"name": "kea1",
"name": "{{ kea_srv1 }}",
// This specifies the URL of this server instance. The
// Control Agent must run along with this DHCPv4 server
// instance and the "http-host" and "http-port" must be
// set to the corresponding values.
"url": "http://172.16.64.1:8000/",
"url": "http://{{ kea_ctrl_address1 }}:8000/",
// This server is primary. The other one must be
// secondary.
"role": "primary"
},
// This is the configuration of the secondary server.
{
"name": "kea2",
"name": "{{ kea_srv2 }}",
// Specifies the URL on which the partner's control
// channel can be reached. The Control Agent is required
// to run on the partner's machine with "http-host" and
// "http-port" values set to the corresponding values.
"url": "http://172.16.64.2:8000/",
"url": "http://{{ kea_ctrl_address2 }}:8000/",
// The other server is secondary. This one must be
// primary.
"role": "standby"
@@ -152,7 +152,7 @@

// There are no relays in this network, so we need to tell Kea that this subnet
// is reachable directly via the specified interface.
"interface": "enp0s8",
"interface": "enp0s9",

// Specify a dynamic address pool.
"pools": [
@@ -171,7 +171,7 @@
{
// For each IPv4 subnet you typically need to specify at least one router.
"name": "routers",
"data": "172.16.64.1"
"data": "172.16.64.254"
},
{
// Using cloudflare or Quad9 is a reasonable option. Change this
@@ -179,7 +179,7 @@
// choice is 8.8.8.8, owned by Google. Using third party DNS
// service raises some privacy concerns.
"name": "domain-name-servers",
"data": "172.16.64.1"
"data": "172.16.0.1"
}
],
10 roles/lb-bd/README.md Normal file
@@ -0,0 +1,10 @@
# Role lb-bd
***
Rôle lb-bd pour la mise en place de la base de données du serveur WordPress.

## Tables des matières
1. Que fait le rôle lb-bd ?


## Que fait le rôle lb-bd ?
Ce rôle installe le paquet `mariadb-server` puis crée et configure la base de données nommée **wordpressdb** en ouvrant le port 3306 et en créant l'utilisateur MySQL nommé **wordpressuser** avec le mot de passe **wordpresspasswd**.
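À titre d'illustration (esquisse hors dépôt), la création de la base et de l'utilisateur décrits ci-dessus correspond à des ordres SQL de ce type :

```shell
# Esquisse : noms repris du README (wordpressdb, wordpressuser, wordpresspasswd)
mysql -u root <<'SQL'
CREATE DATABASE IF NOT EXISTS wordpressdb;
CREATE USER IF NOT EXISTS 'wordpressuser'@'%' IDENTIFIED BY 'wordpresspasswd';
GRANT ALL PRIVILEGES ON wordpressdb.* TO 'wordpressuser'@'%';
FLUSH PRIVILEGES;
SQL
```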
22 roles/lb-front/README.md Normal file
@@ -0,0 +1,22 @@
# Rôle lb-front
***
Rôle lb-front pour la répartition de charge des serveurs web sur WordPress avec HAProxy

## Tables des matières
1. Que fait le rôle lb-front ?
2. Ordre d'installation des serveurs.


## Que fait le rôle lb-front ?

Le rôle lb-front va installer `haproxy` pour le load balancing/la répartition de charge et va configurer le fichier `/etc/haproxy/haproxy.cfg`.

Le fichier va faire du Round-Robin, un algorithme qui va équilibrer le nombre de requêtes entre s-lb-web1 et s-lb-web2.

Le site web est accessible à l'adresse <http://s-lb.gsb.adm>.

## Ordre d'installation des serveurs.
1. Le serveur s-lb avec haproxy qui va "initialiser" les sous-réseaux dans la DMZ.
2. Le serveur s-lb-bd qui va contenir la base de données WordPress utilisée par les serveurs web.
3. Le serveur s-nas qui va stocker la configuration WordPress et la partager aux serveurs web en NFS. Il va aussi installer la base de données sur s-lb-bd.
4. Les serveurs s-web1 et s-web2 qui vont installer Apache2, PHP et afficher le serveur WordPress.
@@ -1,23 +0,0 @@
port:
  tcp:80:
    listening: true
    ip:
    - 192.168.100.11
service:
  haproxy:
    enabled: true
    running: true
  sshd:
    enabled: true
    running: true
interface:
  enp0s8:
    exists: true
    addrs:
    - 192.168.100.11/24
    mtu: 1500
  enp0s9:
    exists: true
    addrs:
    - 192.168.101.254/24
    mtu: 1500
@@ -41,7 +41,7 @@ frontend proxypublic
backend fermeweb
    balance roundrobin
    option httpclose
    #option httpchk HEAD / HTTP/1.0
    option httpchk HEAD / HTTP/1.0
    server s-lb-web1 192.168.101.1:80 check
    server s-lb-web2 192.168.101.2:80 check

@@ -14,7 +14,7 @@
backend fermeweb
    balance roundrobin
    option httpclose
    #option httpchk HEAD / HTTP/1.0
    option httpchk HEAD / HTTP/1.0
    server s-lb-web1 192.168.101.1:80 check
    server s-lb-web2 192.168.101.2:80 check

@@ -1,3 +1,10 @@
##Partage NFS

Ce rôle sert à installer nfs et à monter le répertoire /home/wordpress du s-nas dans /var/www/html/wordpress sur les serveurs web.
# Rôle lb-nfs-client
***
Rôle lb-nfs-client pour l'accès au serveur NFS sur les serveurs lb-web1 et lb-web2.

## Tables des matières
1. Que fait le rôle lb-nfs-client ?


## Que fait le rôle lb-nfs-client ?
Ce rôle sert à installer le paquet `nfs-common` et à monter le répertoire /home/wordpress du s-nas dans /var/www/html/wordpress sur les serveurs web.
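Esquisse indicative (hors dépôt) du montage décrit ci-dessus, en supposant que s-nas répond sur 192.168.102.253 :

```shell
# Esquisse : montage NFS du partage wordpress (chemins repris du README, adresse supposée)
mount -t nfs 192.168.102.253:/home/wordpress /var/www/html/wordpress
# ou, de façon persistante, via /etc/fstab :
# 192.168.102.253:/home/wordpress  /var/www/html/wordpress  nfs  _netdev,rw  0 0
```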
|
@ -1,10 +1,17 @@
|
||||
# Role s-nas-server
|
||||
## Installation de nfs-server et mise en oeuvre du partage /home/wordpress
|
||||
|
||||
# Role lb-nfs-server
|
||||
***
|
||||
Rôle lb-nfs-server pour la mise en place du partage des fichiers de configuration de WordPress.
|
||||
|
||||
## Tables des matières
|
||||
1. Que fait le rôle lb-nfs-server ?
|
||||
|
||||
|
||||
## Que fait le rôle lb-nfs-server ?
|
||||
Ce rôle :
|
||||
* installe **nfs-server**
|
||||
* installe le paquet `nfs-server`
|
||||
* copie le fichier de configuration **exports** pour exporter le répertoire **/home/wordpress**
|
||||
* relance le service **nfs-server**
|
||||
* décompresse wordpress
|
||||
### Objectif
|
||||
Le répertoire **/home/wordpress** est exporté par **nfs** sur le réseau **n-dmz-db**
|
||||
* décompresse WordPress dans **/home/wordpress**
|
||||
* relance le service `nfs-server`
|
||||
* Configure l'accès de WordPress à la base de données dans le fichier `wp-config.php`
|
||||
|
||||
Le répertoire **/home/wordpress** est exporté par NFS dans le sous-réseau **n-dmz-db**
|
||||
|
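Exemple indicatif (esquisse hors dépôt) d'entrée **exports** correspondant à ce partage, en supposant que le réseau **n-dmz-db** est 192.168.102.0/24 :

```shell
# Esquisse : export NFS de /home/wordpress vers le réseau n-dmz-db (adresse de réseau supposée)
echo '/home/wordpress 192.168.102.0/24(rw,sync,no_subtree_check)' >> /etc/exports
exportfs -ra                 # recharge la table des exports
systemctl restart nfs-server
```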
@@ -1,3 +1,12 @@
##Téléchargement et configuration de WordPress

Ce rôle télécharge wordpress depuis s-adm puis configure le fichier wp-config.php pour la situation du gsb.
# Rôle lb-web
***
Rôle lb-web pour l'affichage et l'utilisation du site web.

## Tables des matières
1. Que fait le rôle lb-web ?


## Que fait le rôle lb-web ?
Ce rôle télécharge les paquets nécessaires au fonctionnement du site web (`apache2`, `php` et `mariadb-client`) qui permettront aux serveurs web d'accéder à la base de données de WordPress.

Le site web est accessible à l'adresse http://s-lb.gsb.adm.
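Esquisse indicative (hors dépôt) de l'installation des paquets cités ci-dessus sur les serveurs web :

```shell
apt-get update
apt-get install -y apache2 php php-mysql mariadb-client
```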
@@ -1,8 +1,16 @@
# Installation de Nextcloud et du proxy inverse Traefik

Nextcloud et Traefik fonctionnent grâce à docker. Pour pouvoir faire fonctionner ce playbook, docker doit être installé.
## Explication de l'installation de Nextcloud
Afin de pouvoir faire fonctionner Nextcloud et Traefik, il faut mettre en place docker. Dans un premier plan, il vas donc falloir lancer le script **getall** sur **s-adm**. Ensuite dans un second temps, il faudra etre dans le fichier **/nxc** sur **s-nxc** et lancer **docker-compose.yaml**. Pour finir, il faudra ajouter l'authentification LDAP au nextcloud grace a l'AD de **s-win**.

## 1.
# <p align="center">Procédure d'installation</p>

***
## 1. Installation docker

Voir: https://gitea.lyc-lecastel.fr/gsb/gsb2024/src/branch/main/roles/docker

## 2. Fonctionnement du playbook s-nxc

Le playbook crée le dossier **nxc** à la racine de root.

@@ -10,11 +18,11 @@ Les fichiers "nextcloud.yml" et "traefik.yml" y seront copiés depuis le répert

Enfin, dans le répertoire nxc, sont créés les répertoires **certs** et **config**.

## 2. Copie des fichiers
### 2.1 Copie des fichiers

Le playbook copie les fichiers placés dans "files" et les placer dans les bons répertoires.
Le playbook copie les fichiers placés dans "files" et les places dans les bons répertoires.

## 3. Génération du certificat
### 2.2 Génération du certificat

Le playbook crée un certificat **x509** grâce à **mkcert**, il s'agit d'une solution permettant de créer des certificats auto-signés. Pour cela, il télécharge **mkcert** sur **s-adm** (utiliser le script **getall**).

@@ -25,7 +33,7 @@ Pour créer le certificat, le playbook exécute les commandes (lancé depuis nxc
/usr/local/bin/mkcert -install # Installe mkcert
/usr/local/bin/mkcert -key-file key.pem -cert-file cert.pem "hôte.domaine.local" "*.domaine.local" #Crée le certificat le DNS spécifié
```
## 4. Lancement
## 3. Lancement

Le playbook lance les fichiers "docker-compose" à savoir : nextcloud.yml et traefik.yml qui démarrent les deux piles **docker**.

@@ -37,22 +45,28 @@ ATTENTION : Après avoir relancé la VM, executez le script "nxc-start.sh" afin
Une fois le script terminé, le site est disponible ici : https://s-nxc.gsb.lan


## 5. Ajout authentification LDAP
## 4. Ajout authentification LDAP

Pour ajouter l'authentification LDAP au Nextcloud, il faut :
* Une fois l'installation de Nextcloud terminé, cliquez sur le profil et Application
Pour ajouter l'authentification LDAP au Nextcloud, depuis **n-user** il faut :
* Une fois l'installation de Nextcloud terminé, cliquez sur le profil et "Application"
* Dans vos applications, descendre et activer "LDAP user and group backend"
* Puis cliquer sur le profil, puis Paramètres d'administration et dans Administration cliquer sur Intégration LDAP/AD
* Puis cliquer sur le profil, puis "Paramètres d'administration" et dans "Administration" cliquer sur "Intégration LDAP/AD"
* Une fois sur la page d'intégration LDAP/AD :
* Dans Hôte mettre :
> ldap://s-win.gsb.lan
* Cliquer sur Détecter le port (normalement le port 389 apparait)
> **ldap://s-win.gsb.lan**
* Cliquer sur "Détecter le port" (normalement le port 389 apparait)
* Dans DN Utilisateur mettre :
> CN=nextcloud,CN=Users,DC=GSB,DC=LAN
> **CN=nextcloud,CN=Users,DC=gsb,DC=lan**
* Mot de passe :
> Azerty1+
* Et dans Un DN de base par ligne :
> DC=GSB,DC=LAN
* Après la configuration passe OK
* Une fois la configuration finie, cliquer 3 fois sur continuer
* Une fois arrivé sur Groupes, vous pouvez vous déconnecter du compte Admin et vous connecter avec un compte qui est dans l'AD.
> **Azerty1+**
* Et dans "Un DN de base par ligne" :
> **DC=gsb,DC=lan**
* Cliquer sur "Détecter le DN de base" (normalement il apparaitra automatiquement)
* Après la configuration réaliser, cliquer sur "Continuer", puis cliquer 3 fois sur continuer
* Une fois arrivé sur "Groupes", vous pouvez vous déconnecter du compte Admin et vous connecter avec un compte qui est dans l'AD.

## Contributeurs

- LG
- CH

@@ -53,8 +53,8 @@ services:
    image: nextcloud
    container_name: app
    restart: always
    ports:
      - 8081:80
    #ports:
      #- 8081:80
    #links:
    depends_on:
      - db
22 roles/nxc-traefik/files/savenextcloud.sh Normal file
@@ -0,0 +1,22 @@
#!/bin/bash

# Mettre le serveur NextCloud en mode maintenance
docker compose exec -u www-data app php occ maintenance:mode --on

# Extraire les dossiers de sauvegarde
cd /root/nxc

# Copie locale de la sauvegarde
rsync -Aavx nextcloud/ nextcloud-dirbkp/

# Base de données MySQL/MariaDB
docker compose exec db mysqldump -u nextcloud -pAzerty1+ nextcloud > nextcloud-sqlbkp.bak

# Sortir du mode maintenance
docker compose exec -u www-data app php occ maintenance:mode --off

# création d'une archive
tar cvfz nxc.tgz nextcloud-sqlbkp.bak nextcloud-dirbkp

@@ -22,7 +22,7 @@
// The DHCPv4 server listens on this interface. When changing this to
// the actual name of your interface, make sure to also update the
// interface parameter in the subnet definition below.
"interfaces": [ "enp0s8" ]
"interfaces": [ "enp0s9" ]
},

// Control socket is required for communication between the Control
@@ -88,7 +88,7 @@
"high-availability": [ {
// This parameter points to this server instance. The respective
// HA peers must have this parameter set to their own names.
"this-server-name": "kea1",
"this-server-name": "s-kea1.gsb.lan",
// The HA mode is set to hot-standby. In this mode, the active server handles
// all the traffic. The standby takes over if the primary becomes unavailable.
"mode": "hot-standby",
@@ -116,24 +116,24 @@
"peers": [
// This is the configuration of this server instance.
{
"name": "kea1",
"name": "s-kea1.gsb.lan",
// This specifies the URL of this server instance. The
// Control Agent must run along with this DHCPv4 server
// instance and the "http-host" and "http-port" must be
// set to the corresponding values.
"url": "http://172.16.64.1:8000/",
"url": "http://172.16.64.20:8000/",
// This server is primary. The other one must be
// secondary.
"role": "primary"
},
// This is the configuration of the secondary server.
{
"name": "kea2",
"name": "s-kea2.gsb.lan",
// Specifies the URL on which the partner's control
// channel can be reached. The Control Agent is required
// to run on the partner's machine with "http-host" and
// "http-port" values set to the corresponding values.
"url": "http://172.16.64.2:8000/",
"url": "http://172.16.64.21:8000/",
// The other server is secondary. This one must be
// primary.
"role": "standby"
@@ -152,7 +152,7 @@

// There are no relays in this network, so we need to tell Kea that this subnet
// is reachable directly via the specified interface.
"interface": "enp0s8",
"interface": "enp0s9",

// Specify a dynamic address pool.
"pools": [
@@ -171,7 +171,7 @@
{
// For each IPv4 subnet you typically need to specify at least one router.
"name": "routers",
"data": "172.16.64.1"
"data": "172.16.64.254"
},
{
// Using cloudflare or Quad9 is a reasonable option. Change this
@@ -179,7 +179,7 @@
// choice is 8.8.8.8, owned by Google. Using third party DNS
// service raises some privacy concerns.
"name": "domain-name-servers",
"data": "172.16.64.1"
"data": "172.16.0.1"
}
],
8 roles/old/kea-slave/default/main.yml Normal file
@@ -0,0 +1,8 @@
#variable kea
kea_ver: "2.4.1"
kea_dbname: ""
kaa_dbuser: ""
kea_dbpasswd: ""
kea_dhcp4_dir: "/etc/kea/kea-dhcp4.conf"
kea_ctrl_dir: "/etc/kea/kea-ctrl-agent.conf"

10 roles/smb-backup/files/backupnxc.sh Normal file
@@ -0,0 +1,10 @@
#!/bin/bash

# envoie sur s-backup
BACKUP=/home/backup/s-nxc

# Préparation des dossiers qui vont accueillir les données à sauvegarder (-e lance le répertoire si il existe)
[[ -e "${BACKUP}" ]] || mkdir -p "${BACKUP}"

# Sauvegarde du fichier nxc.tgz vers la machine s-backup
scp -i ~/.ssh/id_rsa_sbackup root@s-nxc.gsb.adm:/root/nxc/nxc.tgz "${BACKUP}/"
@@ -14,6 +14,14 @@
    group: root
    mode: '0755'

- name: copie script backupnxc dans /usr/local/bin
  copy:
    src: backupnxc.sh
    dest: /usr/local/bin
    owner: root
    group: root
    mode: '0755'

- name: crontab backupsmb ( commentee par defaut )
  cron:
    name: backupsmb
1 roles/ssh-backup-key-gen/README.md Normal file
@@ -0,0 +1 @@
###Génération de clé publique et privée###
20 roles/ssh-backup-key-gen/tasks/main.yml Normal file
@@ -0,0 +1,20 @@
---
- name: on genere une cle privee pour s-backup
  openssh_keypair:
    path: /root/id_rsa_sbackup
    type: rsa
    state: present

- name: copie cle publique dans gsbstore
  copy:
    src: /root/id_rsa_sbackup.pub
    dest: /var/www/html/gsbstore
    mode: 0644
    remote_src: yes

- name: copie cle privee dans gsbstore
  copy:
    src: /root/id_rsa_sbackup
    dest: /var/www/html/gsbstore
    mode: 0600
    remote_src: yes
13 roles/ssh-backup-key-private/tasks/main.yml Normal file
@@ -0,0 +1,13 @@
---
- name: creation .ssh
  file:
    path: ~/.ssh
    state: directory
    mode: 0700

- name: recuperation de la cle privee generee par s-adm
  get_url:
    url: http://s-adm.gsb.adm/gsbstore/id_rsa_sbackup
    dest: /root/.ssh/id_rsa_sbackup
    mode: 0600

6 roles/ssh-backup-key-pub/tasks/main.yml Normal file
@@ -0,0 +1,6 @@
---
- name: recuperation de la cle publique generee par s-adm
  ansible.posix.authorized_key:
    user: root
    state: present
    key: http://s-adm.gsb.adm/gsbstore/id_rsa_sbackup.pub
21
roles/stork-agent/README.md
Normal file
@ -0,0 +1,21 @@
# Kea role
***
Kea role: configuration of two KEA servers in high-availability mode.

## Table of contents
1. [What does the Kea role do?]
2. [Installing and configuring Kea]
3. [Remarks]


## What does the Kea role do?
The Kea role configures the two Kea servers (s-kea1 and s-kea2) in high-availability mode.
- **s-kea1** runs as the **primary** server and hands out the DHCP leases on the n-user network.
- **s-kea2** runs in **stand-by** mode; the DHCP service fails over to **s-kea2** if **s-kea1** becomes unavailable.

### Installing and configuring Kea

The Kea role installs the **kea dhcp4, hooks and admin** packages and, once they are installed, configures the server so that it distributes IP addresses on the n-user network and runs in high availability.

### Remarks ###
Once the **s-kea** playbook has completed and the **s-kea** machine has been rebooted, restart the **isc-kea-dhcp4.service** service so that the network changes made by the POST role are taken into account.
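As a concrete illustration of the remark above — a sketch, not part of the role; the service name is the one quoted in the README — the restart after the first reboot amounts to:

```bash
# re-read the network changes applied by the POST role
systemctl restart isc-kea-dhcp4.service
systemctl status isc-kea-dhcp4.service --no-pager
```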
7
roles/stork-agent/handlers/main.yml
Normal file
@ -0,0 +1,7 @@
---
- name: Restart isc-stork-agent
  ansible.builtin.service:
    name: isc-stork-agent.service
    state: restarted
    enabled: yes

21
roles/stork-agent/tasks/main.yml
Normal file
@ -0,0 +1,21 @@
---

- name: Preparation
  ansible.builtin.shell: curl -1sLf 'https://dl.cloudsmith.io/public/isc/stork/cfg/setup/bash.deb.sh' | sudo bash

- name: Update apt
  ansible.builtin.apt:
    update_cache: yes

- name: Installation isc-stork-agent
  ansible.builtin.apt:
    name: isc-stork-agent
    state: present

- name: Generation du fichier de configuration agent.env
  ansible.builtin.template:
    src: agent.env.j2
    dest: /etc/stork/agent.env
  notify:
    - Restart isc-stork-agent
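After this role has run, a quick sanity check could look like the following sketch (the service name comes from the handler above; the port is the stork_port value set in the playbook vars, 8081 in s-kea1.yml and s-kea2.yml):

```bash
systemctl status isc-stork-agent.service --no-pager
ss -lnt | grep ':8081'   # agent listening on the configured stork_port
```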
45
roles/stork-agent/templates/agent.env.j2
Normal file
@ -0,0 +1,45 @@
### the IP or hostname to listen on for incoming Stork server connections
STORK_AGENT_HOST={{ stork_host }}

### the TCP port to listen on for incoming Stork server connections
STORK_AGENT_PORT={{ stork_port }}

### listen for commands from the Stork server only, but not for Prometheus requests
# STORK_AGENT_LISTEN_STORK_ONLY=true

### listen for Prometheus requests only, but not for commands from the Stork server
# STORK_AGENT_LISTEN_PROMETHEUS_ONLY=true

### settings for exporting stats to Prometheus
### the IP or hostname on which the agent exports Kea statistics to Prometheus
# STORK_AGENT_PROMETHEUS_KEA_EXPORTER_ADDRESS=
### the port on which the agent exports Kea statistics to Prometheus
# STORK_AGENT_PROMETHEUS_KEA_EXPORTER_PORT=
### how often the agent collects stats from Kea, in seconds
# STORK_AGENT_PROMETHEUS_KEA_EXPORTER_INTERVAL=
## enable or disable collecting per-subnet stats from Kea
# STORK_AGENT_PROMETHEUS_KEA_EXPORTER_PER_SUBNET_STATS=true
### the IP or hostname on which the agent exports BIND 9 statistics to Prometheus
# STORK_AGENT_PROMETHEUS_BIND9_EXPORTER_ADDRESS=
### the port on which the agent exports BIND 9 statistics to Prometheus
# STORK_AGENT_PROMETHEUS_BIND9_EXPORTER_PORT=
### how often the agent collects stats from BIND 9, in seconds
# STORK_AGENT_PROMETHEUS_BIND9_EXPORTER_INTERVAL=

### Stork Server URL used by the agent to send REST commands to the server during agent registration
STORK_AGENT_SERVER_URL=http://s-backup.gsb.lan:8080/

### skip TLS certificate verification when the Stork Agent connects
### to Kea over TLS and Kea uses self-signed certificates
# STORK_AGENT_SKIP_TLS_CERT_VERIFICATION=true


### Logging parameters

### Set logging level. Supported values are: DEBUG, INFO, WARN, ERROR
# STORK_LOG_LEVEL=DEBUG
### disable output colorization
# CLICOLOR=false

### path to the hook directory
# STORK_AGENT_HOOK_DIRECTORY=
21
roles/stork-server/README.md
Normal file
@ -0,0 +1,21 @@
# Kea role
***
Kea role: configuration of two KEA servers in high-availability mode.

## Table of contents
1. [What does the Kea role do?]
2. [Installing and configuring Kea]
3. [Remarks]


## What does the Kea role do?
The Kea role configures the two Kea servers (s-kea1 and s-kea2) in high-availability mode.
- **s-kea1** runs as the **primary** server and hands out the DHCP leases on the n-user network.
- **s-kea2** runs in **stand-by** mode; the DHCP service fails over to **s-kea2** if **s-kea1** becomes unavailable.

### Installing and configuring Kea

The Kea role installs the **kea dhcp4, hooks and admin** packages and, once they are installed, configures the server so that it distributes IP addresses on the n-user network and runs in high availability.

### Remarks ###
Once the **s-kea** playbook has completed and the **s-kea** machine has been rebooted, restart the **isc-kea-dhcp4.service** service so that the network changes made by the POST role are taken into account.
8
roles/stork-server/default/main.yml
Normal file
@ -0,0 +1,8 @@
# Kea variables
kea_ver: "2.4.1"
kea_dbname: ""
kaa_dbuser: ""
kea_dbpasswd: ""
kea_dhcp4_dir: "/etc/kea/kea-dhcp4.conf"
kea_ctrl_dir: "/etc/kea/kea-ctrl-agent.conf"

6
roles/stork-server/handlers/main.yml
Normal file
@ -0,0 +1,6 @@
---
- name: Restart isc-stork-server.service
  ansible.builtin.service:
    name: isc-stork-server.service
    state: restarted
    enabled: yes
31
roles/stork-server/tasks/main.yml
Normal file
@ -0,0 +1,31 @@
---

- name: Preparation
  ansible.builtin.shell: curl -1sLf 'https://dl.cloudsmith.io/public/isc/stork/cfg/setup/bash.deb.sh' | sudo bash

- name: Update apt
  ansible.builtin.apt:
    update_cache: yes

#- name: Installation paquet isc-kea-common
#  ansible.builtin.apt:
#    deb: isc-kea-common
#    state: present

- name: Installation isc-stork-server postgresql
  ansible.builtin.apt:
    pkg:
      - isc-stork-server
      - postgresql-15

- name: lancer la commande de création de la base de donnees stork
  ansible.builtin.shell: su postgres --command "stork-tool db-create --db-name {{ stork_db_name }} --db-user {{ stork_db_user }} --db-password {{ stork_db_passwd }}"

- name: Generation du fichier de configuration server.env
  ansible.builtin.template:
    src: server.env.j2
    dest: /etc/stork/server.env
  notify:
    - Restart isc-stork-server.service

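For illustration, with the variables that s-backup.yml (shown later in this diff) passes to this role — stork_db_name "stork", stork_db_user "stork-server", stork_db_passwd "Azerty1+" — the database-creation task above is equivalent to this sketch:

```bash
# run as the postgres system user, exactly as the task does
su postgres --command "stork-tool db-create --db-name stork --db-user stork-server --db-password Azerty1+"
```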
52
roles/stork-server/templates/server.env.j2
Normal file
@ -0,0 +1,52 @@
### database settings
### the address of a PostgreSQL database
STORK_DATABASE_HOST=localhost
### the port of a PostgreSQL database
STORK_DATABASE_PORT=5432
### the name of a database
STORK_DATABASE_NAME={{ stork_db_name }}
### the username for connecting to the database
STORK_DATABASE_USER_NAME={{ stork_db_user }}
### the SSL mode for connecting to the database
### possible values: disable, require, verify-ca, or verify-full
# STORK_DATABASE_SSLMODE=
### the location of the SSL certificate used by the server to connect to the database
# STORK_DATABASE_SSLCERT=
### the location of the SSL key used by the server to connect to the database
# STORK_DATABASE_SSLKEY=
### the location of the root certificate file used to verify the database server's certificate
# STORK_DATABASE_SSLROOTCERT=
### the password for the username connecting to the database
### empty password is set to avoid prompting a user for database password
STORK_DATABASE_PASSWORD={{ stork_db_passwd }}

### REST API settings
### the IP address on which the server listens
# STORK_REST_HOST=
### the port number on which the server listens
# STORK_REST_PORT=
### the file with a certificate to use for secure connections
# STORK_REST_TLS_CERTIFICATE=
### the file with a private key to use for secure connections
# STORK_REST_TLS_PRIVATE_KEY=
### the certificate authority file used for mutual TLS authentication
# STORK_REST_TLS_CA_CERTIFICATE=
### the directory with static files served in the UI
STORK_REST_STATIC_FILES_DIR=/usr/share/stork/www
### the base URL of the UI - to be used only if the UI is served from a subdirectory
# STORK_REST_BASE_URL=

### enable Prometheus /metrics HTTP endpoint for exporting metrics from
### the server to Prometheus. It is recommended to secure this endpoint
### (e.g. using HTTP proxy).
# STORK_SERVER_ENABLE_METRICS=true

### Logging parameters

### Set logging level. Supported values are: DEBUG, INFO, WARN, ERROR
# STORK_LOG_LEVEL=DEBUG
### disable output colorization
# CLICOLOR=false

### path to the hook directory
# STORK_SERVER_HOOK_DIRECTORY=
@ -24,7 +24,10 @@ bash r-vp1-post.sh
```
## On **r-vp2**:

Run the r-vp2-post.sh script to fetch the configuration file and bring up the wg0 interface.
Run the playbook *ansible-playbook -i localhost, -c local* r-vp2.yml on **r-vp2**.

Then run the r-vp2-post.sh script to fetch the configuration file and bring up the wg0 interface.

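For reference, the playbook step mentioned above is simply the following command, run locally on r-vp2 (a sketch of the invocation quoted in the text):

```bash
ansible-playbook -i localhost, -c local r-vp2.yml
```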
### 🛠️ Run the script
```bash
cd /tools/ansible/gsb2023/Scripts
@ -34,7 +37,11 @@ bash r-vp2-post.sh
```
## Finishing up

Reboot the machines
Finally, reboot the machines.
```bash
reboot
```
Now go to the ferm role directory:
*gsb2024/roles/fw-ferm*

*Modification : jm*
@ -1,2 +1,3 @@
SERVER: "127.0.0.1"
SERVERACTIVE: "172.16.0.8"
SERVERACTIVE: "192.168.99.8"
TOKENAPI: "f72473b7e5402a5247773e456f3709dcdd5e41792360108fc3451bbfeed8eafe"

@ -28,3 +28,11 @@
    state: restarted
    enabled: yes

- name: mise en place script hostcreate
  template:
    src: hostcreate.sh.j2
    dest: /tmp/hostcreate.sh

#- name: lancement script hostcreate
#  command: bash /tmp/hostcreate.sh

1
roles/zabbix-cli/templates/hostcreate.sh.j2
Normal file
@ -0,0 +1 @@
curl -X POST -H "Content-Type: application/json" -d '{ "jsonrpc":"2.0","method":"host.create","params": {"host": "{{ ansible_hostname }}","groups": [{"groupid": "6"}],"templates": [{"templateid": "10343"}],"inventory_mode": 0,"inventory": {"type": 0}},"auth": "{{ TOKENAPI }}","id": 1}' http://{{ SERVERACTIVE }}/zabbix/api_jsonrpc.php
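Rendered with the defaults shown earlier (SERVERACTIVE 192.168.99.8 and the TOKENAPI value above) and a hypothetical host name of s-test, the generated /tmp/hostcreate.sh would look roughly like this sketch:

```bash
curl -X POST -H "Content-Type: application/json" \
  -d '{ "jsonrpc":"2.0","method":"host.create","params": {"host": "s-test","groups": [{"groupid": "6"}],"templates": [{"templateid": "10343"}],"inventory_mode": 0,"inventory": {"type": 0}},"auth": "f72473b7e5402a5247773e456f3709dcdd5e41792360108fc3451bbfeed8eafe","id": 1}' \
  http://192.168.99.8/zabbix/api_jsonrpc.php
```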
@ -29,15 +29,7 @@
    name: mariadb
    state: started

- name: 6. Créer la base de données
  community.mysql.mysql_db:
    name: zabbix
    encoding: utf8mb4
    collation: utf8mb4_bin
    state: present
    login_unix_socket: /var/run/mysqld/mysqld.sock

- name: 7. Creer un utilisateur et lui attribuer tous les droits
- name: 6. Creer un utilisateur et lui attribuer tous les droits
  community.mysql.mysql_user:
    name: zabbix
    password: password
@ -45,50 +37,52 @@
    state: present
    login_unix_socket: /var/run/mysqld/mysqld.sock

- name: 8. Modifier une variable pour importer un schema
- name: 7. Modifier la variable trust function creators pour importer la base données
  community.mysql.mysql_variables:
    variable: log_bin_trust_function_creators
    value: 1
    mode: global
    login_unix_socket: /var/run/mysqld/mysqld.sock

- name: 9. Importer le schema initial
- name: 8. Récupérer la base de données
  get_url:
    url: http://s-adm.gsb.adm/gsbstore/zabbix.sql.gz
    dest: /tmp

- name: 9. Importer la base de données
  community.mysql.mysql_db:
    state: import
    name: zabbix
    encoding: utf8mb4
    login_user: zabbix
    login_password: password
    target: /usr/share/zabbix-sql-scripts/mysql/server.sql.gz
    target: /tmp/zabbix.sql.gz
    login_unix_socket: /var/run/mysqld/mysqld.sock

- name: 10. Modifier la variable pour le schema
- name: 10. Remettre a zero la variable trust function creators
  community.mysql.mysql_variables:
    variable: log_bin_trust_function_creators
    value: 0
    mode: global
    login_unix_socket: /var/run/mysqld/mysqld.sock

- name: 11. Configurer le mdp de la db
  replace:
    path: /etc/zabbix/zabbix_server.conf
    regexp: '^# DBPassword='
    replace: 'DBPassword=password'

- name: 12. Lancer le service zabbix-server
- name: 11. Lancer le service zabbix-server
  service:
    name: zabbix-server
    state: restarted
    enabled: yes

- name: 13. Lancer le service zabbix-agent
- name: 12. Lancer le service zabbix-agent
  service:
    name: zabbix-agent
    state: restarted
    enabled: yes

- name: 14. Lancer le service apache2
- name: 13. Lancer le service apache2
  service:
    name: apache2
    state: restarted
    enabled: yes

- name: 14. Gotify
  copy:
    src: gotify.sh
    dest: /usr/lib/zabbix/alertscripts

@ -7,6 +7,7 @@
    - s-ssh
    - dnsmasq
    - squid
    - ssh-backup-key-gen
    # - local-store
    - zabbix-cli
    ## - syslog-cli

12
s-backup.yml
@ -1,14 +1,20 @@
---
- hosts: localhost
  connection: local
  vars:
    stork_db_user: "stork-server"
    stork_db_passwd: "Azerty1+"
    stork_db_name: "stork"

  roles:
    - base
    - goss
    # - proxy3
    - zabbix-cli
    # - ssh-cli
    # - syslog-cli
    - gotify
    - stork-server
    - ssh-cli
    #- syslog-cli
    - smb-backup
    - dns-slave
    - post
    - ssh-backup-key-private

23
s-kea1.yml
@ -1,13 +1,24 @@
---
- hosts: localhost
  connection: local
  vars:
    kea_this_server: "s-kea1"
    kea_srv1: "s-kea1"
    kea_srv2: "s-kea2"
    kea_ctrl_address_this: "172.16.0.20"
    kea_ctrl_address1: "172.16.0.20"
    kea_ctrl_address2: "172.16.0.21"
    kea_dhcp_int: "enp0s9"
    stork_host: "s-kea1.gsb.lan"
    stork_port: "8081"

  roles:
    - base
    #- goss
    #- ssh-cli
    - kea-master
    #- zabbix-cli
    #- journald-snd
    #- snmp-agent
    - goss
    - ssh-cli
    - kea
    - stork-agent
    - zabbix-cli
    - journald-snd
    - snmp-agent
    - post

23
s-kea2.yml
@ -1,13 +1,24 @@
---
- hosts: localhost
  connection: local
  vars:
    kea_this_server: "s-kea2"
    kea_srv1: "s-kea1"
    kea_srv2: "s-kea2"
    kea_ctrl_address_this: "172.16.0.21"
    kea_ctrl_address1: "172.16.0.20"
    kea_ctrl_address2: "172.16.0.21"
    kea_dhcp_int: "enp0s9"
    stork_host: "s-kea2.gsb.lan"
    stork_port: "8081"

  roles:
    - base
    # - goss
    # - ssh-cli
    - kea-slave
    # - zabbix-cli
    # - journald-snd
    # - snmp-agent
    - goss
    - ssh-cli
    - kea
    - stork-agent
    - zabbix-cli
    - journald-snd
    - snmp-agent
    - post

@ -4,6 +4,7 @@

  roles:
    - base
    - goss
    - post-lb
    - lb-web
    # - zabbix-cli

@ -4,6 +4,7 @@

  roles:
    - base
    - goss
    - post-lb
    - lb-web
    # - zabbix-cli

55
s-nxc.yaml
Normal file
@ -0,0 +1,55 @@
command:
  ls -l .:
    exit-status: 0
    stdout:
      - total 200
      - -rwxr-xr-x 1 root root 232 15 janv. 17:38 agoss
      - -rw-r--r-- 1 root root 212 15 janv. 17:38 changelog
      - drwxr-xr-x 3 root root 4096 15 janv. 17:38 doc
      - drwxr-xr-x 2 root root 4096 19 janv. 10:50 goss
      - -rwxr-xr-x 1 root root 209 15 janv. 17:38 gsbchk
      - -rwxr-xr-x 1 root root 7174 15 janv. 17:38 gsbstart
      - -rwxr-xr-x 1 root root 728 15 janv. 17:38 gsbstartl
      - -rw-r--r-- 1 root root 289 15 janv. 17:38 lisezmoi.txt
      - drwxr-xr-x 2 root root 4096 15 janv. 17:38 old
      - drwxr-xr-x 2 root root 4096 19 janv. 09:16 pre
      - -rw-r--r-- 1 root root 477 19 janv. 09:16 pull-config
      - -rw-r--r-- 1 root root 5070 19 janv. 09:16 README.md
      - -rw-r--r-- 1 root root 141 15 janv. 17:38 r-ext.yml
      - -rw-r--r-- 1 root root 151 15 janv. 17:38 r-int.yml
      - drwxr-xr-x 55 root root 4096 19 janv. 09:16 roles
      - -rw-r--r-- 1 root root 177 15 janv. 17:38 r-vp1-fw.yml
      - -rw-r--r-- 1 root root 259 15 janv. 17:38 r-vp1.yml
      - -rw-r--r-- 1 root root 173 15 janv. 17:38 r-vp2-fw.yml
      - -rw-r--r-- 1 root root 305 15 janv. 17:38 r-vp2.yml
      - -rw-r--r-- 1 root root 181 19 janv. 09:16 s-adm.yml
      - -rw-r--r-- 1 root root 119 15 janv. 17:38 s-agence.yml
      - -rw-r--r-- 1 root root 166 19 janv. 09:16 s-appli.yml
      - -rw-r--r-- 1 root root 182 19 janv. 09:16 s-backup.yml
      - drwxr-xr-x 3 root root 4096 19 janv. 09:16 scripts
      - -rw-r--r-- 1 root root 213 15 janv. 17:38 s-docker.yml
      - -rw-r--r-- 1 root root 144 15 janv. 17:38 s-elk.yml
      - -rw-r--r-- 1 root root 178 19 janv. 09:16 s-fog-post.yml
      - -rw-r--r-- 1 root root 162 19 janv. 09:16 s-fog.yml
      - -rw-r--r-- 1 root root 199 19 janv. 09:16 s-infra.yml
      - -rw-r--r-- 1 root root 351 15 janv. 17:38 s-itil.yml
      - -rw-r--r-- 1 root root 185 19 janv. 09:16 s-kea1.yml
      - -rw-r--r-- 1 root root 174 19 janv. 09:16 s-kea2.yml
      - -rw-r--r-- 1 root root 131 19 janv. 09:16 s-lb-bd.yml
      - -rw-r--r-- 1 root root 127 19 janv. 09:16 s-lb-web1.yml
      - -rw-r--r-- 1 root root 127 19 janv. 09:16 s-lb-web2.yml
      - -rw-r--r-- 1 root root 145 19 janv. 09:16 s-lb.yml
      - -rw-r--r-- 1 root root 148 19 janv. 09:16 s-mess.yml
      - -rw-r--r-- 1 root root 241 19 janv. 09:16 s-mon.yml
      - -rw-r--r-- 1 root root 290 19 janv. 09:16 s-nas.yml
      - -rw-r--r-- 1 root root 156 15 janv. 17:38 s-nxc.yml
      - -rw-r--r-- 1 root root 140 15 janv. 17:38 s-peertube.yml
      - -rw-r--r-- 1 root root 148 19 janv. 09:16 s-proxy.yml
      - -rw-r--r-- 1 root root 161 15 janv. 17:38 s-test.yml
      - drwxr-xr-x 3 root root 4096 15 janv. 17:38 sv
      - drwxr-xr-x 2 root root 4096 15 janv. 17:38 tests
      - drwxr-xr-x 2 root root 4096 15 janv. 17:38 vagrant
      - drwxr-xr-x 2 root root 4096 15 janv. 17:38 windows
      - drwxr-xr-x 7 root root 4096 19 janv. 09:16 wireguard
    stderr: []
    timeout: 10000
16
scripts/mkvm
@ -1,6 +1,6 @@
#!/bin/bash

mkvmrelease="v1.3.2"
mkvmrelease="v1.3.3"

ovarelease="2023c"
ovafogrelease="2024a"
@ -10,10 +10,18 @@ ovafilefog="$HOME/Téléchargements/debian-bullseye-gsb-${ovafogrelease}.ova"
startmode=0
deletemode=0

declare -A vmMem
vmMem[r-int]=512
vmMem[r-ext]=512
vmMem[s-nas]=512
vmMem[s-infra]=768
vmMem[s-backup]=768
vmMem[s-elk]=3072

usage () {
  echo "$0 - version ${mkvmrelease} - Ova version ${ovarelease}"
  echo "$0 : creation VM et parametrage interfaces"
  echo "usage : $0 [-r] [-s] <s-adm|s-infra|r-int|r-ext|s-proxy|s-mon|s-appli|s-backup|s-itil|s-ncx|s-fog>"
  echo "usage : $0 [-r] [-s] <s-adm|s-infra|r-int|r-ext|s-proxy|s-mon|s-appli|s-backup|s-itil|s-nxc|s-fog>"
  echo " option -r : efface VM existante avant creation nouvelle"
  echo " option -s : start VM apres creation"
  exit 1
@ -33,6 +41,10 @@ create_vm () {
    VBoxManage unregistervm --delete "${nom}"
  fi
  vboxmanage import "${nomova}" --vsys 0 --vmname "${nom}"
  if [[ -v vmMem["${nom}"] ]]; then
    mem="${vmMem[${nom}]}"
    VBoxManage modifyvm "${nom}" --memory "${mem}"
  fi
}

setif () {

@ -11,12 +11,27 @@ $ovafilefog="$HOME\Downloads\debian-bullseye-gsb-${ovafogrelease}.ova"
$vboxmanage="C:\Program Files\Oracle\VirtualBox\VBoxManage.exe"
$deletemode=0

$vmMem = @{
    "r-int" = "512"
    "r-ext" = "512"
    "s-nas" = "512"
    "s-infra" = "768"
    "s-backup" = "768"
    "s-elk" = "3072"
}

# FUNCTIONS

function create_vm{ param([string]$nomvm)
    # import from the OVA
    & "$vboxmanage" import "$ovafile" --vsys 0 --vmname "$nomvm"
    Write-Host "Machine $nomvm importée"

    if ($vmMem.ContainsKey($nomvm)) {
        & "$vboxmanage" import "$ovafile" --vsys 0 --vmname "$nomvm" --memory $vmMem[$nomvm]
        Write-Host "Machine $nomvm importée"
    } else {
        # import from the OVA
        & "$vboxmanage" import "$ovafile" --vsys 0 --vmname "$nomvm"
        Write-Host "Machine $nomvm importée"
    }
}

function create_if{ param([string]$nomvm, [string]$nic, [int]$rang, [string]$reseau)

18
wireguard/README.md
Normal file
@ -0,0 +1,18 @@
# **Explanation:**

The wireguard directory contains all the ping tests to run once the WireGuard installation is fully complete.

The sub-directories list the routes that must be present on the different machines. You can compare them with the interfaces shown by "ip a" if something is not working.

# **Steps to run the tests:**

To check that the VPN works and run the test phase, go to the machine where you want to run the ping tests (we take ping-sinfra.sh as an example).

* Go to the tools/ansible/gsb2024/wireguard directory

* Run the s-infra script: bash ping-sinfra.sh

Once started, a series of pings runs automatically; if everything is fine the script reaches the end.
If a ping does not go through, the script blocks on the ping currently being executed!

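In practice, the test run described above amounts to the following sketch (using the example script named in this README):

```bash
cd tools/ansible/gsb2024/wireguard
bash ping-sinfra.sh
```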
*Modification : jm*