Compare commits

...

219 Commits

Author SHA1 Message Date
bbbb
45e4401dcc maj lb-front-ssl pour une version fonctionnelle 2024-06-04 15:49:33 +02:00
sio user
dc50059f19 Maj version ova README.md 2024-05-23 16:15:40 +02:00
sio user
ada657401f Mef 2 2024-05-23 16:13:12 +02:00
sio user
b126e7f9e3 Mef README.md 2024-05-23 16:11:47 +02:00
sio user
f9f3e8da8e Maj version debian-bookworm-gsb-2024a -> b 2024-05-23 16:08:27 +02:00
Jimmy Chevanne
be4f3b9030 maj mkvm 2024-05-23 11:26:02 +02:00
c590edb875 maj role zabbix-cli et awx-user sur s-adm et s-infra 2024-05-02 09:49:05 +02:00
3cd52a230e maj awx 2024-05-02 08:39:34 +02:00
root
8dd65d65eb role awx actualisé 2024-05-01 12:02:05 +02:00
a
aac0f13134 script mkvm.ps1 2024-04-17 21:09:06 +02:00
a
ea60c89bf1 fix script mkvm.ps1 2024-04-17 09:01:47 +02:00
bbbb
272867304a reparation script mkvm.ps1 2024-04-16 11:27:20 +02:00
sio user
b53cd8c848 MAJ README.md apres chgt version images .ova 2024-04-12 09:00:32 +02:00
Jimmy Chevanne
8ed9ebe6f8 maj mkvm.ps1 2024-04-12 08:54:27 +02:00
Jimmy Chevanne
c1fe781ca2 maj mkvm 2024-04-12 08:50:01 +02:00
gsb
c2c1e8acb7 Actualiser scripts/mkvm.ps1 2024-03-03 19:40:35 +01:00
Jimmy Chevanne
5176ad216c maj lb-front-ssl 2024-02-16 15:08:17 +01:00
Jimmy Chevanne
e45118eef1 maj role lb-front-ssl 2024-02-16 14:48:52 +01:00
root
959e056c5f ajout role firewalld 2024-02-09 10:57:20 +01:00
Jimmy Chevanne
8d7c5f7cfb creation role lb-front-ssl 2024-02-05 16:02:10 +01:00
Alhassane Kone
80295cba99 script mkvm mise a jour 2024-02-02 15:00:22 +01:00
root
35e816c2eb modif role kea 2024-02-01 11:51:52 +01:00
root
b1e4b50982 maj README windows 2024-02-01 11:40:25 +01:00
root
d65fe53ef8 role AWX 2024-01-30 12:55:46 +01:00
sio user
451c8ba094 roles awx-usser et awx-user-cli 2024-01-30 12:04:34 +01:00
root
88061eb89d reorganisation du role s-awx et s-awx-post 2024-01-30 11:55:30 +01:00
root
6fbad9d9fa MAJ role AWX 2024-01-30 11:38:16 +01:00
sio user
30c7275ba6 filebeat... 2024-01-30 11:25:20 +01:00
root
33529f2781 ajout role zabbix-cli pour vm à superviser & token à jour avec BD 2024-01-30 10:48:57 +01:00
Flo
77a7f3c567 update zabbix 2024-01-29 21:15:11 +01:00
root
a3235af304 modifs diverses 2024-01-29 18:06:09 +01:00
root
17647b17da ajout role awx 2024-01-29 18:02:49 +01:00
root
78230b7f21 bash 2024-01-29 17:05:20 +01:00
root
7d90939ea3 clé pub & priv 2024-01-29 17:02:55 +01:00
root
7c01d0aa18 chgt role sshcli 2024-01-29 17:02:31 +01:00
root
ea513e616d explication savenextcloud 2024-01-29 16:47:47 +01:00
Adam Alphonso
1d7fa35e48 maj README 2024-01-29 16:43:37 +01:00
Adam Alphonso
250483501e Merge branch 'main' of https://gitea.lyc-lecastel.fr/gsb/gsb2024 2024-01-29 16:36:29 +01:00
Adam Alphonso
4abf4d4950 maj script r-vp1 2024-01-29 16:35:03 +01:00
root
8ebf476e05 script clé publique 2024-01-29 16:34:12 +01:00
root
26ae726457 création readme s-backup 2024-01-29 16:16:24 +01:00
root
b1bd102d85 création readme s-backup 2024-01-29 16:04:05 +01:00
root
6cfe40b998 création readme s-backup 2024-01-29 15:56:30 +01:00
florian
e48d63a8bc update 2024-01-29 14:32:54 +01:00
florian
873b6b6def update readme 2024-01-29 14:28:53 +01:00
florian
3d94e6c050 update README 2024-01-29 14:23:08 +01:00
root
151c0adf88 elk-filebeat-cli filebeat.yml 2024-01-26 15:48:18 +01:00
florian
745bc05e76 Merge branch 'main' of https://gitea.lyc-lecastel.fr/gsb/gsb2024 2024-01-26 15:00:44 +01:00
florian
82561d5d0a update gotify.sh 2024-01-26 14:59:57 +01:00
root
df1000e1b5 modif droit copie cle privee 2024-01-26 14:55:25 +01:00
florian
0824fd9621 update 2024-01-26 14:22:09 +01:00
florian
3c680769be update zabbix-srv pour gotify 2024-01-26 14:15:31 +01:00
Jimmy Chevanne
8ceaa8791f maj readme zabbix-srv 2024-01-26 10:55:33 +01:00
root
5f5aea168c chgt version ELK : 8.11.4 2024-01-26 09:56:04 +01:00
sio user
ef5701c5d1 mkvm: hash table pour memoire VM 2024-01-26 09:33:11 +01:00
Jimmy Chevanne
f74728292b peaufinage README lb 2024-01-26 09:27:29 +01:00
Jimmy Chevanne
bfdca163f7 ajout README roles lb 2024-01-26 09:23:17 +01:00
florian
cb1b315819 update mkvm.ps1 2024-01-26 08:56:23 +01:00
phil
c086bcdc7f mkvm : gere memoire VM 2024-01-26 00:08:14 +01:00
sio user
1134ca261d MAJ role elk ... 2024-01-25 17:11:56 +01:00
Jim
b0d81dc69c maj role lb-front 2024-01-25 16:29:11 +01:00
root
331b8b0fb6 maj README nextcloud 2024-01-25 15:43:50 +01:00
sio user
4025f996dc modif inst-depl pour zabbix.sql.gz et SRC 2024-01-25 14:44:50 +01:00
Jimmy Chevanne
a1ee9c6207 maj zabbix-srv 2024-01-25 11:54:57 +01:00
root
a1442e534d Premier commit ... 2024-01-25 11:40:07 +01:00
root
e78ef5948b modif save 2024-01-25 11:23:58 +01:00
root
298f105805 maj zabbix-srv 2024-01-25 11:22:18 +01:00
root
d88745e741 modif s-backup.yml 2024-01-25 11:11:56 +01:00
root
fffcb22db8 modif cle priv 2024-01-25 11:09:25 +01:00
Jimmy Chevanne
abb8c15028 maj zabbix-srv 2024-01-25 11:01:02 +01:00
root
73b4560dd9 modif cle privee 2024-01-25 10:49:53 +01:00
root
91d8b57029 modif role 2024-01-25 10:10:50 +01:00
root
37bbbad9dd script recup cle pub 2024-01-25 10:03:20 +01:00
root
84215f502b generate cle publique et privee 2024-01-25 09:53:45 +01:00
flo
2606cd19b0 maj zabbix-srv 2024-01-25 09:51:35 +01:00
root
b27ce2a372 maj goss s-nxc 2024-01-25 08:31:06 +01:00
root
18ce1f65ad maj goss s-nxc 2024-01-25 08:19:51 +01:00
root
116b84d230 ajout role stork-agent 2024-01-24 23:56:15 +01:00
root
c92a7654d3 ajout role stork-server 2024-01-24 19:22:14 +01:00
root
02c7f3dffd script saveNextcloud 2024-01-23 11:22:40 +01:00
root
5a8558d701 modif script save 2024-01-22 17:54:29 +01:00
root
7d6b15844a script de sauvegarde de nextcloud 2024-01-22 17:07:05 +01:00
root
2653221559 MAJ role KEA MAJ test goss KEA 2024-01-22 16:49:58 +01:00
Jimmy Chevanne
3100ba51e2 maj role lb-front 2024-01-22 16:26:02 +01:00
gsb
bbe58dbb01 Actualiser roles/fw-ferm/README.md 2024-01-22 15:27:26 +01:00
root
7124d8aaff type kea-ctrl-agent.conf.j2 2024-01-21 01:03:02 +01:00
root
0afa2c3596 corrections diverses template kea-ctrl pb avec jinja2 2024-01-21 00:54:39 +01:00
phil
38602033b3 ajout role kea + s-kea1-ps.yml 2024-01-20 22:42:27 +01:00
root
1c1993021b ajout du role gotify 2024-01-20 20:22:03 +01:00
root
b146170467 maj docker 2024-01-20 17:11:15 +01:00
root
df9d3c6c1c maj goss s-nxc 2024-01-20 17:02:33 +01:00
root
d75f4ffb3f zabbix ajout auto 2024-01-20 17:19:38 +01:00
root
eaf75de89e maj role kea 2024-01-19 20:21:12 +01:00
root
02fc23d224 modif test goss 2024-01-19 15:47:43 +01:00
gsb
bdc71bbb3c Actualiser roles/wireguard-r/README.md 2024-01-19 15:05:39 +01:00
gsb
308504062e Actualiser roles/wireguard-r/README.md 2024-01-19 15:04:18 +01:00
root
c3ad470fd1 re teste zabbix api 2024-01-19 15:00:07 +01:00
gsb
2d3067d67b Actualiser roles/wireguard-r/README.md 2024-01-19 14:59:11 +01:00
root
7d885b08b8 modif port docker-compose 2024-01-19 14:42:50 +01:00
Florian Ribeiro
d88044350a test api zabbix 2024-01-19 14:41:21 +01:00
gsb
ca6d1d2e09 Actualiser wireguard/README.md 2024-01-19 14:35:17 +01:00
root
1a2c349969 Merge branch 'main' of https://gitea.lyc-lecastel.fr/gsb/gsb2024 2024-01-19 14:30:22 +01:00
root
3a18a3bd9a maj goss fichier 2024-01-19 14:28:41 +01:00
root
239480a12b maj goss fichier 2024-01-19 14:28:41 +01:00
gsb
f66774efe1 Actualiser wireguard/README.md 2024-01-19 14:28:37 +01:00
root
b57b0763e9 test goss machine s-mon 2024-01-19 14:25:34 +01:00
gsb
79279fc3a1 Actualiser wireguard/README.md 2024-01-19 14:22:50 +01:00
gsb
54ef5103ca Actualiser wireguard/README.md 2024-01-19 14:22:36 +01:00
gsb
a87853372c Actualiser wireguard/README.md 2024-01-19 14:17:49 +01:00
gsb
378a20f02a Ajouter wireguard/README.md 2024-01-19 14:13:55 +01:00
sio user
21ee40ab59 Maj README.md 2024-01-19 11:53:02 +01:00
sio user
d393b1eebe ajout entrees DNS s-stork et s-gotify 2024-01-19 11:48:33 +01:00
Jimmy Chevanne
bff32cd191 maj goss lb 2024-01-19 10:47:31 +01:00
root
050a4fdc7d maj scipt python wireguard 2024-01-19 10:33:38 +01:00
root
8568463dc7 goss s-itil pages web 2024-01-19 10:01:28 +01:00
root
d58d3ae8d7 actualisation test goss s-itil 2024-01-19 09:57:01 +01:00
root
16af48fbf3 test playbook docker 2024-01-19 08:55:25 +01:00
root
b104d23495 Mise à jour fichier README.md Zabbix-cli 2024-01-18 19:52:08 +01:00
root
77e9367396 fin #- zabbix-cli 2024-01-18 19:25:26 +01:00
root
38582b8f8d mise à jour role zabbix-cli 2024-01-18 18:59:18 +01:00
gsb
1eae98a064 Actualiser roles/wireguard-r/README.md 2024-01-18 18:44:38 +01:00
sio user
39ee37f3e8 ajout entree pour s-awx 2024-01-18 17:10:33 +01:00
gsb
1f4c957726 Actualiser roles/wireguard-r/README.md 2024-01-18 16:42:19 +01:00
gsb
89515287b0 Actualiser roles/wireguard-r/README.md 2024-01-18 16:17:14 +01:00
gsb
77d1440da7 Actualiser roles/wireguard-r/README.md 2024-01-18 16:13:04 +01:00
gsb
be66b9e2f4 Actualiser roles/wireguard-r/-README.md 2024-01-18 16:10:05 +01:00
gsb
91417b7f8e Actualiser roles/wireguard-r/-README.md 2024-01-18 16:03:25 +01:00
Jimmy Chevanne
69052938f7 Merge branch 'main' of https://gitea.lyc-lecastel.fr/gsb/gsb2024 2024-01-18 15:46:08 +01:00
gsb
81af190640 Actualiser roles/wireguard-r/README.md 2024-01-18 15:44:10 +01:00
Jimmy Chevanne
8b80414e46 maj lb-nfs-server 2024-01-18 15:44:02 +01:00
gsb
91acd3c18d Actualiser roles/wireguard-r/README.md 2024-01-18 15:37:07 +01:00
gsb
8498d7be15 Actualiser roles/wireguard-r/README.md 2024-01-18 15:35:08 +01:00
root
15e57a4a40 modif docker install 2024-01-18 11:51:53 +01:00
Jimmy Chevanne
8b59a5553f maj post-lb web2 2024-01-18 11:45:25 +01:00
Florian Ribeiro
5f1b04fd96 update 2024-01-18 11:34:02 +01:00
root
3b88857c0b modif reseau proxy 2024-01-18 11:19:52 +01:00
root
72c5498e64 maj role fw 2024-01-18 11:11:34 +01:00
root
e1cc021ee2 mise a jour role kea-master et kea-slave modif des fichiers de config 2024-01-18 10:59:19 +01:00
Jimmy Chevanne
76528fad6f maj playbooks lb 2024-01-18 10:36:06 +01:00
Jimmy Chevanne
bc7cdc993f maj playbooks lb 2024-01-18 10:01:18 +01:00
phil
01e06119a5 mkvm : mode normal pour option -s 2024-01-18 00:14:03 +01:00
root
99672bef0d reorganisation roles kea-master et kea-slave 2024-01-17 22:25:55 +01:00
phil
f9e801c39e MAJ doc README.md 2024-01-17 18:06:26 +01:00
phil
85374ee503 s-fog.yml et s-fog-post.yml 2024-01-17 17:00:31 +01:00
phil
5232d80321 syntax dans inst-depl 2024-01-17 15:08:29 +01:00
phil
5acca816af inst-depl 2024-01-17 15:01:52 +01:00
root
394a8d8cd8 'fogsettings : short hostname 2024-01-17 14:53:47 +01:00
root
ff03ee66a5 fog: test goss et fogsettings adresse 2024-01-17 14:45:05 +01:00
root
1e30fd87a9 adapt diverses 2024-01-17 13:39:34 +01:00
root
924a11f843 inst-depl echappement 2024-01-17 13:06:42 +01:00
phil
83a3942900 inst-depl inst1 et inst2 2024-01-17 12:18:45 +01:00
phil
0d2968b2c8 prepa role fog 2024-01-16 23:55:52 +01:00
phil
86afa7c616 nettoyage pull-config 2024-01-16 23:14:22 +01:00
phil
00071b1c67 chgt plage adresse pour n-user : 100-150 2024-01-16 22:05:56 +01:00
root
b5237811e1 s-fog.yaml et fogsettings single interface 2024-01-16 15:34:41 +01:00
Jimmy Chevanne
25bb47afd3 Creation role kea a continuer 2024-01-16 12:41:50 +01:00
root
addabae478 maj s-mon : become 2024-01-16 12:06:22 +01:00
root
a57998f5de maj goss r-vp2.yaml 2024-01-16 11:39:57 +01:00
root
262b7bdb13 maj goss r-vp2 2024-01-16 11:03:44 +01:00
Alhassane Kone
c45dc50d12 maj mvkm.ps1: ajout kea1 et kea2 2024-01-16 10:38:55 +01:00
Jimmy Chevanne
d1116a91c3 update 2024-01-16 10:32:09 +01:00
Jimmy Chevanne
9c8dca44c9 mise à jour mkvm 2024-01-16 10:24:04 +01:00
root
ce3b6e0a77 nettoyage s-fog 2024-01-15 21:47:44 +01:00
phil
a03298ed54 php version dans fogsettings 2024-01-15 21:34:04 +01:00
sio user
80b54a50df ajout entrée dnas base, post et dns-master pour s-kea1 et s-kea2 2024-01-15 17:54:31 +01:00
root
045af9bea2 maj zabbix cli 2024-01-15 17:22:15 +01:00
gsb
6b10b981f4 Actualiser roles/journald-rcv/README.md 2024-01-15 13:57:01 +01:00
root
3811e2df5c README.md 2024-01-15 00:49:40 +01:00
root
27aad0dcb5 commente appel role zabbix-cli non fonctionnel 2024-01-15 00:42:05 +01:00
phil
c03c066d41 mkvm options 2024-01-15 00:02:43 +01:00
phil
beca7dbdcc ajout option -s pour mkvm 2024-01-14 23:19:06 +01:00
root
5dcaeb0629 s-adm.yaml pour goss 2024-01-14 23:03:57 +01:00
gsb
82bda1c85b Actualiser roles/journald-rcv/README.md 2024-01-14 22:51:39 +01:00
root
0537e6f942 mise a jour test goss role s-itil s-mon. Actualisation de la documentation role journald-rcv modification des roles s-fog et s-itil 2024-01-14 22:47:02 +01:00
phil
7310641ce0 typo 2024-01-14 22:44:49 +01:00
phil
48b16468b6 typo inst-depl 2024-01-14 22:43:05 +01:00
phil
c7a893651e inst-depl : inst goss 2024-01-14 22:39:56 +01:00
phil
236e560329 inst-depl ipv4 et gestup 2024-01-14 22:30:01 +01:00
phil
84144c72fb inst-depl => lighttpd 2024-01-14 22:15:23 +01:00
root
84aa96c106 maj handler role glpi 2024-01-14 14:20:13 +01:00
phil
d6ddbb4dd6 MAJ doc README.md 2024-01-13 16:08:49 +01:00
root
cc36b5dcf7 fogsettings : trash 2024-01-12 22:39:04 +01:00
root
5aa12d8acd chgt role base 2024-01-12 22:05:05 +01:00
root
35a21bab2f comment appel zabbix-cli dans s-adm.yml 2024-01-12 21:43:40 +01:00
root
cdd64636ed maj README windows 2024-01-12 15:48:39 +01:00
root
db966c5e9e README.md à jour 2024-01-12 15:22:43 +01:00
root
8107158a4f README.md à jour 2024-01-12 15:20:02 +01:00
Jimmy Chevanne
18cb3a1d99 creation goss s-mon 2024-01-12 15:17:41 +01:00
Jimmy Chevanne
0997c5d44a creation goss s-mon 2024-01-12 15:14:00 +01:00
gsb
bf877b63a6 Actualiser roles/glpi/handlers/main.yml 2024-01-12 15:07:06 +01:00
root
a4b7b06c3f modif test agoss 2024-01-12 14:47:21 +01:00
root
8a81aac1e2 modif test agoss 2024-01-12 14:37:22 +01:00
Jimmy Chevanne
8f4b5b6398 maj readme zabbix-srv 2024-01-12 14:33:51 +01:00
Jimmy Chevanne
03b7706c23 maj readme zabbix-srv 2024-01-12 14:26:18 +01:00
root
008731f456 modif role fog & routeur 2024-01-12 14:07:00 +01:00
gsb
cbcefb5fbd Actualiser roles/glpi/README.md 2024-01-12 10:56:00 +01:00
gsb
2b8745bedf Actualiser roles/glpi/README.md 2024-01-12 10:54:28 +01:00
gsb
1613c632b4 Actualiser roles/glpi/README.md 2024-01-12 10:52:24 +01:00
Jimmy Chevanne
41aa9c626c maj role zabbix srv : rendre idempottent le playbook 2024-01-12 10:23:24 +01:00
gsb
6816bca773 Actualiser roles/glpi/README.md 2024-01-12 10:19:19 +01:00
gsb
fe386b4f4c Actualiser roles/glpi/README.md 2024-01-12 10:01:19 +01:00
gsb
008b6ce0bb Actualiser roles/glpi/README.md 2024-01-12 09:52:11 +01:00
gsb
9447a6c726 Actualiser roles/glpi/README.md 2024-01-12 09:45:59 +01:00
gsb
c550ea90c2 Actualiser roles/glpi/README.md 2024-01-12 09:39:57 +01:00
gsb
dc010b3562 Actualiser roles/glpi/README.md 2024-01-12 09:28:04 +01:00
gsb
46cd74fed6 Actualiser roles/glpi/README.md 2024-01-12 09:27:42 +01:00
gsb
1f1ade55c7 Actualiser roles/glpi/README.md 2024-01-12 09:22:41 +01:00
gsb
e5ba286b4f Actualiser roles/glpi/README.md 2024-01-12 09:21:06 +01:00
gsb
9faec4b433 Actualiser roles/glpi/README.md 2024-01-12 09:20:22 +01:00
gsb
29f0b8e269 Actualiser roles/glpi/README.md 2024-01-12 09:17:21 +01:00
gsb
4722affa2e Actualiser roles/glpi/README.md 2024-01-12 09:16:56 +01:00
gsb
fb290afb6a Actualiser roles/glpi/README.md 2024-01-12 09:15:33 +01:00
gsb
a0be338fd5 Actualiser roles/glpi/README.md 2024-01-12 09:13:33 +01:00
gsb
1e8e9e1281 Actualiser roles/glpi/README.md 2024-01-12 08:53:31 +01:00
gsb
713a9ecc28 Actualiser roles/glpi/README.md 2024-01-12 08:50:43 +01:00
gsb
6a023456fb Actualiser roles/glpi/README.md 2024-01-12 08:49:59 +01:00
gsb
932728ae3a Actualiser roles/glpi/README.md 2024-01-12 08:45:44 +01:00
gsb
b35f036ce2 Actualiser roles/glpi/README.md 2024-01-12 08:44:54 +01:00
gsb
fd25eba978 Actualiser roles/glpi/README.md 2024-01-12 08:20:45 +01:00
159 changed files with 5039 additions and 867 deletions

View File

@ -1,34 +1,36 @@
# gsb2024
2024-12-21 ps
* 2024-05-23 16h07 ps
* 2024-04-12 8h55 ps
* 2024-01-19 11h45 ps
Environnement et playbooks ansible pour le projet GSB 2024
Environnement et playbooks **ansible** pour le projet **GSB 2024**
## Quickstart
Prérequis :
* une machine LInux Debian Bookworm ou Windows
* une machine **Linux Debian Bookworm** ou **Windows**
* VirtualBox
* git
* fichier machines virtuelles **ova** :
* **debian-bookworm-gsb-2023c.ova**
* **debian-buster-gsb-2023a.ova**
* **debian-bookworm-gsb-2024b.ova**
* **debian-bullseye-gsb-2024b.ova**
## Les machines
* **s-adm** : routeur adm, DHCP + NAT, deploiement, proxy squid
* **s-adm** : routeur adm, DHCP + NAT, déploiement, proxy squid
* **s-infra** : DNS maitre, autoconfiguration navigateurs avec **wpad**
* **r-int** : routage, DHCP
* **r-ext** : routage, NAT
* **s-proxy** : squid
* **s-proxy** : proxy **squid**
* **s-itil** : serveur GLPI
* **s-backup** : DNS esclave + sauvegarde s-win (SMB)
* **s-mon** : supervision avec **Nagios4**, notifications et syslog
* **s-backup** : DNS esclave + sauvegarde s-win (SMB), Stork et Gotify
* **s-mon** : supervision avec **Nagios4/Zabbix**, notifications et journald
* **s-fog** : deploiement postes de travail avec **FOG**
* **s-win** : Windows Server 2019, AD, DNS, DHCP, partage fichiers
* **s-nxc** : NextCloud avec **docker**
* **s-elk** : pile ELK dockerisée
* **s-nxc** : NextCloud avec **docker** via proxy inverse **traefik** et certificat auto-signé
* **s-elk** : pile **ELK** dockerisée
* **s-lb** : Load Balancer **HaProxy** pour application Wordpress (DMZ)
* **r-vp1** : Routeur VPN Wireguard coté siège
* **r-vp2** : Routeur VPN Wireguard coté agence, DHCP
@ -38,6 +40,8 @@ Prérequis :
* **s-lb-web2** : Serveur Wordpress 2 Load Balancer
* **s-lb-db** : Serveur Mariadb pour Wordpress
* **s-nas** : Serveur NFS pour application Wordpress avec LB
* **s-kea1** : Serveur DHCP Kea HA 1
* **s-kea2** : Serveur DHCP Kea HA 2
## Les playbooks
@ -47,12 +51,12 @@ Il existe un playbook ansible pour chaque machine à installer, nommé comme la
## Installation
On utilisera les images de machines virtuelle suivantes :
* **debian-bookworm-gsb-2023c.ova** (2023-12-18)
* Debian Bookworm 12.4 - 2 cartes - 1 Go - Stockage 20 Go
* **debian-bookworm-gsb-2024b.ova** (2024-05-23)
* Debian Bookworm 12.5 - 2 cartes - 1 Go - Stockage 20 Go
et pour **s-fog** :
* **debian-buster-2023a.ova** (2023-01-06)
* Debian Buster 10 - 2 cartes - 1 Go - stockage 20 Go
* **debian-bullseye-2024b.ova** (2024-04-11)
* Debian Bullseye 11.9 - 2 cartes - 1 Go - stockage 20 Go
Les images **.ova** doivent être stockées dans le répertoire habituel de téléchargement de l'utilisateur courant.
@ -72,6 +76,10 @@ mkvm -r s-adm
```
### Machine s-adm
La machine **s-adm** est la première machine à installer.
* créer la machine virtuelle **s-adm** avec **mkvm** comme décrit plus haut.
* démarrer la VM puis ouvrir une session
* utiliser le script de renommage comme suit :
@ -85,48 +93,49 @@ bash chname <nouveau_nom_de_machine>` , puis redémarrer
git clone https://gitea.lyc-lecastel.fr/gsb/gsb2024.git
cd gsb2024/pre
bash inst-depl
cd /var/www/html/gsbstore
bash getall
cd /root/tools/ansible/gsb024/pre
bash gsbboot
cd .. ; bash pull-config
cd /root/tools/ansible/gsb2024/pre
DEPL=192.168.99.99 bash gsbboot
cd ../.. ; bash pull-config
```
- redémarrer
- la machine **s-adm** doit être opérationnelle (vérification rapide ci-dessous)
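Vérification rapide possible (exemple indicatif, en s'appuyant sur les tests goss de s-adm) :
```shell
# depuis une machine du réseau adm : le dépôt gsbstore doit répondre
curl -sI http://192.168.99.99/gsbstore/goss | head -n 1
# sur s-adm : les services attendus doivent être actifs
systemctl is-active lighttpd dnsmasq squid
```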
### Pour chaque machine
#### Etape 1
#### Etape 1 - Nommage machine
- créer la machine avec **mkvm -r**, les cartes réseau sont paramétrées par **mkvm** selon les spécifications
- ouvrir une session sur la machine considérée
- renomme la machine soit
- renommer la machine soit
* en utilisant le script de renommage comme suit :
` /root/tools/ansible/gsb2024/scripts/chname <nouveau_nom_de_machine>`
* soit avec :
* soit (ici on renomme la machine en **s-infra**) avec :
```shell
NHOST=mavm
sed -i "s/bookworm/${NHOST}/g" /etc/host{s,name}
sudo reboot # on redemarre
export HOST=s-infra
curl 192.168.99.99/gsbstore/inst1|bash
reboot # on redemarre
```
#### Etape 2
#### Etape 2 - Installation des outils, dépôt gsb2024 et lancement du playbook
- utiliser le script **gsb-start** : `bash gsb-start`
- ou sinon:
```shell
mkdir -p tools/ansible ; cd tools/ansible
git clone https://gitea.lyc-lecastel.fr/gsb/gsb2024.git
cd gsb2024/pre
DEPL=192.168.99.99 bash gsbboot
cd ../..
bash pull-config
curl 192.168.99.99/gsbstore/inst2|bash
```
- le script récupère le dépôt **gsb2024.git**
- il lance ensuite le script **pull-config**, qui exécute le playbook portant le nom de la machine (voir l'exemple après cette liste)
- on peut alors redémarrer
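À titre d'exemple, sur **s-infra**, cela revient à exécuter localement le playbook `s-infra.yml` (ce que fait aussi `pull-config -l`) :
```shell
cd /root/tools/ansible/gsb2024
ansible-playbook -i localhost, -c local s-infra.yml
```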
#### Etape 3
#### Etape 3 - Redémarrage et tests
- redémarrer
- **Remarque** : une machine doit avoir été redémarrée pour prendre en charge la nouvelle configuration
- **Remarque** : une machine doit avoir été redémarrée pour prendre en charge la nouvelle configuration, en particulier la couche réseau et l'adressage.
- selon les situations, il est possible qu'un seul playbook ne soit pas suffisant pour installer complètement une machine. Dans ce cas de figure, le second playbook s'appelle **s-machine-post.yml**.
Il est à lancer depuis `tools/ansible/gsb2024` :
```shell
ansible-playbook -i localhost, -c local s-machine-post.yml
```
## Les tests
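La plupart des machines disposent d'un fichier de tests **goss** (répertoire `goss/` du dépôt). Exemple indicatif de lancement manuel, en supposant **goss** installé dans `/usr/local/bin` (cf. `pre/inst-depl`) :
```shell
cd /root/tools/ansible/gsb2024
goss -g goss/$(hostname).yaml validate
```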

7
firewalld.yml Normal file
View File

@ -0,0 +1,7 @@
---
- hosts: localhost
connection: local
become: yes
roles:
- firewalld

25
goss.yaml Normal file
View File

@ -0,0 +1,25 @@
port:
tcp:22:
listening: true
ip:
- 0.0.0.0
tcp6:22:
listening: true
ip:
- '::'
service:
sshd:
enabled: true
running: true
user:
sshd:
exists: true
uid: 101
gid: 65534
groups:
- nogroup
home: /run/sshd
shell: /usr/sbin/nologin
process:
sshd:
running: true

View File

@ -1,21 +1,20 @@
file:
/etc/wireguard/wg0.conf:
exists: true
mode: "0644"
mode: "0600"
owner: root
group: root
filetype: file
contains:
- AllowedIPs = 10.0.0.2/32, 172.16.128.0/24
contains: []
package:
wireguard:
installed: true
versions:
- 1.0.20210223-1
- 1.0.20210914-1
wireguard-tools:
installed: true
versions:
- 1.0.20210223-1
- 1.0.20210914-1+b1
service:
wg-quick@wg0:
enabled: true

View File

@ -1,7 +1,8 @@
file:
/etc/wireguard/wg0.conf:
exists: true
mode: "0644"
mode: "0600"
size: 374
owner: root
group: root
filetype: file
@ -10,11 +11,11 @@ package:
wireguard:
installed: true
versions:
- 1.0.20210223-1
- 1.0.20210914-1
wireguard-tools:
installed: true
versions:
- 1.0.20210223-1
- 1.0.20210914-1+b1
service:
isc-dhcp-server:
enabled: true

View File

@ -1,6 +1,18 @@
file:
/var/www/html/gsbstore/getall:
exists: true
mode: "0644"
owner: root
group: root
filetype: file
contents: []
package:
dnsmasq:
installed: true
lighttpd:
installed: true
versions:
- 1.4.69-1
squid:
installed: true
addr:
@ -12,10 +24,18 @@ port:
listening: true
ip:
- 0.0.0.0
tcp:80:
listening: true
ip:
- 0.0.0.0
tcp6:53:
listening: true
ip:
- '::'
tcp6:80:
listening: true
ip:
- '::'
udp:53:
listening: true
ip:
@ -32,6 +52,9 @@ service:
dnsmasq:
enabled: true
running: true
lighttpd:
enabled: true
running: true
squid:
enabled: true
running: true
@ -61,6 +84,8 @@ dns:
process:
dnsmasq:
running: true
lighttpd:
running: true
squid:
running: true
interface:

6
goss/s-awx.yaml Normal file
View File

@ -0,0 +1,6 @@
interface:
enp0s8:
exists: true
addrs:
- 172.16.0.22/24
mtu: 1500

View File

@ -1,28 +1,77 @@
interface:
enp0s3:
exists: true
addrs:
- 192.168.99.16/24
interface:
enp0s8:
exists: true
addrs:
- 172.16.0.16/24
interface:
enp0s9:
exists: true
addrs:
- 172.16.64.16/24
file:
/tftpboot/default.ipxe:
exists: true
mode: "0644"
owner: root
group: root
filetype: file
contains: []
contents: null
package:
apache2:
installed: true
versions:
- 2.4.56-1~deb11u2
isc-dhcp-server:
installed: true
versions:
- 4.4.1-2.3+deb11u2
mariadb-server:
installed: true
versions:
- 1:10.5.21-0+deb11u1
tftpd-hpa:
installed: true
versions:
- 5.2+20150808-1.2
port:
tcp:80:
listening: true
ip:
- 0.0.0.0
tcp:443:
listening: true
ip:
- 0.0.0.0
udp:67:
listening: true
ip:
- 0.0.0.0
udp:69:
listening: true
ip:
- 0.0.0.0
service:
apache2:
enabled: true
running: true
isc-dhcp-server:
enabled: true
running: true
nfs-server:
enabled: true
running: true
tftpd-hpa:
enabled: true
running: true
command:
ping -c 4 192.168.99.99:
exit-status: 0
stdout:
- 0% packet loss
stderr: []
timeout: 10000
ping -c 4 google.fr:
exit-status: 0
stdout:
- 0% packet loss
stderr: []
timeout: 10000
ping -c 4 192.168.99.99:
exit-status: 0
stdout:
- 0% packet loss
stderr: []
timeout: 10000
ping -c 4 google.fr:
exit-status: 0
stdout:
- 0% packet loss
stderr: []
timeout: 10000
process:
apache2:
running: true
interface:
enp0s9:
exists: true
addrs:
- 172.16.64.16/24

View File

@ -1,36 +1,87 @@
file:
/var/www/html/glpi:
exists: true
mode: "0755"
owner: www-data
group: www-data
filetype: directory
/var/www/html/ficlients:
exists: true
mode: "0775"
owner: www-data
group: www-data
filetype: directory
/var/www/html/glpi/plugins:
exists: true
mode: "0777"
filetype: directory
/var/www/html/index.nginx-debian.html:
exists: true
mode: "0775"
owner: www-data
group: www-data
filetype: file
/etc/nginx/sites-enabled/default:
exists: false
contents: []
/etc/nginx/sites-enabled/glpi:
exists: true
mode: "0644"
owner: root
group: root
filetype: file
contents: []
/var/www/html/glpi:
exists: true
mode: "0755"
owner: www-data
group: www-data
filetype: directory
contents: []
/var/www/html/glpicli:
exists: true
mode: "0775"
owner: www-data
group: www-data
filetype: directory
contents: []
/var/www/html/glpicli/GLPI-Agent-1.7-x64.msi:
exists: true
mode: "0644"
owner: root
group: root
filetype: file
contents: []
port:
tcp:22:
listening: true
ip:
- 0.0.0.0
tcp:80:
listening: true
ip:
- 0.0.0.0
tcp:3306:
listening: true
ip:
- 127.0.0.1
tcp:9000:
listening: true
ip:
- 127.0.0.1
tcp:10050:
listening: true
ip:
- 0.0.0.0
service:
mariadb:
enabled: true
running: true
nginx:
enabled: true
running: true
mariadb.service:
enabled: true
running: true
nginx:
enabled: true
running: true
php8.2-fpm.service:
enabled: true
running: true
ssh:
enabled: true
running: true
systemd-journal-upload:
enabled: true
running: true
zabbix-agent:
enabled: true
running: true
http:
http://s-itil.gsb.lan/:
status: 200
allow-insecure: false
no-follow-redirects: false
timeout: 5000
body: []
username: glpi
password: glpi
http://s-itil.gsb.lan/glpicli:
status: 200
allow-insecure: false
no-follow-redirects: false
timeout: 5000
body: []

93
goss/s-kea1.yaml Normal file
View File

@ -0,0 +1,93 @@
file:
/etc/kea/kea-ctrl-agent.conf:
exists: true
mode: "0644"
owner: _kea
group: root
filetype: file
contents: []
/etc/kea/kea-dhcp4.conf:
exists: true
mode: "0644"
owner: _kea
group: root
filetype: file
contents: []
/tmp/kea4-ctrl-socket:
exists: true
mode: "0755"
size: 0
owner: _kea
group: _kea
filetype: socket
contains: []
contents: null
/usr/lib/x86_64-linux-gnu/kea:
exists: true
mode: "0755"
owner: root
group: root
filetype: directory
contents: []
package:
isc-kea-common:
installed: true
versions:
- 2.4.1-isc20231123184533
isc-kea-ctrl-agent:
installed: true
versions:
- 2.4.1-isc20231123184533
isc-kea-dhcp4:
installed: true
versions:
- 2.4.1-isc20231123184533
isc-kea-hooks:
installed: true
versions:
- 2.4.1-isc20231123184533
libmariadb3:
installed: true
versions:
- 1:10.11.4-1~deb12u1
mariadb-common:
installed: true
versions:
- 1:10.11.4-1~deb12u1
mysql-common:
installed: true
versions:
- 5.8+1.1.0
addr:
udp://172.16.64.254:67:
local-address: 127.0.0.1
reachable: true
timeout: 500
port:
tcp:8000:
listening: true
ip:
- 172.16.0.20
service:
isc-kea-ctrl-agent.service:
enabled: true
running: true
isc-kea-dhcp4-server.service:
enabled: true
running: true
interface:
enp0s3:
exists: true
addrs:
- 192.168.99.20/24
mtu: 1500
enp0s8:
exists: true
addrs:
- 172.16.0.20/24
mtu: 1500
enp0s9:
exists: true
addrs:
- 172.16.64.20/24
mtu: 1500

93
goss/s-kea2.yaml Normal file
View File

@ -0,0 +1,93 @@
file:
/etc/kea/kea-ctrl-agent.conf:
exists: true
mode: "0644"
owner: _kea
group: root
filetype: file
contents: []
/etc/kea/kea-dhcp4.conf:
exists: true
mode: "0644"
owner: _kea
group: root
filetype: file
contents: []
/tmp/kea4-ctrl-socket:
exists: true
mode: "0755"
size: 0
owner: _kea
group: _kea
filetype: socket
contains: []
contents: null
/usr/lib/x86_64-linux-gnu/kea:
exists: true
mode: "0755"
owner: root
group: root
filetype: directory
contents: []
package:
isc-kea-common:
installed: true
versions:
- 2.4.1-isc20231123184533
isc-kea-ctrl-agent:
installed: true
versions:
- 2.4.1-isc20231123184533
isc-kea-dhcp4:
installed: true
versions:
- 2.4.1-isc20231123184533
isc-kea-hooks:
installed: true
versions:
- 2.4.1-isc20231123184533
libmariadb3:
installed: true
versions:
- 1:10.11.4-1~deb12u1
mariadb-common:
installed: true
versions:
- 1:10.11.4-1~deb12u1
mysql-common:
installed: true
versions:
- 5.8+1.1.0
addr:
udp://172.16.64.254:67:
local-address: 127.0.0.1
reachable: true
timeout: 500
port:
tcp:8000:
listening: true
ip:
- 172.16.0.21
service:
isc-kea-ctrl-agent.service:
enabled: true
running: true
isc-kea-dhcp4-server.service:
enabled: true
running: true
interface:
enp0s3:
exists: true
addrs:
- 192.168.99.21/24
mtu: 1500
enp0s8:
exists: true
addrs:
- 172.16.0.21/24
mtu: 1500
enp0s9:
exists: true
addrs:
- 172.16.64.21/24
mtu: 1500

View File

@ -1,21 +1,38 @@
package:
mysql-server:
installed: true
versions:
- 5.5.54-0+deb8u1
command:
egrep "#bind-address" /etc/mysql/my.cnf:
exit-status: 0
stdout:
- "#bind-address\t\t= 127.0.0.1"
stderr: []
timeout: 10000
addr:
tcp://192.168.102.1:80:
reachable: true
timeout: 500
tcp://192.168.102.2:80:
reachable: true
timeout: 500
service:
mariadb:
enabled: true
running: true
mysql:
enabled: true
running: true
user:
mysql:
exists: true
uid: 104
gid: 111
groups:
- mysql
home: /nonexistent
shell: /bin/false
group:
mysql:
exists: true
gid: 111
interface:
enp0s3:
exists: true
addrs:
- 192.168.99.13/24
enp0s8:
exists: true
addrs:
- 192.168.102.50/24
enp0s3:
exists: true
addrs:
- 192.168.99.154/24
mtu: 1500
enp0s8:
exists: true
addrs:
- 192.168.102.254/24
mtu: 1500

View File

@ -1,63 +1,62 @@
package:
apache2:
installed: true
versions:
- 2.4.10-10+deb8u7
php5:
installed: true
versions:
- 5.6.29+dfsg-0+deb8u1
apache2:
installed: true
versions:
- 2.4.57-2
nfs-common:
installed: true
versions:
- 1:2.6.2-4
port:
tcp:22:
listening: true
ip:
- 0.0.0.0
tcp6:22:
listening: true
ip:
- '::'
tcp6:80:
listening: true
ip:
- '::'
tcp6:80:
listening: true
ip:
- '::'
service:
apache2:
enabled: true
running: true
sshd:
enabled: true
running: true
user:
sshd:
exists: true
uid: 105
gid: 65534
groups:
- nogroup
home: /var/run/sshd
shell: /usr/sbin/nologin
command:
egrep 192.168.102.14:/export/www /etc/fstab:
exit-status: 0
stdout:
- 192.168.102.14:/export/www /var/www/html nfs _netdev rw 0 0
stderr: []
timeout: 10000
apache2:
enabled: true
running: true
nfs-common:
enabled: false
running: false
process:
apache2:
running: true
sshd:
running: true
apache2:
running: true
mount:
/var/www/html:
exists: true
opts:
- rw
- relatime
vfs-opts:
- rw
- vers=4.2
- rsize=131072
- wsize=131072
- namlen=255
- hard
- proto=tcp
- timeo=600
- retrans=2
- sec=sys
- clientaddr=192.168.102.1
- local_lock=none
- addr=192.168.102.253
source: 192.168.102.253:/home/wordpress
filesystem: nfs4
interface:
enp0s3:
exists: true
addrs:
- 192.168.99.11/24
enp0s8:
exists: true
addrs:
- 192.168.101.1/24
enp0s9:
exists: true
addrs:
- 192.168.102.1/24
enp0s3:
exists: true
addrs:
- 192.168.99.101/24
mtu: 1500
enp0s8:
exists: true
addrs:
- 192.168.101.1/24
mtu: 1500
enp0s9:
exists: true
addrs:
- 192.168.102.1/24
mtu: 1500

View File

@ -1,63 +1,62 @@
package:
apache2:
installed: true
versions:
- 2.4.10-10+deb8u7
php5:
installed: true
versions:
- 5.6.29+dfsg-0+deb8u1
apache2:
installed: true
versions:
- 2.4.57-2
nfs-common:
installed: true
versions:
- 1:2.6.2-4
port:
tcp:22:
listening: true
ip:
- 0.0.0.0
tcp6:22:
listening: true
ip:
- '::'
tcp6:80:
listening: true
ip:
- '::'
tcp6:80:
listening: true
ip:
- '::'
service:
apache2:
enabled: true
running: true
sshd:
enabled: true
running: true
user:
sshd:
exists: true
uid: 105
gid: 65534
groups:
- nogroup
home: /var/run/sshd
shell: /usr/sbin/nologin
command:
egrep 192.168.102.14:/export/www /etc/fstab:
exit-status: 0
stdout:
- 192.168.102.14:/export/www /var/www/html nfs _netdev rw 0 0
stderr: []
timeout: 10000
apache2:
enabled: true
running: true
nfs-common:
enabled: false
running: false
process:
apache2:
running: true
sshd:
running: true
apache2:
running: true
mount:
/var/www/html:
exists: true
opts:
- rw
- relatime
vfs-opts:
- rw
- vers=4.2
- rsize=131072
- wsize=131072
- namlen=255
- hard
- proto=tcp
- timeo=600
- retrans=2
- sec=sys
- clientaddr=192.168.102.2
- local_lock=none
- addr=192.168.102.253
source: 192.168.102.253:/home/wordpress
filesystem: nfs4
interface:
enp0s3:
exists: true
addrs:
- 192.168.99.12/24
enp0s8:
exists: true
addrs:
- 192.168.101.2/24
enp0s9:
exists: true
addrs:
- 192.168.102.2/24
enp0s3:
exists: true
addrs:
- 192.168.99.102/24
mtu: 1500
enp0s8:
exists: true
addrs:
- 192.168.101.2/24
mtu: 1500
enp0s9:
exists: true
addrs:
- 192.168.102.2/24
mtu: 1500

View File

@ -1,28 +1,55 @@
package:
haproxy:
installed: true
versions:
- 2.6.12-1+deb12u1
addr:
tcp://192.168.101.1:80:
reachable: true
timeout: 500
tcp://192.168.101.2:80:
reachable: true
timeout: 500
port:
tcp:80:
listening: true
ip:
- 192.168.100.11
tcp:80:
listening: true
ip:
- 192.168.100.10
service:
haproxy:
enabled: true
running: true
sshd:
enabled: true
running: true
haproxy:
enabled: true
running: true
user:
haproxy:
exists: true
uid: 104
gid: 111
groups:
- haproxy
home: /var/lib/haproxy
shell: /usr/sbin/nologin
group:
haproxy:
exists: true
gid: 111
process:
haproxy:
running: true
interface:
enp0s3:
exists: true
addrs:
- 192.168.99.100/24
mtu: 1500
enp0s8:
exists: true
addrs:
- 192.168.100.11/24
mtu: 1500
enp0s9:
exists: true
addrs:
- 192.168.101.254/24
mtu: 1500
enp0s3:
exists: true
addrs:
- 192.168.99.100/24
mtu: 1500
enp0s8:
exists: true
addrs:
- 192.168.100.10/24
mtu: 1500
http:
http://192.168.100.10/:
status: 200
allow-insecure: false
no-follow-redirects: false
timeout: 5000
body: []

View File

@ -1,63 +1,62 @@
file:
/etc/nagios4/htdigest.users:
exists: true
mode: "0640"
owner: nagios
group: www-data
filetype: file
contains: [nagiosadmin]
/etc/systemd/system/systemd-journal-remote.service:
exists: true
mode: "0644"
owner: root
group: root
filetype: file
contents: []
/var/log/journal/remote:
exists: true
mode: "0755"
owner: systemd-journal-remote
group: systemd-journal-remote
filetype: directory
contents: []
package:
apache2:
installed: true
nagios-snmp-plugins:
installed: true
nagios4:
installed: true
snmp:
installed: true
python3-passlib:
installed: true
port:
tcp:80:
listening: true
ip:
- 0.0.0.0
udp:514:
listening: true
ip:
- 0.0.0.0
apache2:
installed: true
versions:
- 2.4.57-2
mariadb-server:
installed: true
versions:
- 1:10.11.4-1~deb12u1
systemd-journal-remote:
installed: true
versions:
- 252.19-1~deb12u1
service:
apache2:
enabled: true
running: true
nagios4:
enabled: true
running: true
command:
sysctl net.ipv4.ip_forward:
exit-status: 0
stdout:
- net.ipv4.ip_forward = 0
stderr: []
timeout: 10000
process:
apache2:
running: true
nagios4:
running: true
apache2:
enabled: true
running: true
mariadb.service:
enabled: true
running: true
systemd-journal-remote.socket:
enabled: true
running: true
zabbix-agent:
enabled: true
running: true
zabbix-server:
enabled: true
running: true
interface:
enp0s3:
exists: true
addrs:
- 192.168.99.8/24
enp0s8:
exists: true
addrs:
- 172.16.0.8/24
enp0s3:
exists: true
addrs:
- 192.168.99.8/24
mtu: 1500
enp0s8:
exists: true
addrs:
- 172.16.0.8/24
mtu: 1500
http:
http://localhost/nagios4:
status: 401
allow-insecure: false
no-follow-redirects: false
timeout: 5000
body: []
http://s-mon.gsb.lan/zabbix:
status: 200
allow-insecure: false
no-follow-redirects: false
timeout: 5000
body: []

55
goss/s-nas.yaml Normal file
View File

@ -0,0 +1,55 @@
file:
/home/wordpress:
exists: true
mode: "0755"
owner: www-data
group: www-data
filetype: directory
contents: []
package:
file:
installed: true
versions:
- 1:5.44-3
nfs-common:
installed: true
versions:
- 1:2.6.2-4
nfs-kernel-server:
installed: true
versions:
- 1:2.6.2-4
addr:
tcp://192.168.102.1:80:
reachable: true
timeout: 500
tcp://192.168.102.2:80:
reachable: true
timeout: 500
service:
nfs-common:
enabled: false
running: false
nfs-kernel-server:
enabled: true
running: true
nfs-mountd:
enabled: true
running: true
nfs-server:
enabled: true
running: true
nfs-utils:
enabled: true
running: false
interface:
enp0s3:
exists: true
addrs:
- 192.168.99.153/24
mtu: 1500
enp0s8:
exists: true
addrs:
- 192.168.102.253/24
mtu: 1500

145
goss/s-nxc.yaml Normal file
View File

@ -0,0 +1,145 @@
file:
/root/nxc:
exists: true
mode: "0755"
#size: 4096
#owner: root
#group: root
filetype: directory
contains: []
/root/nxc/certs:
exists: true
mode: "0755"
#size: 4096
#owner: root
#group: root
filetype: directory
contains: []
/root/nxc/config:
exists: true
mode: "0755"
#size: 4096
#owner: root
#group: root
filetype: directory
contains: []
/root/nxc/config/dynamic.yml:
exists: true
mode: "0644"
#size: 415
#owner: root
#group: root
filetype: file
contains: []
/root/nxc/config/static.yml:
exists: true
mode: "0644"
#size: 452
#owner: root
#group: root
filetype: file
contains: []
/root/nxc/docker-compose.yml:
exists: true
mode: "0644"
#size: 2135
#owner: root
#group: root
filetype: file
contains: []
/root/nxc/nxc-debug.sh:
exists: true
mode: "0755"
#size: 64
#owner: root
#group: root
filetype: file
contains: []
/root/nxc/nxc-prune.sh:
exists: true
mode: "0755"
#size: 110
#owner: root
#group: root
filetype: file
contains: []
/root/nxc/nxc-start.sh:
exists: true
mode: "0755"
#size: 34
#owner: root
#group: root
filetype: file
contains: []
/root/nxc/nxc-stop.sh:
exists: true
mode: "0755"
#size: 32
#owner: root
#group: root
filetype: file
contains: []
/usr/local/bin/mkcert:
exists: true
mode: "0755"
#size: 4788866
#owner: root
#group: root
filetype: file
contains: []
#addr:
#tcp://s-nxc.gsb.lan:443:
#reachable: true
#timeout: 500
port:
tcp:22:
listening: true
ip:
- 0.0.0.0
tcp:80:
listening: true
ip: []
tcp:443:
listening: true
ip: []
#tcp:8081:
#listening: true
#ip:
#- 0.0.0.0
interface:
enp0s3:
exists: true
addrs:
- 192.168.99.7/24
mtu: 1500
enp0s8:
exists: true
addrs:
- 172.16.0.7/24
mtu: 1500
http:
https://s-nxc.gsb.lan:
status: 200
allow-insecure: true
no-follow-redirects: false
timeout: 5000
body:
- Nextcloud

View File

@ -1,83 +0,0 @@
# Ce fichier viminfo a été généré par Vim 9.0.
# Vous pouvez l'éditer, mais soyez prudent.
# Viminfo version
|1,4
# 'encoding' dans lequel ce fichier a été écrit
*encoding=utf-8
# hlsearch on (H) or off (h):
~h
# Historique ligne de commande (chronologie décroissante) :
:q!
|2,0,1703236388,,"q!"
:x
|2,0,1703236381,,"x"
:x!
|2,0,1703236221,,"x!"
# Historique chaîne de recherche (chronologie décroissante) :
# Historique expression (chronologie décroissante) :
# Historique ligne de saisie (chronologie décroissante) :
# Historique Ligne de débogage (chronologie décroissante) :
# Registres :
""1 LINE 0
connection: local
|3,1,1,1,1,0,1703236374," connection: local"
"2 LINE 0
hosts: localhost
|3,0,2,1,1,0,1703236374," hosts: localhost"
# Marques dans le fichier :
'0 1 2 ~/tools/ansible/gsb2024/s-mon.yml
|4,48,1,2,1703236388,"~/tools/ansible/gsb2024/s-mon.yml"
'1 1 9 ~/tools/ansible/gsb2024/s-mon.yml
|4,49,1,9,1703236339,"~/tools/ansible/gsb2024/s-mon.yml"
'2 9 9 ~/tools/ansible/gsb2024/s-mon.yml
|4,50,9,9,1703236221,"~/tools/ansible/gsb2024/s-mon.yml"
'3 9 9 ~/tools/ansible/gsb2024/s-mon.yml
|4,51,9,9,1703236221,"~/tools/ansible/gsb2024/s-mon.yml"
'4 11 9 ~/tools/ansible/gsb2024/s-mon.yml
|4,52,11,9,1703236221,"~/tools/ansible/gsb2024/s-mon.yml"
'5 11 9 ~/tools/ansible/gsb2024/s-mon.yml
|4,53,11,9,1703236221,"~/tools/ansible/gsb2024/s-mon.yml"
'6 1 13 ~/tools/ansible/gsb2024/s-mon.yml
|4,54,1,13,1703236013,"~/tools/ansible/gsb2024/s-mon.yml"
'7 1 13 ~/tools/ansible/gsb2024/s-mon.yml
|4,55,1,13,1703236013,"~/tools/ansible/gsb2024/s-mon.yml"
'8 1 13 ~/tools/ansible/gsb2024/s-mon.yml
|4,56,1,13,1703236013,"~/tools/ansible/gsb2024/s-mon.yml"
'9 1 13 ~/tools/ansible/gsb2024/s-mon.yml
|4,57,1,13,1703236013,"~/tools/ansible/gsb2024/s-mon.yml"
# Liste de sauts (le plus récent en premier) :
-' 1 2 ~/tools/ansible/gsb2024/s-mon.yml
|4,39,1,2,1703236388,"~/tools/ansible/gsb2024/s-mon.yml"
-' 1 9 ~/tools/ansible/gsb2024/s-mon.yml
|4,39,1,9,1703236339,"~/tools/ansible/gsb2024/s-mon.yml"
-' 9 9 ~/tools/ansible/gsb2024/s-mon.yml
|4,39,9,9,1703236318,"~/tools/ansible/gsb2024/s-mon.yml"
-' 11 9 ~/tools/ansible/gsb2024/s-mon.yml
|4,39,11,9,1703236318,"~/tools/ansible/gsb2024/s-mon.yml"
-' 11 9 ~/tools/ansible/gsb2024/s-mon.yml
|4,39,11,9,1703236221,"~/tools/ansible/gsb2024/s-mon.yml"
-' 1 13 ~/tools/ansible/gsb2024/s-mon.yml
|4,39,1,13,1703236018,"~/tools/ansible/gsb2024/s-mon.yml"
-' 1 13 ~/tools/ansible/gsb2024/s-mon.yml
|4,39,1,13,1703236013,"~/tools/ansible/gsb2024/s-mon.yml"
# Historique des marques dans les fichiers (les plus récentes en premier) :
> ~/tools/ansible/gsb2024/s-mon.yml
* 1703236386 0
" 1 2
^ 9 10
. 2 0
+ 10 0
+ 2 0

View File

@ -1,77 +0,0 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
# All Vagrant configuration is done below. The "2" in Vagrant.configure
# configures the configuration version (we support older styles for
# backwards compatibility). Please don't change it unless you know what
# you're doing.
Vagrant.configure("2") do |config|
# The most common configuration options are documented and commented below.
# For a complete reference, please see the online documentation at
# https://docs.vagrantup.com.
# Every Vagrant development environment requires a box. You can search for
# boxes at https://vagrantcloud.com/search.
config.vm.box = "debian/buster64"
config.vm.hostname = "s-adm"
config.vm.define "s-adm"
config.vm.provider :virtualbox do |vb|
vb.name = "s-adm"
end
# Disable automatic box update checking. If you disable this, then
# boxes will only be checked for updates when the user runs
# `vagrant box outdated`. This is not recommended.
# config.vm.box_check_update = false
# Create a forwarded port mapping which allows access to a specific port
# within the machine from a port on the host machine. In the example below,
# accessing "localhost:8080" will access port 80 on the guest machine.
# NOTE: This will enable public access to the opened port
# config.vm.network "forwarded_port", guest: 80, host: 8080
# Create a forwarded port mapping which allows access to a specific port
# within the machine from a port on the host machine and only allow access
# via 127.0.0.1 to disable public access
# config.vm.network "forwarded_port", guest: 80, host: 8080, host_ip: "127.0.0.1"
# Create a private network, which allows host-only access to the machine
# using a specific IP.
config.vm.network "public_network", ip: "192.168.1.91"
config.vm.network "private_network", ip: "192.168.99.99"
# Create a public network, which generally matched to bridged network.
# Bridged networks make the machine appear as another physical device on
# your network.
# config.vm.network "public_network"
# Share an additional folder to the guest VM. The first argument is
# the path on the host to the actual folder. The second argument is
# the path on the guest to mount the folder. And the optional third
# argument is a set of non-required options.
# config.vm.synced_folder "../data", "/vagrant_data"
# Provider-specific configuration so you can fine-tune various
# backing providers for Vagrant. These expose provider-specific options.
# Example for VirtualBox:
#
# config.vm.provider "virtualbox" do |vb|
# # Display the VirtualBox GUI when booting the machine
# vb.gui = true
#
# # Customize the amount of memory on the VM:
# vb.memory = "1024"
# end
#
# View the documentation for the provider you are using for more
# information on available options.
# Enable provisioning with a shell script. Additional provisioners such as
# Ansible, Chef, Docker, Puppet and Salt are also available. Please see the
# documentation for more information about their specific syntax and use.
config.vm.provision "shell", inline: <<-SHELL
apt-get update
apt-get upgrade
apt-get install -y vim wget curl
# apt-get install -y apache2
SHELL
end

0
pre/gsbboot Normal file → Executable file
View File

81
pre/inst-depl Normal file → Executable file
View File

@ -2,52 +2,60 @@
## aa : 2023-01-18 15:25
## ps : 2023-02-01 15:25
## ps : 2023-12-18 15:25
## ps : 2024-01-17 15:25
set -o errexit
set -o pipefail
GITUSR=gitgsb
GITPRJ=gsb2024
apt-get update
apt-get install -y apache2 git
apt-get install -y lighttpd git
STOREREP="/var/www/html/gsbstore"
SRC="${SRC:-http://depl.sio.lan/gsbstore}"
GLPIREL=10.0.11
str="wget -nc https://github.com/glpi-project/glpi/releases/download/${GLPIREL}/glpi-${GLPIREL}.tgz"
str="wget -nc -4 https://github.com/glpi-project/glpi/releases/download/${GLPIREL}/glpi-${GLPIREL}.tgz"
#GLPI Agent
GLPIAGVER=1.7
str31="wget -nc https://github.com/glpi-project/glpi-agent/releases/download/${GLPIAGVER}/GLPI-Agent-${GLPIAGVER}-x64.msi"
str31="wget -nc -4 https://github.com/glpi-project/glpi-agent/releases/download/${GLPIAGVER}/GLPI-Agent-${GLPIAGVER}-x64.msi"
#str32="wget -nc https://github.com/glpi-project/glpi-agent/releases/download/${GLPIAGVER}/GLPI-Agent-${GLPIAGVER}-x86.msi"
#str32="wget -nc -4 https://github.com/glpi-project/glpi-agent/releases/download/${GLPIAGVER}/GLPI-Agent-${GLPIAGVER}-x86.msi"
FOGREL=1.5.10
str4="wget -nc https://github.com/FOGProject/fogproject/archive/${FOGREL}.tar.gz -O fogproject-${FOGREL}.tar.gz"
str4="wget -nc -4 https://github.com/FOGProject/fogproject/archive/${FOGREL}.tar.gz -O fogproject-${FOGREL}.tar.gz"
WPREL=6.4.2
#v6.1.1 le 17/01/2023
str5="wget -nc https://fr.wordpress.org/latest-fr_FR.tar.gz -O wordpress-6.4.2-fr_FR.tar.gz"
str5="wget -nc -4 https://fr.wordpress.org/latest-fr_FR.tar.gz -O wordpress-6.4.2-fr_FR.tar.gz"
str6="curl -L https://github.com/goss-org/goss/releases/latest/download/goss-linux-amd64 -o goss"
str6="wget -nc -4 https://github.com/goss-org/goss/releases/latest/download/goss-linux-amd64 -O goss"
str7="curl -L https://github.com/goss-org/goss/releases/latest/download/dgoss -o dgoss"
str7="wget -nc -4 https://github.com/goss-org/goss/releases/latest/download/dgoss -O dgoss"
#GESTSUPREL=3.2.30
#str8="wget -nc 'https://gestsup.fr/index.php?page=download&channel=stable&version=${GESTSUPREL}&type=gestsup' -O gestsup_${GESTSUPREL}.zip"
str8="wget -nc 'https://gestsup.fr/index.php?page=download&channel=stable&version=3.2.30&type=gestsup' -O gestsup_3.2.30.zip"
#str8="wget -nc -4 'https://gestsup.fr/index.php?page=download&channel=stable&version=${GESTSUPREL}&type=gestsup' -O gestsup_${GESTSUPREL}.zip"
str8="wget -nc -4 'https://gestsup.fr/index.php?page=download&channel=stable&version=3.2.30&type=gestsup' -O gestsup_3.2.30.zip"
#METRICBEAT ET FILEBEAT
ELKREL=8.11.3
str81="wget https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-${ELKREL}-amd64.deb"
str82="wget https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-${ELKREL}-windows-x86_64.zip"
str83="wget https://artifacts.elastic.co/downloads/beats/metricbeat/metricbeat-${ELKREL}-windows-x86_64.zip"
str84="wget https://artifacts.elastic.co/downloads/beats/metricbeat/metricbeat-${ELKREL}-amd64.deb"
ELKREL=8.11.4
str81="wget -nc -4 https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-${ELKREL}-amd64.deb"
str82="wget -nc -4 https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-${ELKREL}-windows-x86_64.zip"
str83="wget -nc -4 https://artifacts.elastic.co/downloads/beats/metricbeat/metricbeat-${ELKREL}-windows-x86_64.zip"
str84="wget -nc -4 https://artifacts.elastic.co/downloads/beats/metricbeat/metricbeat-${ELKREL}-amd64.deb"
[[ -d "${STOREREP}" ]]|| mkdir "${STOREREP}"
[[ -d "${STOREREP}" ]] || mkdir "${STOREREP}"
(cat <<EOT > "${STOREREP}/getall"
#!/bin/bash
if [[ -z "${SRC+x}" ]]; then
echo "erreur : variable SRC indefinie"
echo " SRC : URL serveur deploiement"
echo "export SRC=http://depl.sio.adm/gsbstore ; ./$0"
exit 1
fi
${str}
${str31}
@ -58,20 +66,55 @@ ${str7}
chmod +x ./goss ./dgoss
curl -L https://get.docker.com -o getdocker.sh
wget -nc -4 https://get.docker.com -O getdocker.sh
chmod +x ./getdocker.sh
wget -nc https://github.com/FiloSottile/mkcert/releases/download/v1.4.4/mkcert-v1.4.4-linux-amd64 -O mkcert
wget -nc -4 https://github.com/FiloSottile/mkcert/releases/download/v1.4.4/mkcert-v1.4.4-linux-amd64 -O mkcert
chmod +x ./mkcert
${str8}
#${str8}
${str81}
${str82}
${str83}
${str84}
wget -nc -4 "${SRC}/zabbix.sql.gz" -O zabbix.sql.gz
EOT
)
cat "${STOREREP}/getall"
cd "${STOREREP}" || exit 2
bash getall
cp goss /usr/local/bin
(cat <<'EOT' > "${STOREREP}/inst1"
#!/bin/bash
if [[ -z "${HOST+x}" ]]; then
echo "erreur : variable HOST indefinie"
echo " HOST : adresse serveur deploiement"
echo "export HOST=s-xyzt ; ./$0"
exit 1
fi
hostname=$(hostname)
echo "${HOST}" > /etc/hostname
hostnamectl set-hostname "${HOST}"
sed -i "s/${hostname}/${HOST}/g" /etc/hosts
echo "vous pouvez redemarrer ..."
EOT
)
(cat <<'EOT' > "${STOREREP}/inst2"
#!/bin/bash
mkdir -p ~/tools/ansible ; cd ~/tools/ansible
git clone https://gitea.lyc-lecastel.fr/gsb/gsb2024.git
cd gsb2024/pre
DEPL=192.168.99.99 bash gsbboot
cd ../.. ; bash pull-config
EOT
)
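Pour mémoire, les rôles et machines clientes consomment ensuite ce dépôt via **s-adm.gsb.adm** ; exemple indicatif de récupération d'un fichier du gsbstore :
```shell
wget -nc -4 http://s-adm.gsb.adm/gsbstore/goss -O /usr/local/bin/goss && chmod +x /usr/local/bin/goss
```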

View File

@ -1,48 +0,0 @@
#!/bin/bash
set -o errexit
set -o pipefail
GITUSR=gitgsb
GITPRJ=gsb
apt update && apt upgrade
apt install -y apache2 git
getent passwd "${GITUSR}" >> /dev/null
if [[ $? != 0 ]]; then
echo "creation utilisateur "${GITUSR}" ..."
/sbin/useradd -m -d /home/"${GITUSR}" -s /bin/bash "${GITUSR}"
echo "${GITUSR}:${GITUSR}" | /sbin/chpasswd
else
echo "utilisateur "${GITUSR}" existant..."
fi
su -c "git init --share --bare /home/${GITUSR}/${GITPRJ}.git" "${GITUSR}"
su -c "cd ${GITPRJ}.git/.git/hooks && mv post-update.sample post-update" "${GITUSR}"
[[ -h /var/www/html/"${GITPRJ}".git ]]|| ln -s /home/"${GITUSR}"/"${GITPRJ}".git /var/www/html/"${GITPRJ}".git
[[ -d /var/www/html/gsbstore ]]|| mkdir /var/www/html/gsbstore
(cat <<EOT > /var/www/html/gsbstore/getall
#!/bin/bash
set -o errexit
set -o pipefail
GLPIREL=9.4.5
wget -nc https://github.com/glpi-project/glpi/releases/download/\${GLPIREL}/glpi-\${GLPIREL}.tgz
FIREL=9.4+2.4
wget -nc -O fusioninventory-glpi\${FIREL}.tag.gz https://github.com/fusioninventory/fusioninventory-for-glpi/archive/glpi\${FIREL}.tar.gz
#https://github.com/fusioninventory/fusioninventory-for-glpi/archive/glpi9.4+2.4.tar.g
FIAGREL=2.5.2
wget -nc https://github.com/fusioninventory/fusioninventory-agent/releases/download/\${FIAGREL}/fusioninventory-agent_windows-x64_\${FIAGREL}.exe
wget -nc https://github.com/fusioninventory/fusioninventory-agent/releases/download/\$FIAGREL/fusioninventory-agent_windows-x86_\${FIAGREL}.exe
FOGREL=1.5.7
wget -nc https://github.com/FOGProject/fogproject/archive/\${FOGREL}.tar.gz -O fogproject-\${FOGREL}.tar.gz
wget -nc https://fr.wordpress.org/wordpress-5.3.2-fr_FR.tar.gz
EOT
)
cat /var/www/html/gsbstore/getall

8
pre/pull-config Normal file → Executable file
View File

@ -14,15 +14,15 @@ dir=/root/tools/ansible
cd "${dir}" || exit 1
hostname > hosts
if [[ $# == 1 ]] ; then
opt=$1
fi
if [[ "${opt}" == '-l' ]] ; then
cd "${dir}/${prj}" || exit 2
ansible-playbook -i localhost, -c local "$(hostname).yml"
cd "${dir}/${prj}" || exit 2
echo "Execution locale ...."
ansible-playbook -i localhost, -c local "$(hostname).yml"
else
ansible-pull -i "${dir}/hosts" -C main -U "${UREP}"
ansible-pull -i "$(hostname)," -U "${UREP}"
fi
exit 0

View File

@ -1,7 +1,11 @@
#!/bin/bash
dir=/root/tools/ansible
prj=gsb2024
opt=""
if [ -z ${UREP+x} ]; then
UREP=https://gitea.lyc-lecastel.fr/gsb/gsb2024.git
UREP=https://gitea.lyc-lecastel.fr/gsb/gsb2024.git
fi
dir=/root/tools/ansible
@ -10,7 +14,15 @@ dir=/root/tools/ansible
cd "${dir}" || exit 1
hostname > hosts
ansible-pull -i "${dir}/hosts" -C main -U "${UREP}"
if [[ $# == 1 ]] ; then
opt=$1
fi
if [[ "${opt}" == '-l' ]] ; then
cd "${dir}/${prj}" || exit 2
echo "Execution locale ...."
ansible-playbook -i localhost, -c local "$(hostname).yml"
else
ansible-pull -i "$(hostname)," -U "${UREP}"
fi
exit 0
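Utilisation (exemple indicatif) : sans option, le script fait un `ansible-pull` depuis le dépôt Gitea ; avec `-l`, il rejoue localement le playbook de la machine.
```shell
bash pull-config       # ansible-pull -i "$(hostname)," -U https://gitea.lyc-lecastel.fr/gsb/gsb2024.git
bash pull-config -l    # ansible-playbook -i localhost, -c local "$(hostname).yml"
```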

View File

@ -1,12 +1,14 @@
---
- hosts: localhost
connection: local
become: yes
roles:
- base
- goss
- r-ext
- snmp-agent
- zabbix-cli
- ssh-cli
# - syslog-cli
- post

View File

@ -1,6 +1,7 @@
---
- hosts: localhost
connection: local
become: yes
roles:
- base
@ -9,5 +10,5 @@
- ssh-cli
# - syslog-cli
- dhcp
- snmp-agent
- zabbix-cli
- post

View File

@ -0,0 +1,24 @@
---
- name: Creation user awx
ansible.builtin.user:
name: awx
groups: sudo
append: yes
- name: Création d'un mdp pour user awx
user:
name: awx
password: '$5$1POIEvs/Q.DHI4/6$RT6nl42XkekxTPKA/dktbnCMxL8Rfk8GAK7NxqL9D70'
- name: Get awx key_pub
get_url:
url: http://s-adm.gsb.adm/gsbstore/id_rsa_awx.pub
dest: /tmp
- name: Set authorized key taken from file /tmp
ansible.posix.authorized_key:
user: awx
state: present
key: "{{ lookup('file', '/tmp/id_rsa_awx.pub') }}"

View File

@ -0,0 +1,20 @@
---
- name: Creation user awx, cle SSH et group sudo
ansible.builtin.user:
name: awx
groups: sudo
append: yes
shell: /bin/bash
generate_ssh_key: yes
#- name: Creation mdp user awx
# ansible.builtin.user:
#name:
#user: awx
# password: '$5$1POIEvs/Q.DHI4/6$RT6nl42XkekxTPKA/dktbnCMxL8Rfk8GAK7NxqL9D70'
- name: Copie cle publique dans gsbstore
copy:
src: /home/awx/.ssh/id_rsa.pub
dest: /var/www/html/gsbstore/id_rsa_awx.pub
remote_src: yes

26
roles/awx/README.md Normal file
View File

@ -0,0 +1,26 @@
# Rôle awx
***
Rôle awx: Configuration d'un serveur AWX avec k3s.
## Table des matières
1. [Que fait le rôle AWX ?]
2. [Connexion à l'interface WEB du serveur AWX]
**AWX** est l'application développée par **RedHat** permettant de lancer des playbooks **ansible** depuis une interface web évoluée plutôt qu'en ligne de commande. **AWX** utilise Kubernetes, mis en oeuvre ici avec **k3s**.
## Que fait le rôle AWX ?
Le rôle **awx** installe et configure un serveur **AWX** avec **k3s**. Pour cela, le rôle :
- Installe **k3s** en spécifiant l'adresse IP ainsi que l'interface d'écoute
- Clone le dépôt **GitHub** **awx-on-k3s**
- Procède au déploiement du pod **awx-operator**
- Génère un certificat auto-signé utilisé par le serveur **AWX** en utilisant **OpenSSL**
- Édite le fichier awx.yaml afin d'y indiquer le nom d'hôte du serveur, en accord avec le nom utilisé par les certificats
- Déploie le serveur **AWX**
- Teste l'accessibilité du serveur **AWX**.
### Connexion à l'interface WEB du serveur AWX ###
Une fois le rôle **awx** terminé, il est possible de se connecter à l'interface web du serveur depuis un navigateur.
S'assurer que votre machine puisse résoudre **s-awx.gsb.lan**
- Se connecter sur : **https://s-awx.gsb.lan**
- Utilisateur: **admin** / Mot de passe: **Ansible123!**
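Vérification rapide côté serveur (exemple indicatif, en supposant l'espace de noms `awx` utilisé par défaut par **awx-on-k3s**) :
```shell
kubectl -n awx get pods
kubectl -n awx get awx
```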

79
roles/awx/tasks/main.yml Normal file
View File

@ -0,0 +1,79 @@
---
- name: Installation de k3s ...
ansible.builtin.shell: curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=v1.28.5+k3s1 sh -s - --write-kubeconfig-mode 644 --node-ip "{{ awx_ip }}" --flannel-iface "{{ awx_if }}"
- name: Clonage du dépot awx-on-k3s
git:
repo: https://github.com/kurokobo/awx-on-k3s.git
dest: "{{ awx_dir }}"
clone: yes
force: yes
- name: Git checkout
ansible.builtin.shell: "git checkout 2.10.0"
args:
chdir: "{{ awx_dir }}"
- name: Deploiement AWX Operator ...
ansible.builtin.shell: "kubectl apply -k operator"
args:
chdir: "{{ awx_dir }}"
#- name: Git checkout
#ansible.builtin.git:
#repo: 'https://github.com/kurokobo/awx-on-k3s.git'
#dest: "{{ awx_dir }}"
#version: release-2.10.0
- name: Generation de certificat auto-signé avec OpenSSL
ansible.builtin.shell: 'openssl req -x509 -nodes -days 3650 -newkey rsa:2048 -out ./base/tls.crt -keyout ./base/tls.key -subj "/CN={{ awx_host }}/O={{ awx_host }}" -addext "subjectAltName = DNS:{{ awx_host }}"'
args:
chdir: "{{ awx_dir }}"
- name: Changement de la ligne hostname dans le fichier awx.yaml
replace:
path: ~/tools/awx-on-k3s/base/awx.yaml
regexp: 'awx.example.com'
replace: '{{ awx_host }}'
backup: yes
- name: creation du repertoire postgres-13
ansible.builtin.file:
path: /data/postgres-13
state: directory
mode: '0755'
- name: Creation repertoire projects
ansible.builtin.file:
path: /data/projects
state: directory
owner: 1000:0
- name: Deploiement d'AWX ...
ansible.builtin.shell: "kubectl apply -k base"
args:
chdir: "{{ awx_dir }}"
- name: Test d'accessibilité de l'interface web AWX
ansible.builtin.uri:
url: "https://s-awx.gsb.lan"
follow_redirects: none
method: GET
validate_certs: false
register: _result
until: _result.status == 200
retries: 60 # 60*10 seconds = 10 min
delay: 10 # Every 10 seconds
- debug:
msg: "L'installation du serveur AWX est terminée."
- debug:
msg: "Connectez-vous sur: https://s-awx.gsb.lan"
- debug:
msg: "Nom d'utilisateur: admin / mdp: Ansible123!"

View File

@ -1,8 +1,14 @@
---
- name: désactive unattended upgrade
ansible.builtin.service:
name: unattended-upgrades.service
state: stopped
enabled: false
- name: Copie sources.list
copy:
src: sources.list.{{ ansible_distribution }}
src: sources.list.{{ ansible_distribution_release }}
dest: /etc/apt/sources.list
- name: Copie apt.conf pour proxy
@ -75,8 +81,3 @@
- net.ipv6.conf.default.disable_ipv6
- net.ipv6.conf.lo.disable_ipv6
- name: désactive unattended upgrade
ansible.builtin.service:
name: unattended-upgrades.service
state: stopped
enabled: false

View File

@ -22,6 +22,9 @@
192.168.99.14 s-nas.gsb.adm
192.168.99.15 s-san.gsb.adm
192.168.99.16 s-fog.gsb.adm
192.168.99.20 s-kea1.gsb.adm
192.168.99.21 s-kea2.gsb.adm
192.168.99.22 s-awx.gsb.adm
192.168.99.50 s-lb-bd.gsb.adm
192.168.99.101 s-lb-web1.gsb.adm
192.168.99.102 s-lb-web2.gsb.adm

View File

@ -21,6 +21,9 @@
192.168.99.12 r-int.gsb.adm
192.168.99.13 r-ext.gsb.adm
192.168.99.14 s-nas.gsb.adm
192.168.99.20 s-kea1.gsb.adm
192.168.99.21 s-kea2.gsb.adm
192.168.99.22 s-awx.gsb.adm
192.168.99.50 s-lb-bd.gsb.adm
192.168.99.101 s-lb-web1.gsb.adm
192.168.99.102 s-lb-web2.gsb.adm

View File

@ -120,7 +120,7 @@ subnet 172.16.65.0 netmask 255.255.255.0 {
#DHCP pour le réseau USER
subnet 172.16.64.0 netmask 255.255.255.0 {
range 172.16.64.20 172.16.64.120;
range 172.16.64.100 172.16.64.150;
option domain-name-servers 172.16.0.1 ;
option routers 172.16.64.254;
option broadcast-address 172.16.64.255;

View File

@ -5,7 +5,7 @@
;
$TTL 604800
@ IN SOA s-infra.gsb.lan. root.s-infra.gsb.lan. (
2023051000 ; Serial
2024011900 ; Serial
7200 ; Refresh
86400 ; Retry
8419200 ; Expire
@ -16,9 +16,11 @@ $TTL 604800
@ IN A 127.0.0.1
@ IN AAAA ::1
s-infra IN A 172.16.0.1
s-backup IN A 172.16.0.4
s-proxy IN A 172.16.0.2
s-appli IN A 172.16.0.3
s-backup IN A 172.16.0.4
s-stork IN A 172.16.0.4
s-gotify IN A 172.16.0.4
s-win IN A 172.16.0.6
s-mess IN A 172.16.0.7
s-nxc IN A 172.16.0.7
@ -27,6 +29,9 @@ s-mon IN A 172.16.0.8
s-itil IN A 172.16.0.9
s-elk IN A 172.16.0.11
s-gestsup IN A 172.16.0.17
s-kea1 IN A 172.16.0.20
s-kea2 IN A 172.16.0.21
s-awx IN A 172.16.0.22
r-int IN A 172.16.0.254
r-int-lnk IN A 192.168.200.254
r-ext IN A 192.168.200.253

View File

@ -5,7 +5,7 @@
;
$TTL 604800
@ IN SOA s-infra.gsb.lan. root.s-infra.gsb.lan. (
2023040501 ; Serial
2024011800 ; Serial
7200 ; Refresh
86400 ; Retry
8419200 ; Expire
@ -21,10 +21,13 @@ $TTL 604800
7.0 IN PTR s-nxc.gsb.lan.
8.0 IN PTR s-mon.gsb.lan.
9.0 IN PTR s-itil.gsb.lan.
20.0 IN PTR s-kea1.gsb.lan.
21.0 IN PTR s-kea2.gsb.lan.
22.0 IN PTR s-awx.gsb.lan.
101.1 IN PTR s-web1
101.2 IN PTR s-web2
100.10 IN PTR s-lb
100.10 IN PTR s-lb.gsb.lan
11.0 IN PTR s-elk.gsb.lan.
17.0 IN PTR s-gestsup.lan
254.0 IN PTR r-int.gsb.lan.
254.0 IN PTR r-int.gsb.lan.

Binary file not shown.

View File

@ -1,16 +1,16 @@
---
- name: Supprime le fichier getdocker.sh si déjà présent
file:
state: absent
path: /tmp/getdocker.sh
- name: on recupere getdocker
get_url:
url: http://s-adm.gsb.adm/gsbstore/getdocker.sh
dest: /usr/local/bin
- name: Télécharge le script d'installation de docker
uri:
url: 'https://get.docker.com'
method: GET
dest: /tmp/getdocker.sh
mode: a+x
register: result
- name: on verifie si docker est installe
stat:
path: /usr/bin/docker
#command: which docker
register: docker_present
- name: Execution du script getdocker
shell: bash /tmp/getdocker.sh
- name: Execution du script getdocker si docker n'est pas deja installe
shell: bash /usr/local/bin/getdocker.sh
#when: docker_present.stdout.find('/usr/bin/docker') == -1
when: not docker_present.stat.exists

View File

@ -1 +1 @@
BEATVER: "8.11.5"
BEATVER: "8.11.4"

View File

@ -1,17 +1,51 @@
---
- name: Récupération de filebeat
get_url:
url: http://s-adm.gsb.adm/gsbstore/filebeat-${BEATVAR}-amd64.deb
url: "http://s-adm.gsb.adm/gsbstore/filebeat-{{ BEATVER }}-amd64.deb"
dest: /tmp/
- name: Installation de filebeat
apt:
deb: /tmp/filebeat-${BEATVEAR}-amd64.deb
deb: "/tmp/filebeat-{{ BEATVER }}-amd64.deb"
<<<<<<< HEAD
- name: Chgt filebeat.yml - localhost:9200 - Elastic
replace:
path: /etc/filebeat/filebeat.yml
regexp: 'localhost:9200'
replace: 's-elk.gsb.adm:9200'
backup: yes
- name: Chgt filebeat.yml - localhost:5601 - Kibana
replace:
path: /etc/filebeat/filebeat.yml
regexp: 'localhost:5601'
replace: 's-elk.gsb.adm:5601'
backup: yes
- name: Chgt filebeat.yml - user - Kibana
replace:
path: /etc/filebeat/filebeat.yml
regexp: 'user:5601'
replace: 's-elk.gsb.adm:5601'
backup: yes
#- name: Changement du fichier de conf
# copy:
# src: filebeat.yml
# dest: /etc/filebeat/filebeat.yml
=======
- name: sortie pour debug
fail:
msg: "paquet installe"
- name: Changement du fichier de conf
copy:
src: filebeat.yml
dest: /etc/filebeat/filebeat.yml
>>>>>>> d16ccae (maj pour elk-filebeat-cli)
- name: Configuration de filebeat
shell: filebeat modules enable system

View File

@ -1,9 +1,22 @@
## Principe du rôle elk
ELK 8.5.3
# The elk role
ELK Version 8.5.3
Ce rôle permet de créer un serveur ELK pour centraliser les logs et de des métriques pour simplifier la gestion du parc informatique GSB.
Le principe de ce rôle est d'installer docker, les différentes tâches de ce rôle sont de :
This role installs an ELK server to centralise the logs and metrics of the GSB estate and simplify its management.
The **elk** role installs **docker**; the tasks of this role are to:
- Check whether ELK is already installed,
- Importation un docker-compose depuis github,
- Changement la configuration pour passer en version 'basic'
- Clone the **devianthony** repository from GitHub,
- Switch the configuration to the 'basic' licence,
- Start ELK with docker-compose.
## Manual start
- from the **elk** directory (cloned by the role into /root/elk):
````shell
docker compose up setup
docker compose up -d
````
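Once `docker compose up -d` returns, a quick way to confirm the stack is serving is to probe the two ports that the filebeat role points clients at (9200 for Elasticsearch, 5601 for Kibana). A minimal sketch, assuming it is run on s-elk itself:

```shell
cd /root/elk                    # directory the role clones the compose project into
docker compose ps               # services should be running / healthy
curl -s http://localhost:9200 | head -n 5                         # Elasticsearch banner
curl -s -o /dev/null -w '%{http_code}\n' http://localhost:5601    # Kibana, expect 200 or 302
```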

View File

@ -21,7 +21,7 @@
regexp: 'xpack.license.self_generated.type: trial'
replace: 'xpack.license.self_generated.type: basic'
- name: Execution du fichier docker-compose.yml
shell: docker compose up -d
args:
chdir: /root/elk
# - name: Execution du fichier docker-compose.yml
# shell: docker compose pull
# args:
# chdir: /root/elk

26
roles/firewalld/README.md Normal file
View File

@ -0,0 +1,26 @@
# awx role
***
awx role: configuration of an AWX server with k3s.
## Table of contents
1. [What does the AWX role do?]
2. [Connecting to the AWX web interface]
**AWX** is the application developed by **RedHat** for running **ansible** playbooks from a full-featured web interface rather than from the command line. **AWX** relies on Kubernetes, implemented here with **k3s**.
## What does the AWX role do?
The **awx** role installs and configures an **AWX** server on **k3s**. To do so, the role:
- Installs **k3s**, specifying the IP address and the listening interface
- Clones the **awx-on-k3s** **GitHub** repository
- Deploys the **awx-operator** pod
- Generates, with **OpenSSL**, a self-signed certificate used by the **AWX** server
- Edits the awx.yaml file to set the server hostname consistently with the name used in the certificate
- Deploys the **AWX** server
- Tests that the **AWX** server is reachable.
### Connecting to the AWX web interface ###
Once the **awx** role has finished, the server web interface can be reached from a browser.
Make sure your machine can resolve **s-awx.gsb.lan**.
- Connect to: **https://s-awx.gsb.lan**
- User: **admin** / Password: **Ansible123!**

View File

@ -0,0 +1,91 @@
---
- name: Installation de firewalld
apt:
state: present
name:
- firewalld
- name: affectation de l'interface enp0s3 a la zone external
ansible.posix.firewalld:
zone: external
interface: enp0s3
permanent: true
state: enabled
- name: affectation de l'interface enp0s8 a la zone internal
ansible.posix.firewalld:
zone: internal
interface: enp0s8
permanent: true
state: enabled
- name: FirewallD rules pour la zone internal
firewalld:
zone: internal
permanent: yes
immediate: yes
service: "{{ item }}"
state: enabled
with_items:
- http
- https
- dns
- ssh
- rdp
- name: FirewallD rules pour la zone external
firewalld:
zone: external
permanent: yes
immediate: yes
service: "{{ item }}"
state: enabled
with_items:
- ssh
- rdp
#- ansible.posix.firewalld:
# zone: internal
# service: http
# permanent: true
# state: enabled
#- ansible.posix.firewalld:
# zone: internal
# service: dns
# permanent: true
#state: enabled
#- ansible.posix.firewalld:
# zone: internal
# service: ssh
# permanent: true
# state: enabled
#- ansible.posix.firewalld:
# zone: internal
# service: rdp
#permanent: true
#state: enabled
- ansible.posix.firewalld:
zone: internal
port: 8080/tcp
permanent: true
state: enabled
- ansible.posix.firewalld:
zone: external
port: 3389/tcp
permanent: true
state: enabled
- ansible.posix.firewalld:
port_forward:
- port: 3389
proto: tcp
toaddr: "192.168.99.6"
toport: 3389
state: enabled
immediate: yes
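Not part of the role itself, just a hand-run sanity check of the zones and services these tasks are meant to produce:

```shell
# Run on the firewalled host after the role has been applied
firewall-cmd --get-active-zones                    # enp0s3 -> external, enp0s8 -> internal
firewall-cmd --zone=internal --list-services       # expect http https dns ssh rdp
firewall-cmd --zone=internal --list-ports          # expect 8080/tcp
firewall-cmd --zone=external --list-forward-ports  # expect 3389 forwarded to 192.168.99.6
```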

View File

@ -1,16 +1,41 @@
# Fog
Ce rôle permet l'installation et la modification de Fog.
This role handles the **installation** and **configuration** of **Fog**.
**Fog** is an open-source IT asset management solution. It provides features such as **system image creation**, **image deployment to multiple machines** and **workstation management** through **PXE**.
## What is Fog?
**PXE** (Preboot eXecution Environment) is a protocol that lets a host boot over the network rather than from its local hard drive, which makes remote deployment of system images easier.
In the GSB context, Fog, together with PXE, also provides the **DHCP** service.
Fog therefore simplifies image creation and workstation deployment by handling both network boot (PXE) and network configuration (DHCP).
Fog permet le déploiement d'images disque tel que Windows ou bien Linux en utilisant PXE (Preboot Execution Environment).
## Comment l'installer et le configurer ?
### Prérequis:
## How to install it
Give the machine at least 4 GB of memory.
### Step 1:
Avant toute chose, lancer le fichier goss de s-fog ( présent dans gsb2023/goss/s-fog.yaml ) pour vérifier que la configuration réseau est correct et opérationnel. Une fois l'installation principale effectuée, il faut lancer le playbook ansible s-fog.yaml.
Il faudra se rendre dans le dossier **fog** pour lancer le script **installfog.sh** ( fog/bin/ ). La configuration sera déjà établie via le fichier **.fogsettings**
Run the "pre-installation" Ansible playbook named **s-fog.yml**.
It installs the **Fog** base and the **Goss** tool, and configures **DHCP**, **SSH** and the **SSH** agent.
This playbook also calls the **main.yml** playbook in **roles/fog/tasks/**, which installs the base packages such as **Apache2**, **MariaDB client and server**, etc. (see the playbook for details).
Finally, this playbook also fetches the Fog installation archive from the **s-adm** server (through **main.yml** in **roles/default/**), then extracts the archive and runs it (from the moment the second playbook is launched: see step 2).
Reboot the server so that the interfaces get the right IP addresses.
### Step 2:
Run the second playbook, **install-fog.yml**, which calls the tasks that run the installation script with the **fogsettings** file, avoiding having to answer the interactive questions manually.
### Step 3:
All that is left is to open the Fog web interface at **http://172.16.64.16/management/** from a machine on the right network, follow the instructions shown (database installation or upgrade), and then log in and start using it.
### Extra step:
You can check that the configuration is correct with Goss (command: **./agoss -f tap**) from the **gsb2024** directory.
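The same sequence, compressed into the commands as they would be typed on s-fog; the playbook names come from the text above, while the inventory and connection options are assumptions to adapt to your environment:

```shell
# Step 1: base packages, DHCP/SSH configuration and retrieval of the Fog archive
ansible-playbook -i localhost, -c local s-fog.yml
reboot
# Step 2: unattended Fog installation driven by the .fogsettings file
ansible-playbook -i localhost, -c local install-fog.yml
# Step 3 / extra step: web UI at http://172.16.64.16/management/, then verify with goss
cd gsb2024 && ./agoss -f tap    # run from wherever the gsb2024 repository is checked out
```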

View File

@ -1,3 +1,3 @@
depl_url: "http://s-adm.gsb.adm/gsbstore"
depl_fog: "fogproject-1.5.9.tar.gz"
depl_fog: "fogproject-1.5.10.tar.gz"
instructions: "Pour lancer l'installateur Fog, faites : 'bash /root/tools/fog/bin/installfog.sh'. Suivez ensuite les instructions"

View File

@ -0,0 +1,46 @@
## Start of FOG Settings
## Created by the FOG Installer
## Find more information about this file in the FOG Project wiki:
## https://wiki.fogproject.org/wiki/index.php?title=.fogsettings
## Version: 1.5.10
## Install time: mar. 16 janv. 2024 15:27:57
ipaddress='192.168.99.100'
copybackold='0'
interface='enp0s3'
submask='255.255.255.0'
hostname='s-fog.gsb.lan'
routeraddress='192.168.99.99'
plainrouter='192.168.99.99'
dnsaddress='192.168.99.99'
username='fogproject'
password='zbSw#FaGPS7O1bJ5tpfj'
osid='2'
osname='Debian'
dodhcp='Y'
bldhcp='0'
dhcpd='isc-dhcp-server'
blexports='1'
installtype='N'
snmysqluser='fogmaster'
snmysqlpass='cbZjO*gCONbbldV4a6l1'
snmysqlhost='localhost'
mysqldbname='fog'
installlang='0'
storageLocation='/images'
fogupdateloaded=1
docroot='/var/www/html/'
webroot='/fog/'
caCreated='yes'
httpproto='http'
startrange=''
endrange=''
packages='apache2 bc build-essential cpp curl g++ gawk gcc genisoimage git gzip htmldoc isolinux lftp libapache2-mod-php libc6 libcurl4 liblzma-dev m4 mariadb-client mariadb-server net-tools nfs-kernel-server openssh-server php php-bcmath php-cli php-curl php-fpm php-gd php-json php-ldap php-mbstring php-mysql tar tftpd-hpa tftp-hpa unzip vsftpd wget zlib1g'
noTftpBuild=''
tftpAdvOpts=''
sslpath='/opt/fog/snapins/ssl/'
backupPath='/home/'
armsupport=''
php_ver='7.4'
sslprivkey='/opt/fog/snapins/ssl//.srvprivate.key'
sendreports='Y'
## End of FOG Settings

View File

@ -3,17 +3,17 @@
## Find more information about this file in the FOG Project wiki:
## https://wiki.fogproject.org/wiki/index.php?title=.fogsettings
## Version: 1.5.10
## Install time: jeu. 11 janv. 2024 11:41:05
ipaddress='172.16.64.16'
## Install time: Mon Jan 15 23:16:31 2024
ipaddress='172.16.0.16'
copybackold='0'
interface='enp0s9'
submask='255.255.255.0'
hostname='s-fog.gsb.lan'
routeraddress='192.168.99.99'
plainrouter='192.168.99.99'
hostname='s-fog'
routeraddress='172.16.64.254'
plainrouter='172.16.64.254'
dnsaddress='172.16.0.1'
username='fogproject'
password='/7ElC1OHrP47EN2w59xl'
password='0lEyBKxcrQxseHLB#Cbg'
osid='2'
osname='Debian'
dodhcp='y'
@ -22,25 +22,27 @@ dhcpd='isc-dhcp-server'
blexports='1'
installtype='N'
snmysqluser='fogmaster'
snmysqlpass='HHO5vSGqFiHE_9d2lja3'
snmysqlpass='DQG@4PU31F9vOE4bX6V2'
snmysqlhost='localhost'
mysqldbname='fog'
installlang='0'
installlang='1'
storageLocation='/images'
fogupdateloaded=1
docroot='/var/www/html/'
webroot='/fog/'
caCreated='yes'
httpproto='http'
startrange='172.16.64.10'
endrange='172.16.64.254'
httpproto='https'
startrange='172.16.64.120'
endrange='172.16.64.140'
bootfilename='undionly.kpxe'
packages='apache2 bc build-essential cpp curl g++ gawk gcc genisoimage git gzip htmldoc isc-dhcp-server isolinux lftp libapache2-mod-php7.4 libc6 libcurl4 li>
packages='apache2 bc build-essential cpp curl g++ gawk gcc genisoimage gettext git gzip htmldoc isc-dhcp-server isolinux lftp libapache2-mod-php libc6 libcurl4 liblzma-dev m4 mariadb-client mariadb-server net-tools nfs-kernel-server openssh-server php php-bcmath php-cli php-curl php-fpm php-gd php-intl php-json php-ldap php-mbstring php-mysql tar tftp-hpa tftpd-hpa unzip vsftpd wget zlib1g'
noTftpBuild=''
tftpAdvOpts=''
sslpath='/opt/fog/snapins/ssl/'
backupPath='/home/'
#backupPath='/home/'
armsupport='0'
php_ver='7.4'
php_verAdds='-7.4'
sslprivkey='/opt/fog/snapins/ssl//.srvprivate.key'
sendreports='N'
## End of FOG Settings

View File

@ -0,0 +1,49 @@
## Start of FOG Settings
## Created by the FOG Installer
## Find more information about this file in the FOG Project wiki:
## https://wiki.fogproject.org/wiki/index.php?title=.fogsettings
## Version: 1.5.10
## Install time: jeu. 11 janv. 2024
## Install time: jeu. 11 janv. 2024 11:41:05
ipaddress='172.16.64.16'
copybackold='0'
interface='enp0s9'
submask='255.255.255.0'
hostname='s-fog.gsb.lan'
routeraddress='172.16.64.254'
plainrouter='172.16.64.254'
dnsaddress='172.16.0.1'
username='fogproject'
password='/7ElC1OHrP47EN2w59xl'
osid='2'
osname='Debian'
dodhcp='y'
bldhcp='1'
dhcpd='isc-dhcp-server'
blexports='1'
installtype='N'
snmysqluser='fogmaster'
snmysqlpass='HHO5vSGqFiHE_9d2lja3'
snmysqlhost='localhost'
mysqldbname='fog'
installlang='1'
storageLocation='/images'
fogupdateloaded=1
docroot='/var/www/'
webroot='/fog/'
caCreated='yes'
httpproto='https'
startrange='172.16.64.10'
endrange='172.16.64.254'
#bootfilename='undionly.kpxe'
packages='apache2 bc build-essential cpp curl g++ gawk gcc genisoimage gettext git gzip htmldoc isc-dhcp-server isolinux lftp libapache2-mod-php libc6 libcurl4 liblzma-dev m4 mariadb-client mariadb-server net-tools nfs-kernel-server openssh-server php php-bcmath php-cli php-curl php-fpm php-gd php-intl php-json php-ldap php-mbstring php-mysql tar tftpd-hpa tftp-hpa unzip vsftpd wget zlib1g'
noTftpBuild=''
tftpAdvOpts=''
sslpath='/opt/fog/snapins/ssl/'
backupPath='/home/'
armsupport='0'
php_ver='7.4'
#php_verAdds='-7.4'
sslprivkey='/opt/fog/snapins/ssl//.srvprivate.key'
sendreports='Y'
## End of FOG Settings

View File

@ -0,0 +1,51 @@
## Start of FOG Settings
## Created by the FOG Installer
## Find more information about this file in the FOG Project wiki:
## https://wiki.fogproject.org/wiki/index.php?title=.fogsettings
## Version: 1.5.10
## Install time: Mon Jan 15 23:16:31 2024
ipaddress='192.168.56.10'
copybackold='0'
interface='eth2'
submask='255.255.255.0'
hostname='fog'
routeraddress='192.168.1.1'
plainrouter='192.168.1.1'
dnsaddress='192.168.1.1'
username='fogproject'
password='0lEyBKxcrQxseHLB#Cbg'
osid='2'
osname='Debian'
dodhcp='y'
bldhcp='1'
dhcpd='isc-dhcp-server'
blexports='1'
installtype='N'
snmysqluser='fogmaster'
snmysqlpass='DQG@4PU31F9vOE4bX6V2'
snmysqlhost='localhost'
mysqldbname='fog'
installlang='1'
storageLocation='/images'
fogupdateloaded=1
docroot='/var/www/html/'
webroot='/fog/'
caCreated='yes'
httpproto='https'
startrange='192.168.56.10'
endrange='192.168.56.254'
packages='apache2 bc build-essential cpp curl g++ gawk gcc genisoimage gettext git gzip htmldoc i
sc-dhcp-server isolinux lftp libapache2-mod-php libc6 libcurl4 liblzma-dev m4 mariadb-client mari
adb-server net-tools nfs-kernel-server openssh-server php php-bcmath php-cli php-curl php-fpm php
-gd php-intl php-json php-ldap php-mbstring php-mysql tar tftp-hpa tftpd-hpa unzip vsftpd wget zl
ib1g '
noTftpBuild=''
tftpAdvOpts=''
sslpath='/opt/fog/snapins/ssl/'
backupPath='/home/'
armsupport='0'
php_ver='7.4'
sslprivkey='/opt/fog/snapins/ssl//.srvprivate.key'
sendreports='N'
## End of FOG Settings

View File

@ -1,26 +1,54 @@
---
- name: creation d'un repertoire fog
- name: Installation des paquets de base
apt:
state: present
name:
- apache2
- curl
- git
- gzip
- isc-dhcp-server
- mariadb-client
- mariadb-server
- net-tools
- openssh-server
- php
- php-cli
- php-curl
- php-fpm
- php-gd
- php-intl
- php-json
- php-ldap
- php-mbstring
- php-mysql
- tar
- unzip
- vsftpd
- wget
- name: creation /root/tmp
file:
path: /root/tools/fog
path: /root/tmp
state: directory
- name: recuperation de l'archive d'installation fog sur git
git:
repo: https://gitea.lyc-lecastel.fr/gadmin/fog.git
dest: /root/tools/fog/
clone: yes
update: yes
- name: Modification fichier bash (desac UDPCast)
ansible.builtin.lineinfile:
path: /root/tools/fog/lib/common/functions.sh
regexp: '^configureUDPCast\(\).*'
line: "configureUDPCast() {\nreturn"
backup: yes
- name: fichier config fogsettings
command: "cp /root/tools/ansible/roles/fog/files/fogsettings /opt/fog/"
copy:
src: fogsettings
dest: /root/tmp/
- name: fichier fogsettings en .fogsettings
command: "mv /opt/fog/fogsettings /opt/fog/.fogsettings"
- name: Récupération archive d'installation Fog
get_url:
url: "{{ depl_url }}/{{ depl_fog }}"
dest: "/root/tmp/"
- name: Décompression de l'archive
ansible.builtin.unarchive:
src: "/root/tmp/{{ depl_fog }}"
dest: "/root/tmp/"
- name: Exécution du script d'installation Fog
ansible.builtin.shell: sudo bash /root/tmp/fogproject-1.5.10/bin/installfog.sh --recreate-keys -f /root/tmp/fogsettings -y
args:
chdir: "/root/tmp/fogproject-1.5.10/"

View File

@ -1,6 +1,76 @@
Configuration de ferm
# [Ferm](http://ferm.foo-projects.org/)
Modifier l'execution d'iptables [plus d'info ici](https://wiki.debian.org/iptables)
Switch the iptables backend [more info here](https://wiki.debian.org/iptables)

Site-to-site WireGuard configuration generator script:

```shell
#!/bin/bash
set -u
set -e
# Version Site to Site
AddressAwg=10.0.0.1/32 # Adresse VPN Wireguard cote A
EndpointA=192.168.0.51 # Adresse extremite A
PortA=51820 # Port ecoute extremite A
NetworkA=192.168.1.0/24 # reseau cote A
NetworkC=192.168.200.0/24 #reseau cote A
NetworkD=172.16.0.0/24 #reseau cote A
AddressBwg=10.0.0.2/32 # Adresse VPN Wireguard cote B
EndpointB=192.168.0.52 # Adresse extremite B
PortB=51820 # Port ecoute extremite B
NetworkB=172.16.128.0/24 # reseau cote B
umask 077
wg genkey > endpoint-a.key
wg pubkey < endpoint-a.key > endpoint-a.pub
wg genkey > endpoint-b.key
wg pubkey < endpoint-b.key > endpoint-b.pub
PKA=$(cat endpoint-a.key)
pKA=$(cat endpoint-a.pub)
PKB=$(cat endpoint-b.key)
pKB=$(cat endpoint-b.pub)
cat <<FINI > wg0-a.conf
# local settings for Endpoint A
[Interface]
PrivateKey = $PKA
Address = $AddressAwg
ListenPort = $PortA
# IP forwarding
PreUp = sysctl -w net.ipv4.ip_forward=1
# remote settings for Endpoint B
[Peer]
PublicKey = $pKB
Endpoint = ${EndpointB}:$PortB
AllowedIPs = $AddressBwg, $NetworkB
FINI
cat <<FINI > wg0-b.conf
# local settings for Endpoint B
[Interface]
PrivateKey = $PKB
Address = $AddressBwg
ListenPort = $PortB
# IP forwarding
PreUp = sysctl -w net.ipv4.ip_forward=1
# remote settings for Endpoint A
[Peer]
PublicKey = $pKA
Endpoint = ${EndpointA}:$PortA
AllowedIPs = $AddressAwg, $NetworkA, $NetworkC, $NetworkD
FINI
echo "wg0-a.conf et wg0-b.conf sont generes ..."
echo "copier wg0-b.conf sur la machine b et renommer les fichiers de configuration ..."](https://wiki.debian.org/iptables)
```shell
update-alternatives --set iptables /usr/sbin/iptables-legacy
```

View File

@ -4,7 +4,6 @@
@def $DEV_PRIVATE = enp0s8;
@def $DEV_WORLD = enp0s9;
@def $DEV_WORLD = enp0s9;
@def $DEV_VPN= wg0;
@def $NET_PRIVATE = 172.16.0.0/24;
@ -32,7 +31,7 @@ table filter {
# well-known internet hosts
saddr ($NET_PRIVATE) proto tcp dport ssh ACCEPT;
# we provide DNS and SMTP services for the internal net
# we provide DNS services for the internal net
interface $DEV_PRIVATE saddr $NET_PRIVATE {
proto (udp tcp) dport domain ACCEPT;
proto udp dport bootps ACCEPT;

View File

@ -29,7 +29,7 @@ table filter {
# well-known internet hosts
saddr ($NET_PRIVATE) proto tcp dport ssh ACCEPT;
# we provide DNS and SMTP services for the internal net
# we provide DNS services for the internal net
interface $DEV_PRIVATE saddr $NET_PRIVATE {
proto (udp tcp) dport domain ACCEPT;
proto udp dport bootps ACCEPT;

View File

@ -1,47 +1,92 @@
## How the role works
The role installs a GLPI server running on php and nginx.
Ce rôle permet aussi d'installer FusionInventory sur glpi.
This role also downloads the GLPI agent onto glpi.
The role also creates the GLPI database.
## How to use GLPI
Après le pull-config, aller sur une machine du réseau n-user et aller sur http://s-itil/install/install.php
Puis lancer l'installation, les paramètres sql à fournir sont :
serveur : localhost
utilisateur : glpi
mot de passe : glpi
Selectionner la base glpi
After the pull-config, from a machine on the n-user network, browse to: *http://s-itil.gsb.lan*
Then start the installation; the SQL parameters to provide are the following:
* server: **localhost**
* user: **glpi**
* password: **glpi**
* Select the **glpi** database
Do not send usage statistics.
## Postfix:
Postfix is used to send messages back in order to track the progress of tickets.
Aller dans Configuration > Notification, activer le suivi et les notification
Go to **Configuration > Notifications**, enable follow-up and notifications.
Go to the email notification settings.
Put the monitoring email address in: administrator email, sender email and reply-to address.
The email sending mode is SMTP.
The SMTP host is localhost.
## Inventorying a Windows machine in GLPI with the agent:
The glpi agent version currently available directly from the server is 1.7.
* Download the agent from the GLPI server: *http://s-itil/glpicli*
* Install the agent: select the "Typical" option and enter the server URL in "Remote Targets": *http://s-itil/*
* Go to localhost:62354 to force an inventory upload
*Note: if the machine does not show up after forcing the upload from the web interface, the glpi agent service can be restarted.*
* Refresh GLPI and the machine will be inventoried
## A and PTR records for S-WIN:
The "A" and "PTR" records are needed to resolve the machine names involved in LDAP directory synchronisation, using **s-win** as DNS server for GLPI without going through **s-infra**.
Add the "A" and "PTR" records to the Active Directory DNS:
On the S-WIN server:
**Server Manager** --> **DNS Manager** --> **Forward Lookup Zones** --> **gsb.lan** --> **New Host (A)**:
* s-infra 172.16.0.1
* s-itil 172.16.0.9
* r-int 172.16.0.254
* s-win 172.16.0.6
Tick **Create associated pointer (PTR) record**; the records can be checked as sketched below.
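A quick check, from any machine pointed at the AD DNS, that the records above answer (the 172.16.0.6 address of s-win comes from the gsb.lan zone file in this repository):

```shell
# Query the AD DNS on s-win directly for the records added above
nslookup s-itil.gsb.lan 172.16.0.6
nslookup s-infra.gsb.lan 172.16.0.6
nslookup 172.16.0.9 172.16.0.6   # reverse lookup, should return s-itil.gsb.lan
```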
## LDAP:
Go to Configuration > Authentication > LDAP directories.
Add a server by clicking the +
Fill in the fields:
Nom : s-win
Serveur par défaut : oui
Actif : oui
Serveur : s-win.gsb.lan
Filtre de connexion : (&(objectClass=user)(objectCategory=person)(!(userAccountControl:1.2.840.113556.1.4.803:=2)))
BaseDN : DC=gsb,DC=lan
DN du compte : GSB\Administrateur
Mot de passe : Azerty1+
Champ de l'identifiant : samaccountname
* Name: **s-win**
* Default server: **yes**
* Active: **yes**
* Server: **s-win.gsb.lan**
* Connection filter: (&(objectClass=user)(objectCategory=person)(!(userAccountControl:1.2.840.113556.1.4.803:=2)))
* BaseDN: **DC=gsb,DC=lan**
* Account DN: **GSB\Administrateur**
* Password: **Azerty1+**
* Login field: **samaccountname**
Pour importer les utilisateurs allez dans Administration > Utilisateur > Liaison annuaire LDAP > Importation de nouveau utilisateurs
To import the users, go to **Administration > Users > LDAP directory link > Import new users**
Click search
Then select the listed users, go to Actions and choose import.
## Les modification à faire pour un prochaine version de GLPI :
## Joining the gsb.lan domain from a Windows client:
To join the **gsb.lan** domain, the Active Directory DNS server **s-win.gsb.lan** must be added as a DNS server on the client machine.
The **gsb.lan** domain can then be joined.
We use the **Administrateur** account to connect to the Active Directory:
* **Login**: **Administrateur@gsb.lan**
* **Password**: **Azerty1+**
## Changes to make for a future GLPI version:
For future GLPI versions, to keep using the playbook, the changes to make are:
* Update the GLPI version
* Update the PHP version
* Update the GLPI Agent version
*Changes made by: jm - ak*

View File

@ -1,9 +1,15 @@
---
- name: restart php-fpm
service: name=php8.2-fpm state=restarted
service:
name: php8.2-fpm
state: restarted
- name: restart nginx
service: name=nginx state=restarted
service:
name: nginx
state: restarted
- name: restart mariadb-server
service: name=mariadb-server state=restarted
service:
name: mariadb-server
state: restarted

16
roles/gotify/README.md Normal file
View File

@ -0,0 +1,16 @@
# Gotify role
***
Gotify role for Zabbix notifications, among others.
## What does the gotify role do?
The gotify role installs gotify from the binary release; it is a basic installation without https.
***
## Credentials
***
Admin
Admin
***
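As a quick smoke test once the service is running: port 8008 comes from the config.yml.j2 of this role, the hostname is taken from the gsb.lan zone file (adjust it to wherever the role was applied), and the application token is a placeholder that has to be created first in the web UI:

```shell
# Log in on port 8008 with the credentials above, create an application in the UI,
# then push a test message with its token (APP_TOKEN is a placeholder)
curl -s "http://s-gotify.gsb.lan:8008/message?token=APP_TOKEN" \
     -F "title=test" -F "message=hello from gsb" -F "priority=5"
```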

View File

@ -0,0 +1,50 @@
---
- name: Mise a jour apt cache
apt:
update_cache: yes
- name: Creation /etc/gotify
ansible.builtin.file:
path: /etc/gotify
state: directory
mode: '0755'
- name: Creation /opt/gotify
ansible.builtin.file:
path: /opt/gotify
state: directory
mode: '0755'
- name: installation de gotify
get_url:
url: "https://github.com/gotify/server/releases/latest/download/gotify-linux-amd64.zip"
dest: "/tmp/gotify.zip"
- name: Extraction de Gotify
ansible.builtin.unarchive:
src: "/tmp/gotify.zip"
dest: "/opt/gotify"
become: yes
- name: Creation du fichier systemd
template:
src: "gotify.service.j2"
dest: "/etc/systemd/system/gotify.service"
become: yes
- name: Reload systemd
systemd:
daemon_reload: yes
- name: Creation du fichier conf gotify
template:
src: "config.yml.j2"
dest: "/etc/gotify/config.yml"
become: yes
- name: Demarage du gotify
systemd:
name: gotify
state: started
enabled: yes

View File

@ -0,0 +1,4 @@
server:
keepaliveperiodseconds: 0
listenaddr: "" # the address to bind on, leave empty to bind on all addresses
port: 8008

View File

@ -0,0 +1,13 @@
[Unit]
Description=Gotify Server
After=network.target
[Service]
Type=simple
User=root
ExecStart=/opt/gotify/gotify-linux-amd64
Restart=on-failure
[Install]
WantedBy=multi-user.target

View File

@ -1,16 +1,30 @@
# Role syslog : installation et configuration de syslog serveur (centralisation des logs)
# journald-rcv role: installation and configuration of the systemd-journal-remote server (log centralisation)
***
## Role features:
Ce role a pour objectif de activer le module UDP dans le fichier /etc/rsyslog.conf pour accepter les logs entrants des machines concernées :
on décommente la ligne suivante :
'module(load="imudp"\)'
This role installs and edits the configuration files of systemd-journal-remote so that the machines running this role can receive the logs of the other machines of the estate.
Ensuite le role active l'écoute du module UDP sur le port 514 afin de pouvoir envoyer les logs.
on décommente la ligne suivante dans le même fichier que ci-dessus :
'input\(type="imudp" port="514"\)'
## Operations performed by the role:
The role performs the following operations:
* Installation of the **systemd-journal-remote** package.
* Start and enablement (at boot) of the **systemd-journal-remote.socket** unit.
* Creation of the **systemd-journal-remote** configuration files from a copy of the existing configuration file.
* Change of the protocol used by journald, from **HTTPS** to **HTTP**.
* Activation of split mode, which gives one log file per monitored machine.
* Creation of the directory that will hold the log files.
* Restart of the systemd daemon so that the changes are taken into account.
pour finir le role va charger le module UDP afin que la machine **s-infra** puissent reçevoir les logs entrants.
Pour faire cela on décommente la ligne suivante dans le fichier /etc/systemd/journald.conf :
'ForwardToSyslog=yes'
## Testing that the role works
pour finir le role va redemmarer automatiquement les services journald et rsyslog
To validate the role, we run the following test:
**From the machine on which this role is installed:**
* **journalctl -f -D /var/log/journal/remote/** to watch the incoming event files.
* Make sure that port 19532 (the default port used by the service) is open and reachable on all machines.
**From one of the machines that sends logs:**
* **logger ok**
If the message emitted by the sending machine can be read on the receiving machine, the test is successful.
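The same test as a copy-pasteable sketch (paths and port taken from the text above):

```shell
# On the collector (the machine running this role): follow the remote journals
journalctl -f -D /var/log/journal/remote/
# Still on the collector: confirm the receiver is listening on its default port
ss -ltn | grep 19532
# On any monitored machine: emit a test entry, it should appear in the journalctl output above
logger "test journald-rcv"
```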

21
roles/kea/README.md Normal file
View File

@ -0,0 +1,21 @@
# Kea role
***
Kea role: configuration of 2 KEA servers in high-availability mode.
## Table of contents
1. [What does the Kea role do?]
2. [Installation and configuration of kea]
3. [Remarks]
## What does the Kea role do?
The Kea role configures 2 kea servers (s-kea1 and s-kea2) in high-availability mode.
- The **s-kea1** server runs in **primary** mode and delivers the DHCP leases on the n-user network.
- The **s-kea2** server runs in **stand-by** mode; the DHCP service therefore fails over to **s-kea2** if **s-kea1** becomes unavailable.
### Installation and configuration of kea
The kea role installs the **kea dhcp4, hooks, admin** packages; once they are installed, it configures the server so that it distributes IP addresses on the n-user network and runs in high availability.
### Remarks ###
Once the **s-kea** playbook has completed and the **s-kea** machine has been rebooted, restart the **isc-kea-dhcp4-server.service** service so that the changes made to the network layer by the POST role are taken into account.
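A quick way to check the HA state once both servers are up is to query the control agent REST API; the port 8000 and the peer addresses come from the templates of this role:

```shell
# Ask the primary's control agent for the DHCPv4 status (use 172.16.64.21 for s-kea2)
curl -s -X POST http://172.16.64.20:8000/ \
     -H 'Content-Type: application/json' \
     -d '{ "command": "status-get", "service": [ "dhcp4" ] }'
# The reply includes a high-availability section with the local and remote server state
```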

View File

@ -0,0 +1,8 @@
#variable kea
kea_ver: "2.4.1"
kea_dbname: ""
kea_dbuser: ""
kea_dbpasswd: ""
kea_dhcp4_dir: "/etc/kea/kea-dhcp4.conf"
kea_ctrl_dir: "/etc/kea/kea-ctrl-agent.conf"

View File

@ -0,0 +1,66 @@
// This is an example of a configuration for Control-Agent (CA) listening
// for incoming HTTP traffic. This is necessary for handling API commands,
// in particular lease update commands needed for HA setup.
{
"Control-agent":
{
// We need to specify where the agent should listen to incoming HTTP
// queries.
"http-host": "172.16.0.20",
// This specifies the port CA will listen on.
"http-port": 8000,
"control-sockets":
{
// This is how the Agent can communicate with the DHCPv4 server.
"dhcp4":
{
"comment": "socket to DHCPv4 server",
"socket-type": "unix",
"socket-name": "/tmp/kea4-ctrl-socket"
},
// Location of the DHCPv6 command channel socket.
# "dhcp6":
# {
# "socket-type": "unix",
# "socket-name": "/tmp/kea6-ctrl-socket"
# },
// Location of the D2 command channel socket.
# "d2":
# {
# "socket-type": "unix",
# "socket-name": "/tmp/kea-ddns-ctrl-socket",
# "user-context": { "in-use": false }
# }
},
// Similar to other Kea components, CA also uses logging.
"loggers": [
{
"name": "kea-ctrl-agent",
"output_options": [
{
"output": "stdout",
// Several additional parameters are possible in addition
// to the typical output. Flush determines whether logger
// flushes output to a file. Maxsize determines maximum
// filesize before the file is rotated. maxver
// specifies the maximum number of rotated files being
// kept.
"flush": true,
"maxsize": 204800,
"maxver": 4,
// We use pattern to specify custom log message layout
"pattern": "%d{%y.%m.%d %H:%M:%S.%q} %-5p [%c/%i] %m\n"
}
],
"severity": "INFO",
"debuglevel": 0 // debug level only applies when severity is set to DEBUG.
}
]
}
}

View File

@ -0,0 +1,226 @@
// This is an example configuration of the Kea DHCPv4 server 1:
//
// - uses High Availability hook library and Lease Commands hook library
// to enable High Availability function for the DHCP server. This config
// file is for the primary (the active) server.
// - uses memfile, which stores lease data in a local CSV file
// - it assumes a single /24 addressing over a link that is directly reachable
// (no DHCP relays)
// - there is a handful of IP reservations
//
// It is expected to run with a standby (the passive) server, which has a very similar
// configuration. The only difference is that "this-server-name" must be set to "server2" on the
// other server. Also, the interface configuration depends on the network settings of the
// particular machine.
{
"Dhcp4": {
// Add names of your network interfaces to listen on.
"interfaces-config": {
// The DHCPv4 server listens on this interface. When changing this to
// the actual name of your interface, make sure to also update the
// interface parameter in the subnet definition below.
"interfaces": [ "enp0s9" ]
},
// Control socket is required for communication between the Control
// Agent and the DHCP server. High Availability requires Control Agent
// to be running because lease updates are sent over the RESTful
// API between the HA peers.
"control-socket": {
"socket-type": "unix",
"socket-name": "/tmp/kea4-ctrl-socket"
},
// Use Memfile lease database backend to store leases in a CSV file.
// Depending on how Kea was compiled, it may also support SQL databases
// (MySQL and/or PostgreSQL). Those database backends require more
// parameters, like name, host and possibly user and password.
// There are dedicated examples for each backend. See Section 7.2.2 "Lease
// Storage" for details.
"lease-database": {
// Memfile is the simplest and easiest backend to use. It's an in-memory
// database with data being written to a CSV file. It is very similar to
// what ISC DHCP does.
"type": "memfile"
},
// Let's configure some global parameters. The home network is not very dynamic
// and there's no shortage of addresses, so no need to recycle aggressively.
"valid-lifetime": 43200, // leases will be valid for 12h
"renew-timer": 21600, // clients should renew every 6h
"rebind-timer": 32400, // clients should start looking for other servers after 9h
// Kea will clean up its database of expired leases once per hour. However, it
// will keep the leases in expired state for 2 days. This greatly increases the
// chances for returning devices to get the same address again. To guarantee that,
// use host reservation.
// If both "flush-reclaimed-timer-wait-time" and "hold-reclaimed-time" are
// not 0, when the client sends a release message the lease is expired
// instead of being deleted from lease storage.
"expired-leases-processing": {
"reclaim-timer-wait-time": 3600,
"hold-reclaimed-time": 172800,
"max-reclaim-leases": 0,
"max-reclaim-time": 0
},
// HA requires two hook libraries to be loaded: libdhcp_lease_cmds.so and
// libdhcp_ha.so. The former handles incoming lease updates from the HA peers.
// The latter implements high availability feature for Kea. Note the library name
// should be the same, but the path is OS specific.
"hooks-libraries": [
// The lease_cmds library must be loaded because HA makes use of it to
// deliver lease updates to the server as well as synchronize the
// lease database after failure.
{
"library": "/usr/local/lib/kea/hooks/libdhcp_lease_cmds.so"
},
{
// The HA hook library should be loaded.
"library": "/usr/local/lib/kea/hooks/libdhcp_ha.so",
"parameters": {
// Each server should have the same HA configuration, except for the
// "this-server-name" parameter.
"high-availability": [ {
// This parameter points to this server instance. The respective
// HA peers must have this parameter set to their own names.
"this-server-name": "s-kea1.gsb.lan",
// The HA mode is set to hot-standby. In this mode, the active server handles
// all the traffic. The standby takes over if the primary becomes unavailable.
"mode": "hot-standby",
// Heartbeat is to be sent every 10 seconds if no other control
// commands are transmitted.
"heartbeat-delay": 10000,
// Maximum time for partner's response to a heartbeat, after which
// failure detection is started. This is specified in milliseconds.
// If we don't hear from the partner in 60 seconds, it's time to
// start worrying.
"max-response-delay": 30000,
// The following parameters control how the server detects the
// partner's failure. The ACK delay sets the threshold for the
// 'secs' field of the received discovers. This is specified in
// milliseconds.
"max-ack-delay": 5000,
// This specifies the number of clients which send messages to
// the partner but appear to not receive any response.
"max-unacked-clients": 0,
// This specifies the maximum timeout (in milliseconds) for the server
// to complete sync. If you have a large deployment (high tens or
// hundreds of thousands of clients), you may need to increase it
// further. The default value is 60000ms (60 seconds).
"sync-timeout": 60000,
"peers": [
// This is the configuration of this server instance.
{
"name": "s-kea1.gsb.lan",
// This specifies the URL of this server instance. The
// Control Agent must run along with this DHCPv4 server
// instance and the "http-host" and "http-port" must be
// set to the corresponding values.
"url": "http://172.16.64.20:8000/",
// This server is primary. The other one must be
// secondary.
"role": "primary"
},
// This is the configuration of the secondary server.
{
"name": "s-kea2.gsb.lan",
// Specifies the URL on which the partner's control
// channel can be reached. The Control Agent is required
// to run on the partner's machine with "http-host" and
// "http-port" values set to the corresponding values.
"url": "http://172.16.64.21:8000/",
// The other server is secondary. This one must be
// primary.
"role": "standby"
}
]
} ]
}
}
],
// This example contains a single subnet declaration.
"subnet4": [
{
// Subnet prefix.
"subnet": "172.16.64.0/24",
// There are no relays in this network, so we need to tell Kea that this subnet
// is reachable directly via the specified interface.
"interface": "enp0s9",
// Specify a dynamic address pool.
"pools": [
{
"pool": "172.16.64.100-172.16.64.150"
}
],
// These are options that are subnet specific. In most cases, you need to define at
// least routers option, as without this option your clients will not be able to reach
// their default gateway and will not have Internet connectivity. If you have many
// subnets and they share the same options (e.g. DNS servers typically is the same
// everywhere), you may define options at the global scope, so you don't repeat them
// for every network.
"option-data": [
{
// For each IPv4 subnet you typically need to specify at least one router.
"name": "routers",
"data": "172.16.64.254"
},
{
// Using cloudflare or Quad9 is a reasonable option. Change this
// to your own DNS servers is you have them. Another popular
// choice is 8.8.8.8, owned by Google. Using third party DNS
// service raises some privacy concerns.
"name": "domain-name-servers",
"data": "172.16.0.1"
}
],
// Some devices should get a static address. Since the .100 - .199 range is dynamic,
// let's use the lower address space for this. There are many ways how reservation
// can be defined, but using MAC address (hw-address) is by far the most popular one.
// You can use client-id, duid and even custom defined flex-id that may use whatever
// parts of the packet you want to use as identifiers. Also, there are many more things
// you can specify in addition to just an IP address: extra options, next-server, hostname,
// assign device to client classes etc. See the Kea ARM, Section 8.3 for details.
// The reservations are subnet specific.
#"reservations": [
# {
# "hw-address": "1a:1b:1c:1d:1e:1f",
# "ip-address": "192.168.1.10"
# },
# {
# "client-id": "01:11:22:33:44:55:66",
# "ip-address": "192.168.1.11"
# }
#]
}
],
// fichier de logs
"loggers": [
{
// This section affects kea-dhcp4, which is the base logger for DHCPv4 component. It tells
// DHCPv4 server to write all log messages (on severity INFO or higher) to a file. The file
// will be rotated once it grows to 2MB and up to 4 files will be kept. The debuglevel
// (range 0 to 99) is used only when logging on DEBUG level.
"name": "kea-dhcp4",
"output_options": [
{
"output": "stdout",
"maxsize": 2048000,
"maxver": 4
}
],
"severity": "INFO",
"debuglevel": 0
}
]
}
}

View File

@ -0,0 +1,12 @@
---
- name: Restart isc-kea-dhcp4-server
ansible.builtin.service:
name: isc-kea-dhcp4-server.service
state: restarted
enabled: yes
- name: Restart isc-kea-ctrl-agent
ansible.builtin.service:
name: isc-kea-ctrl-agent.service
state: restarted
enabled: yes

43
roles/kea/tasks/main.yml Normal file
View File

@ -0,0 +1,43 @@
---
- name: Preparation
ansible.builtin.shell: curl -1sLf 'https://dl.cloudsmith.io/public/isc/kea-2-4/setup.deb.sh' | sudo -E bash
- name: Update apt
ansible.builtin.apt:
update_cache: yes
#- name: Installation paquet isc-kea-common
# ansible.builtin.apt:
# deb: isc-kea-common
# state: present
- name: Installation isc-kea-dhcp4
ansible.builtin.apt:
name: isc-kea-dhcp4-server
state: present
- name: Installation isc-kea-ctrl-agent
ansible.builtin.apt:
name: isc-kea-ctrl-agent
state: present
- name: Installation isc-kea-hooks
ansible.builtin.apt:
name: isc-kea-hooks
state: present
- name: Generation du fichier de configuration kea-ctrl-agent
ansible.builtin.template:
src: kea-ctrl-agent.conf.j2
dest: /etc/kea/kea-ctrl-agent.conf
notify:
- Restart isc-kea-ctrl-agent
- name: Generation du fichier de configuration kea-dhcp4.conf
ansible.builtin.template:
src: kea-dhcp4.conf.j2
dest: /etc/kea/kea-dhcp4.conf
notify:
- Restart isc-kea-dhcp4-server

View File

@ -0,0 +1,32 @@
{
"Control-agent":
{
"http-host": "{{ kea_ctrl_address_this }}",
"http-port": 8000,
"control-sockets":
{
"dhcp4":
{
"socket-type": "unix",
"socket-name": "/tmp/kea4-ctrl-socket"
}
},
"loggers": [
{
"name": "kea-ctrl-agent",
"output_options": [
{
"output": "stdout",
"flush": true,
"maxsize": 204800,
"maxver": 4,
{% raw %} "pattern": "%d{%y.%m.%d %H:%M:%S.%q} %-5p [%c/%i] %m\n" {% endraw %}
}
],
"severity": "INFO",
"debuglevel": 0
}
]
}
}

View File

@ -0,0 +1,241 @@
// This is an example configuration of the Kea DHCPv4 server 1:
//
// - uses High Availability hook library and Lease Commands hook library
// to enable High Availability function for the DHCP server. This config
// file is for the primary (the active) server.
// - uses memfile, which stores lease data in a local CSV file
// - it assumes a single /24 addressing over a link that is directly reachable
// (no DHCP relays)
// - there is a handful of IP reservations
//
// It is expected to run with a standby (the passive) server, which has a very similar
// configuration. The only difference is that "this-server-name" must be set to "server2" on the
// other server. Also, the interface configuration depends on the network settings of the
// particular machine.
{
"Dhcp4": {
// Add names of your network interfaces to listen on.
"interfaces-config": {
// The DHCPv4 server listens on this interface. When changing this to
// the actual name of your interface, make sure to also update the
// interface parameter in the subnet definition below.
"interfaces": ["{{ kea_dhcp_int }}"]
},
// Control socket is required for communication between the Control
// Agent and the DHCP server. High Availability requires Control Agent
// to be running because lease updates are sent over the RESTful
// API between the HA peers.
"control-socket": {
"socket-type": "unix",
"socket-name": "/tmp/kea4-ctrl-socket"
},
// Use Memfile lease database backend to store leases in a CSV file.
// Depending on how Kea was compiled, it may also support SQL databases
// (MySQL and/or PostgreSQL). Those database backends require more
// parameters, like name, host and possibly user and password.
// There are dedicated examples for each backend. See Section 7.2.2 "Lease
// Storage" for details.
"lease-database": {
// Memfile is the simplest and easiest backend to use. It's an in-memory
// database with data being written to a CSV file. It is very similar to
// what ISC DHCP does.
"type": "memfile"
},
// Let's configure some global parameters. The home network is not very dynamic
// and there's no shortage of addresses, so no need to recycle aggressively.
"valid-lifetime": 43200, // leases will be valid for 12h
"renew-timer": 21600, // clients should renew every 6h
"rebind-timer": 32400, // clients should start looking for other servers after 9h
// Kea will clean up its database of expired leases once per hour. However, it
// will keep the leases in expired state for 2 days. This greatly increases the
// chances for returning devices to get the same address again. To guarantee that,
// use host reservation.
// If both "flush-reclaimed-timer-wait-time" and "hold-reclaimed-time" are
// not 0, when the client sends a release message the lease is expired
// instead of being deleted from lease storage.
"expired-leases-processing": {
"reclaim-timer-wait-time": 3600,
"hold-reclaimed-time": 172800,
"max-reclaim-leases": 0,
"max-reclaim-time": 0
},
// HA requires two hook libraries to be loaded: libdhcp_lease_cmds.so and
// libdhcp_ha.so. The former handles incoming lease updates from the HA peers.
// The latter implements high availability feature for Kea. Note the library name
// should be the same, but the path is OS specific.
"hooks-libraries": [
// The lease_cmds library must be loaded because HA makes use of it to
// deliver lease updates to the server as well as synchronize the
// lease database after failure.
{
"library": "/usr/lib/x86_64-linux-gnu/kea/hooks/libdhcp_lease_cmds.so"
},
{
"library": "/usr/lib/x86_64-linux-gnu/kea/hooks/libdhcp_stat_cmds.so"
},
{
// The HA hook library should be loaded.
"library": "/usr/lib/x86_64-linux-gnu/kea/hooks/libdhcp_ha.so",
"parameters": {
// Each server should have the same HA configuration, except for the
// "this-server-name" parameter.
"high-availability": [ {
// This parameter points to this server instance. The respective
// HA peers must have this parameter set to their own names.
"this-server-name": "{{ kea_this_server }}",
// The HA mode is set to hot-standby. In this mode, the active server handles
// all the traffic. The standby takes over if the primary becomes unavailable.
"mode": "hot-standby",
// Heartbeat is to be sent every 10 seconds if no other control
// commands are transmitted.
"heartbeat-delay": 10000,
// Maximum time for partner's response to a heartbeat, after which
// failure detection is started. This is specified in milliseconds.
// If we don't hear from the partner in 60 seconds, it's time to
// start worrying.
"max-response-delay": 30000,
// The following parameters control how the server detects the
// partner's failure. The ACK delay sets the threshold for the
// 'secs' field of the received discovers. This is specified in
// milliseconds.
"max-ack-delay": 5000,
// This specifies the number of clients which send messages to
// the partner but appear to not receive any response.
"max-unacked-clients": 0,
// This specifies the maximum timeout (in milliseconds) for the server
// to complete sync. If you have a large deployment (high tens or
// hundreds of thousands of clients), you may need to increase it
// further. The default value is 60000ms (60 seconds).
"sync-timeout": 60000,
"peers": [
// This is the configuration of this server instance.
{
"name": "{{ kea_srv1 }}",
// This specifies the URL of this server instance. The
// Control Agent must run along with this DHCPv4 server
// instance and the "http-host" and "http-port" must be
// set to the corresponding values.
"url": "http://{{ kea_ctrl_address1 }}:8000/",
// This server is primary. The other one must be
// secondary.
"role": "primary"
},
// This is the configuration of the secondary server.
{
"name": "{{ kea_srv2 }}",
// Specifies the URL on which the partner's control
// channel can be reached. The Control Agent is required
// to run on the partner's machine with "http-host" and
// "http-port" values set to the corresponding values.
"url": "http://{{ kea_ctrl_address2 }}:8000/",
// The other server is secondary. This one must be
// primary.
"role": "standby"
}
]
} ]
}
}
],
// This example contains a single subnet declaration.
"subnet4": [
{
// Subnet prefix.
"subnet": "172.16.64.0/24",
// There are no relays in this network, so we need to tell Kea that this subnet
// is reachable directly via the specified interface.
"interface": "enp0s9",
// Specify a dynamic address pool.
"pools": [
{
"pool": "172.16.64.100-172.16.64.150"
}
],
// These are options that are subnet specific. In most cases, you need to define at
// least routers option, as without this option your clients will not be able to reach
// their default gateway and will not have Internet connectivity. If you have many
// subnets and they share the same options (e.g. DNS servers typically is the same
// everywhere), you may define options at the global scope, so you don't repeat them
// for every network.
"option-data": [
{
// For each IPv4 subnet you typically need to specify at least one router.
"name": "routers",
"data": "172.16.64.254"
},
{
// Using cloudflare or Quad9 is a reasonable option. Change this
// to your own DNS servers is you have them. Another popular
// choice is 8.8.8.8, owned by Google. Using third party DNS
// service raises some privacy concerns.
"name": "domain-name-servers",
"data": "172.16.0.1, 172.16.0.4"
},
{
"name": "domain-name",
"data": "gsb.lan"
},
{
"name": "domain-search",
"data": "gsb.lan"
}
],
// Some devices should get a static address. Since the .100 - .199 range is dynamic,
// let's use the lower address space for this. There are many ways how reservation
// can be defined, but using MAC address (hw-address) is by far the most popular one.
// You can use client-id, duid and even custom defined flex-id that may use whatever
// parts of the packet you want to use as identifiers. Also, there are many more things
// you can specify in addition to just an IP address: extra options, next-server, hostname,
// assign device to client classes etc. See the Kea ARM, Section 8.3 for details.
// The reservations are subnet specific.
#"reservations": [
# {
# "hw-address": "1a:1b:1c:1d:1e:1f",
# "ip-address": "192.168.1.10"
# },
# {
# "client-id": "01:11:22:33:44:55:66",
# "ip-address": "192.168.1.11"
# }
#]
}
],
// fichier de logs
"loggers": [
{
// This section affects kea-dhcp4, which is the base logger for DHCPv4 component. It tells
// DHCPv4 server to write all log messages (on severity INFO or higher) to a file. The file
// will be rotated once it grows to 2MB and up to 4 files will be kept. The debuglevel
// (range 0 to 99) is used only when logging on DEBUG level.
"name": "kea-dhcp4",
"output_options": [
{
"output": "stdout",
"maxsize": 2048000,
"maxver": 4
}
],
"severity": "INFO",
"debuglevel": 0
}
]
}
}

10
roles/lb-bd/README.md Normal file
View File

@ -0,0 +1,10 @@
# lb-bd role
***
lb-bd role for setting up the database of the WordPress server.
## Table of contents
1. What does the lb-bd role do?
## What does the lb-bd role do?
This role installs the `mariadb-server` package, then creates and configures the database named **wordpressdb**, opens port 3306 and creates the MySQL user **wordpressuser** with the password **wordpresspasswd**.
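A hand check, from one of the web servers, that the database accepts the WordPress user (credentials from the README above; the host is a placeholder to replace with the address of s-lb-bd in your topology):

```shell
# <address-of-s-lb-bd> is a placeholder for the database server's address
mysql -h <address-of-s-lb-bd> -u wordpressuser -pwordpresspasswd wordpressdb -e 'SELECT 1;'
```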

View File

@ -0,0 +1,22 @@
# lb-front role
***
lb-front role for load balancing the WordPress web servers with HAProxy
## Table of contents
1. What does the lb-front role do?
2. Server installation order.
## What does the lb-front role do?
The lb-front role installs `haproxy` for load balancing and configures the `/etc/haproxy/haproxy.cfg` file.
The configuration uses Round-Robin, an algorithm that balances the requests between s-lb-web1 and s-lb-web2.
The web site is reachable at <http://s-lb.gsb.adm>.
## Server installation order.
1. The s-lb server with haproxy, which "initialises" the subnets in the DMZ.
2. The s-lb-bd server, which hosts the WordPress database used by the web servers.
3. The s-nas server, which stores the WordPress files and shares them with the web servers over NFS. It also uses the database stored on s-lb-bd.
4. The s-web1 and s-web2 servers, which install Apache2 and PHP and serve WordPress.
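Two quick checks once the farm is up; the front address and the statistics page come from the haproxy.cfg shipped with this role:

```shell
# The front should answer 200; round-robin sends successive requests to s-lb-web1 then s-lb-web2
for i in 1 2 3 4; do curl -s -o /dev/null -w '%{http_code}\n' http://192.168.100.10/; done
# HAProxy statistics page (bind *:8080, stats uri /haproxy, auth admin:admin)
curl -s -u admin:admin http://192.168.100.10:8080/haproxy
```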

View File

@ -0,0 +1,55 @@
global
log /dev/log local0
log /dev/log local1 notice
chroot /var/lib/haproxy
stats socket /run/haproxy/admin.sock mode 660 level admin
stats timeout 30s
user haproxy
group haproxy
daemon
# Default SSL material locations
ca-base /etc/ssl/certs
crt-base /etc/ssl/private
# Default ciphers to use on SSL-enabled listening sockets.
# For more information, see ciphers(1SSL). This list is from:
# https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
ssl-default-bind-ciphers ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:!aNULL:!MD5:!DSS
ssl-default-bind-options no-sslv3
defaults
log global
mode http
option httplog
option dontlognull
timeout connect 5000
timeout client 50000
timeout server 50000
errorfile 400 /etc/haproxy/errors/400.http
errorfile 403 /etc/haproxy/errors/403.http
errorfile 408 /etc/haproxy/errors/408.http
errorfile 500 /etc/haproxy/errors/500.http
errorfile 502 /etc/haproxy/errors/502.http
errorfile 503 /etc/haproxy/errors/503.http
errorfile 504 /etc/haproxy/errors/504.http
frontend proxypublic
bind 192.168.100.10:80
default_backend fermeweb
backend fermeweb
balance roundrobin
option httpclose
option httpchk HEAD / HTTP/1.0
server s-lb-web1 192.168.101.1:80 check
server s-lb-web2 192.168.101.2:80 check
listen stats
bind *:8080
stats enable
stats uri /haproxy
stats auth admin:admin

View File

@ -0,0 +1,3 @@
---
- name: restart haproxy
service: name=haproxy state=restarted

View File

@ -0,0 +1,75 @@
- name: install haproxy
apt:
name: haproxy
state: present
- name: Creer le repertoire du certificat
file:
path: /etc/haproxy/crt
state: directory
mode: '0755'
- name: Creer le repertoire de la cle privee
file:
path: /etc/haproxy/crt/private
state: directory
mode: '0755'
- name: Generer une clee privee avec les valeurs par defaut (4096 bits, RSA)
openssl_privatekey:
path: /etc/haproxy/crt/private/haproxy.pem.key
size: 4096
type: RSA
state: present
- name: creer un certificat auto-signé
openssl_certificate:
path: /etc/haproxy/crt/private/haproxy.pem
privatekey_path: /etc/haproxy/crt/private/haproxy.pem.key
provider: selfsigned
state: present
- name: s'assurer que le certificat a les bonnes permissions
file:
path: /etc/haproxy/crt/private/haproxy.pem
owner: root
group: haproxy
mode: '0640'
- name: parametre global
blockinfile:
path: /etc/haproxy/haproxy.cfg
block: |
global
log /dev/log local0
log /dev/log local1 notice
chroot /var/lib/haproxy
stats socket /run/haproxy/admin.sock mode 660 level admin
stats timeout 30s
user haproxy
group haproxy
daemon
ssl-server-verify none
- name: parametre backend et frontend
blockinfile:
path: /etc/haproxy/haproxy.cfg
block: |
frontend proxypublic
bind 192.168.100.10:80
bind 192.168.100.10:443 ssl crt /etc/haproxy/crt/private/haproxy.pem
http-request redirect scheme https unless { ssl_fc }
default_backend fermeweb
backend fermeweb
balance roundrobin
option httpclose
option httpchk HEAD / HTTP/1.0
server s-lb-web1 192.168.101.1:80 check
server s-lb-web2 192.168.101.2:80 check
- name: redemarre haproxy
service:
name: haproxy
# state: restarted
enabled: yes
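Once these tasks have run, the HTTPS frontend and the HTTP-to-HTTPS redirect can be checked by hand; the 192.168.100.10 address comes from the tasks above and `-k` is required because the certificate is self-signed:

```
# HTTP should answer with a redirect to HTTPS
curl -s -o /dev/null -w '%{http_code} %{redirect_url}\n' http://192.168.100.10/

# HTTPS should serve the web farm despite the self-signed certificate
curl -sk -o /dev/null -w '%{http_code}\n' https://192.168.100.10/
```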

22
roles/lb-front/README.md Normal file
View File

@ -0,0 +1,22 @@
# Role lb-front
***
Role lb-front provides load balancing of the WordPress web servers with HAProxy.
## Table of contents
1. What does the lb-front role do?
2. Server installation order.
## What does the lb-front role do?
The lb-front role installs `haproxy` for load balancing and configures the `/etc/haproxy/haproxy.cfg` file.
The configuration uses Round-Robin, an algorithm that spreads requests evenly between s-lb-web1 and s-lb-web2.
The website is reachable at <http://s-lb.gsb.adm>.
## Server installation order
1. The s-lb server, running haproxy, which "initialises" the subnets in the DMZ.
2. The s-lb-bd server, which holds the WordPress database used by the web servers.
3. The s-nas server, which stores the WordPress files and shares them with the web servers over NFS. It also uses the database stored on s-lb-bd.
4. The s-web1 and s-web2 servers, which install Apache2 and PHP and serve the WordPress site.

View File

@ -1,23 +0,0 @@
port:
tcp:80:
listening: true
ip:
- 192.168.100.11
service:
haproxy:
enabled: true
running: true
sshd:
enabled: true
running: true
interface:
enp0s8:
exists: true
addrs:
- 192.168.100.11/24
mtu: 1500
enp0s9:
exists: true
addrs:
- 192.168.101.254/24
mtu: 1500

View File

@ -41,7 +41,7 @@ frontend proxypublic
backend fermeweb
balance roundrobin
option httpclose
#option httpchk HEAD / HTTP/1.0
option httpchk HEAD / HTTP/1.0
server s-lb-web1 192.168.101.1:80 check
server s-lb-web2 192.168.101.2:80 check

View File

@ -14,7 +14,7 @@
backend fermeweb
balance roundrobin
option httpclose
#option httpchk HEAD / HTTP/1.0
option httpchk HEAD / HTTP/1.0
server s-lb-web1 192.168.101.1:80 check
server s-lb-web2 192.168.101.2:80 check

View File

@ -1,3 +1,10 @@
## NFS share
This role installs nfs and mounts the /home/wordpress directory of s-nas into /var/www/html/wordpress on the web servers.
# Role lb-nfs-client
***
Role lb-nfs-client gives the lb-web1 and lb-web2 servers access to the NFS server.
## Table of contents
1. What does the lb-nfs-client role do?
## What does the lb-nfs-client role do?
This role installs the `nfs-common` package and mounts the /home/wordpress directory of s-nas into /var/www/html/wordpress on the web servers.
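The manual equivalent of this role is roughly the following sketch; the export path and mount point come from the description above, the `/etc/fstab` line is an assumption about how the mount is made persistent:

```
apt-get install -y nfs-common
mkdir -p /var/www/html/wordpress
mount -t nfs s-nas:/home/wordpress /var/www/html/wordpress

# Assumed fstab entry so the mount survives a reboot
echo 's-nas:/home/wordpress /var/www/html/wordpress nfs defaults 0 0' >> /etc/fstab
```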

View File

@ -1,10 +1,17 @@
# Role s-nas-server
## Installation of nfs-server and setup of the /home/wordpress share
# Role lb-nfs-server
***
Role lb-nfs-server sets up the sharing of the WordPress configuration files.
## Table of contents
1. What does the lb-nfs-server role do?
## What does the lb-nfs-server role do?
This role:
* installs **nfs-server**
* installs the `nfs-server` package
* copies the **exports** configuration file to export the **/home/wordpress** directory
* restarts the **nfs-server** service
* extracts wordpress
### Goal
The **/home/wordpress** directory is exported over **nfs** on the **n-dmz-db** network
* extracts WordPress into **/home/wordpress**
* restarts the `nfs-server` service
* configures WordPress access to the database in the `wp-config.php` file
The **/home/wordpress** directory is exported over NFS on the **n-dmz-db** subnet
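For reference, an `/etc/exports` line matching this description could look like the sketch below; the subnet address used for n-dmz-db is an assumption, only the exported path is given above:

```
# Export /home/wordpress to the web servers (subnet address assumed), then apply it
echo '/home/wordpress 192.168.102.0/24(rw,sync,no_subtree_check)' >> /etc/exports
exportfs -ra
showmount -e localhost   # list the active exports to confirm
```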

View File

@ -16,7 +16,7 @@
- name: 20 - decompresse wordpress
unarchive:
src: https://fr.wordpress.org/latest-fr_FR.tar.gz
src: http://s-adm.gsb.adm/gsbstore/wordpress-6.4.2-fr_FR.tar.gz
dest: /home/
remote_src: yes

View File

@ -1,3 +1,12 @@
## Download and configuration of WordPress
This role downloads wordpress from s-adm and then configures the wp-config.php file for the GSB setup.
# Role lb-web
***
Role lb-web serves the website.
## Table of contents
1. What does the lb-web role do?
## What does the lb-web role do?
This role installs the packages needed to run the website (`apache2`, `php` and `mariadb-client`), which let the web servers reach the WordPress database.
The website is reachable at http://s-lb.gsb.adm.
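A quick manual check of what this role sets up, assuming the database host is reachable as s-lb-bd and the credentials are those created by the lb-bd role:

```
# Packages installed by the role
apt-get install -y apache2 php mariadb-client

# Assumption: verify that this web server can reach the WordPress database
mysql -h s-lb-bd -u wordpressuser -pwordpresspasswd -e 'SHOW DATABASES;' | grep wordpressdb
```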

View File

@ -1,2 +1,2 @@
depl_url: "http://s-adm.gsb.adm/gsbstore/"
depl_wordpress: "wordpress-6.1.1-fr_FR.tar.gz"
depl_wordpress: "wordpress-6.4.2-fr_FR.tar.gz"

View File

@ -1,8 +1,16 @@
# Installation of Nextcloud and the Traefik reverse proxy
Nextcloud and Traefik run on docker. For this playbook to work, docker must be installed.
## Overview of the Nextcloud installation
To get Nextcloud and Traefik working, docker must be set up first. First, run the **getall** script on **s-adm**. Then, go to the **/nxc** directory on **s-nxc** and launch **docker-compose.yaml**. Finally, add LDAP authentication to Nextcloud using the AD on **s-win**.
## 1.
# <p align="center">Installation procedure</p>
***
## 1. Docker installation
See: https://gitea.lyc-lecastel.fr/gsb/gsb2024/src/branch/main/roles/docker
## 2. How the s-nxc playbook works
The playbook creates the **nxc** directory in root's home directory.
@ -10,11 +18,11 @@ Les fichiers "nextcloud.yml" et "traefik.yml" y seront copiés depuis le répert
Finally, inside the nxc directory, the **certs** and **config** directories are created.
## 2. File copy
### 2.1 File copy
The playbook copies the files placed in "files" into the right directories.
The playbook copies the files placed in "files" and puts them in the right directories.
## 3. Certificate generation
### 2.2 Certificate generation
The playbook creates an **x509** certificate with **mkcert**, a tool for generating self-signed certificates. To do so, it downloads **mkcert** from **s-adm** (use the **getall** script).
@ -25,7 +33,7 @@ Pour créer le certificat, le playbook exécute les commandes (lancé depuis nxc
/usr/local/bin/mkcert -install # Installs mkcert
/usr/local/bin/mkcert -key-file key.pem -cert-file cert.pem "hôte.domaine.local" "*.domaine.local" # Creates the certificate for the specified DNS names
```
## 4. Launch
## 3. Launch
The playbook runs the "docker-compose" files, namely nextcloud.yml and traefik.yml, which start the two **docker** stacks.
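Done by hand, starting (or restarting) the two stacks from the nxc directory would look like the sketch below, assuming the compose files keep the names used by the playbook:

```
cd /root/nxc
docker compose -f traefik.yml up -d     # start the reverse proxy first
docker compose -f nextcloud.yml up -d   # then the Nextcloud stack
docker compose -f nextcloud.yml ps      # check that the containers are running
```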
@ -37,22 +45,28 @@ ATTENTION : Après avoir relancé la VM, executez le script "nxc-start.sh" afin
Once the script has finished, the site is available here: https://s-nxc.gsb.lan
## 5. Adding LDAP authentication
## 4. Adding LDAP authentication
To add LDAP authentication to Nextcloud, you need to:
* Once the Nextcloud installation is finished, click on the profile and Application
To add LDAP authentication to Nextcloud, from **n-user** you need to:
* Once the Nextcloud installation is finished, click on the profile and "Application"
* In your applications, scroll down and enable "LDAP user and group backend"
* Then click on the profile, then Administration settings, and under Administration click on LDAP/AD integration
* Then click on the profile, then "Administration settings", and under "Administration" click on "LDAP/AD integration"
* Once on the LDAP/AD integration page:
* In Host, enter:
> ldap://s-win.gsb.lan
* Click on Detect port (port 389 should normally appear)
> **ldap://s-win.gsb.lan**
* Click on "Detect port" (port 389 should normally appear)
* In User DN, enter:
> CN=nextcloud,CN=Users,DC=GSB,DC=LAN
> **CN=nextcloud,CN=Users,DC=gsb,DC=lan**
* Password:
> Azerty1+
* And in One base DN per line:
> DC=GSB,DC=LAN
* Once the configuration check shows OK
* Once the configuration is finished, click continue 3 times
* Once you reach Groups, you can log out of the Admin account and log in with an account from the AD.
> **Azerty1+**
* And in "One base DN per line":
> **DC=gsb,DC=lan**
* Click on "Detect base DN" (it should normally appear automatically)
* Once the configuration is done, click on "Continue", then click continue 3 times
* Once you reach "Groups", you can log out of the Admin account and log in with an account that is in the AD.
## Contributors
- LG
- CH

View File

@ -53,8 +53,8 @@ services:
image: nextcloud
container_name: app
restart: always
ports:
- 8081:80
#ports:
#- 8081:80
#links:
depends_on:
- db

View File

@ -0,0 +1,19 @@
This Bash script automates the backup of the NextCloud server, which runs in a Docker environment.
## 1. Enabling maintenance mode:
- The first Docker command puts the NextCloud server into maintenance mode. This precaution guarantees that no changes are made during the backup, keeping the data consistent.
## 2. Copying the files to back up:
- The `cd /root/nxc` command changes the working directory to `/root/nxc`.
- Then, the `rsync -Aavx nextcloud/ nextcloud-dirbkp/` command recursively copies the files from the `nextcloud/` directory to `nextcloud-dirbkp/`. This creates a local copy of the NextCloud files for backup purposes.
## 3. Backing up the MySQL/MariaDB database:
- The next line uses `docker compose exec` to run the `mysqldump` command inside the database container. This produces a dump of the NextCloud database, saved to the `nextcloud-sqlbkp.bak` file.
## 4. Disabling maintenance mode:
- After the backup, another Docker command turns NextCloud's maintenance mode off, so normal operation can resume.
## 5. Creating a compressed archive:
- Finally, the last line creates a compressed archive `nxc.tgz` containing the database dump (`nextcloud-sqlbkp.bak`) and the local copy of the NextCloud files (`nextcloud-dirbkp/`).
In short, the script enables maintenance mode, copies the local files, dumps the database, disables maintenance mode, and bundles everything into a single compressed archive.

View File

@ -0,0 +1,22 @@
#!/bin/bash
# Put the NextCloud server into maintenance mode
docker compose exec -u www-data app php occ maintenance:mode --on
# Move to the backup working directory
cd /root/nxc
# Local copy of the NextCloud files
rsync -Aavx nextcloud/ nextcloud-dirbkp/
# MySQL/MariaDB database dump
docker compose exec db mysqldump -u nextcloud -pAzerty1+ nextcloud > nextcloud-sqlbkp.bak
# Leave maintenance mode
docker compose exec -u www-data app php occ maintenance:mode --off
# Create a single archive
tar cvfz nxc.tgz nextcloud-sqlbkp.bak nextcloud-dirbkp

View File

@ -21,11 +21,11 @@
- name: Copie de dynamic.yml
copy:
src: dynamic.yml
src: dynamic.yml
dest: /root/nxc/config
- name: Copie de docker-compose.yml
copy:
copy:
src: docker-compose.yml
dest: /root/nxc
@ -69,8 +69,14 @@
args:
chdir: /root/nxc
- name: Creation reseau docker proxy
- name: vérification si le réseau proxy existe
command: docker network ls --filter name=proxy
register: net_proxy
- name: création du réseau proxy
command: docker network create proxy
# when: net_proxy.stdout.find('proxy') == -1
when: "'proxy' not in net_proxy.stdout"
#- name: Démarrage du docker-compose...
#command: /bin/bash docker-compose up -d

View File

@ -0,0 +1,14 @@
# Role Kea
***
Role Kea provides high-availability DHCP.
## Table of contents
1. [What does the Kea role do?]
## What does the Kea role do?
It configures the kea servers in high-availability mode.
### Installation and configuration of kea
The kea role installs the kea dhcp4, hooks and admin packages. Once the packages are installed, the two kea servers are configured to hand out IP addresses on n-user and to run in high availability.
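Once both servers are configured, the HA state can be queried through the Control Agent; the address and port below are the ones set for s-kea1 in the kea-ctrl-agent.conf shipped by this role:

```
# Ask the DHCPv4 server on s-kea1 for its status, which includes the HA state
curl -s -X POST -H 'Content-Type: application/json' \
     -d '{ "command": "status-get", "service": [ "dhcp4" ] }' \
     http://172.16.64.20:8000/
```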

View File

@ -0,0 +1,8 @@
#variable kea
kea_ver: "2.4.1"
kea_dbname: ""
kea_dbuser: ""
kea_dbpasswd: ""
kea_dhcp4_dir: "/etc/kea/kea-dhcp4.conf"
kea_ctrl_dir: "/etc/kea/kea-ctrl-agent.conf"

View File

@ -0,0 +1,66 @@
// This is an example of a configuration for Control-Agent (CA) listening
// for incoming HTTP traffic. This is necessary for handling API commands,
// in particular lease update commands needed for HA setup.
{
"Control-agent":
{
// We need to specify where the agent should listen to incoming HTTP
// queries.
"http-host": "172.16.64.20",
// This specifies the port CA will listen on.
"http-port": 8000,
"control-sockets":
{
// This is how the Agent can communicate with the DHCPv4 server.
"dhcp4":
{
"comment": "socket to DHCPv4 server",
"socket-type": "unix",
"socket-name": "/tmp/kea4-ctrl-socket"
},
// Location of the DHCPv6 command channel socket.
# "dhcp6":
# {
# "socket-type": "unix",
# "socket-name": "/tmp/kea6-ctrl-socket"
# },
// Location of the D2 command channel socket.
# "d2":
# {
# "socket-type": "unix",
# "socket-name": "/tmp/kea-ddns-ctrl-socket",
# "user-context": { "in-use": false }
# }
},
// Similar to other Kea components, CA also uses logging.
"loggers": [
{
"name": "kea-ctrl-agent",
"output_options": [
{
"output": "stdout",
// Several additional parameters are possible in addition
// to the typical output. Flush determines whether logger
// flushes output to a file. Maxsize determines maximum
// filesize before the file is rotated. maxver
// specifies the maximum number of rotated files being
// kept.
"flush": true,
"maxsize": 204800,
"maxver": 4,
// We use pattern to specify custom log message layout
"pattern": "%d{%y.%m.%d %H:%M:%S.%q} %-5p [%c/%i] %m\n"
}
],
"severity": "INFO",
"debuglevel": 0 // debug level only applies when severity is set to DEBUG.
}
]
}
}

View File

@ -0,0 +1,226 @@
// This is an example configuration of the Kea DHCPv4 server 1:
//
// - uses High Availability hook library and Lease Commands hook library
// to enable High Availability function for the DHCP server. This config
// file is for the primary (the active) server.
// - uses memfile, which stores lease data in a local CSV file
// - it assumes a single /24 addressing over a link that is directly reachable
// (no DHCP relays)
// - there is a handful of IP reservations
//
// It is expected to run with a standby (the passive) server, which has a very similar
// configuration. The only difference is that "this-server-name" must be set to "s-kea2.gsb.lan" on the
// other server. Also, the interface configuration depends on the network settings of the
// particular machine.
{
"Dhcp4": {
// Add names of your network interfaces to listen on.
"interfaces-config": {
// The DHCPv4 server listens on this interface. When changing this to
// the actual name of your interface, make sure to also update the
// interface parameter in the subnet definition below.
"interfaces": [ "enp0s9" ]
},
// Control socket is required for communication between the Control
// Agent and the DHCP server. High Availability requires Control Agent
// to be running because lease updates are sent over the RESTful
// API between the HA peers.
"control-socket": {
"socket-type": "unix",
"socket-name": "/tmp/kea4-ctrl-socket"
},
// Use Memfile lease database backend to store leases in a CSV file.
// Depending on how Kea was compiled, it may also support SQL databases
// (MySQL and/or PostgreSQL). Those database backends require more
// parameters, like name, host and possibly user and password.
// There are dedicated examples for each backend. See Section 7.2.2 "Lease
// Storage" for details.
"lease-database": {
// Memfile is the simplest and easiest backend to use. It's an in-memory
// database with data being written to a CSV file. It is very similar to
// what ISC DHCP does.
"type": "memfile"
},
// Let's configure some global parameters. The home network is not very dynamic
// and there's no shortage of addresses, so no need to recycle aggressively.
"valid-lifetime": 43200, // leases will be valid for 12h
"renew-timer": 21600, // clients should renew every 6h
"rebind-timer": 32400, // clients should start looking for other servers after 9h
// Kea will clean up its database of expired leases once per hour. However, it
// will keep the leases in expired state for 2 days. This greatly increases the
// chances for returning devices to get the same address again. To guarantee that,
// use host reservation.
// If both "flush-reclaimed-timer-wait-time" and "hold-reclaimed-time" are
// not 0, when the client sends a release message the lease is expired
// instead of being deleted from lease storage.
"expired-leases-processing": {
"reclaim-timer-wait-time": 3600,
"hold-reclaimed-time": 172800,
"max-reclaim-leases": 0,
"max-reclaim-time": 0
},
// HA requires two hook libraries to be loaded: libdhcp_lease_cmds.so and
// libdhcp_ha.so. The former handles incoming lease updates from the HA peers.
// The latter implements high availability feature for Kea. Note the library name
// should be the same, but the path is OS specific.
"hooks-libraries": [
// The lease_cmds library must be loaded because HA makes use of it to
// deliver lease updates to the server as well as synchronize the
// lease database after failure.
{
"library": "/usr/local/lib/kea/hooks/libdhcp_lease_cmds.so"
},
{
// The HA hook library should be loaded.
"library": "/usr/local/lib/kea/hooks/libdhcp_ha.so",
"parameters": {
// Each server should have the same HA configuration, except for the
// "this-server-name" parameter.
"high-availability": [ {
// This parameter points to this server instance. The respective
// HA peers must have this parameter set to their own names.
"this-server-name": "s-kea1.gsb.lan",
// The HA mode is set to hot-standby. In this mode, the active server handles
// all the traffic. The standby takes over if the primary becomes unavailable.
"mode": "hot-standby",
// Heartbeat is to be sent every 10 seconds if no other control
// commands are transmitted.
"heartbeat-delay": 10000,
// Maximum time for partner's response to a heartbeat, after which
// failure detection is started. This is specified in milliseconds.
// If we don't hear from the partner in 60 seconds, it's time to
// start worrying.
"max-response-delay": 30000,
// The following parameters control how the server detects the
// partner's failure. The ACK delay sets the threshold for the
// 'secs' field of the received discovers. This is specified in
// milliseconds.
"max-ack-delay": 5000,
// This specifies the number of clients which send messages to
// the partner but appear to not receive any response.
"max-unacked-clients": 0,
// This specifies the maximum timeout (in milliseconds) for the server
// to complete sync. If you have a large deployment (high tens or
// hundreds of thousands of clients), you may need to increase it
// further. The default value is 60000ms (60 seconds).
"sync-timeout": 60000,
"peers": [
// This is the configuration of this server instance.
{
"name": "s-kea1.gsb.lan",
// This specifies the URL of this server instance. The
// Control Agent must run along with this DHCPv4 server
// instance and the "http-host" and "http-port" must be
// set to the corresponding values.
"url": "http://172.16.64.20:8000/",
// This server is primary. The other one must be
// secondary.
"role": "primary"
},
// This is the configuration of the secondary server.
{
"name": "s-kea2.gsb.lan",
// Specifies the URL on which the partner's control
// channel can be reached. The Control Agent is required
// to run on the partner's machine with "http-host" and
// "http-port" values set to the corresponding values.
"url": "http://172.16.64.21:8000/",
// The other server is secondary. This one must be
// primary.
"role": "standby"
}
]
} ]
}
}
],
// This example contains a single subnet declaration.
"subnet4": [
{
// Subnet prefix.
"subnet": "172.16.64.0/24",
// There are no relays in this network, so we need to tell Kea that this subnet
// is reachable directly via the specified interface.
"interface": "enp0s9",
// Specify a dynamic address pool.
"pools": [
{
"pool": "172.16.64.100-172.16.64.150"
}
],
// These are options that are subnet specific. In most cases, you need to define at
// least routers option, as without this option your clients will not be able to reach
// their default gateway and will not have Internet connectivity. If you have many
// subnets and they share the same options (e.g. DNS servers typically is the same
// everywhere), you may define options at the global scope, so you don't repeat them
// for every network.
"option-data": [
{
// For each IPv4 subnet you typically need to specify at least one router.
"name": "routers",
"data": "172.16.64.254"
},
{
// Using cloudflare or Quad9 is a reasonable option. Change this
// to your own DNS servers if you have them. Another popular
// choice is 8.8.8.8, owned by Google. Using third party DNS
// service raises some privacy concerns.
"name": "domain-name-servers",
"data": "172.16.0.1"
}
],
// Some devices should get a static address. Since the .100 - .150 range is dynamic,
// let's use the lower address space for this. There are many ways how reservation
// can be defined, but using MAC address (hw-address) is by far the most popular one.
// You can use client-id, duid and even custom defined flex-id that may use whatever
// parts of the packet you want to use as identifiers. Also, there are many more things
// you can specify in addition to just an IP address: extra options, next-server, hostname,
// assign device to client classes etc. See the Kea ARM, Section 8.3 for details.
// The reservations are subnet specific.
#"reservations": [
# {
# "hw-address": "1a:1b:1c:1d:1e:1f",
# "ip-address": "192.168.1.10"
# },
# {
# "client-id": "01:11:22:33:44:55:66",
# "ip-address": "192.168.1.11"
# }
#]
}
],
// logging configuration
"loggers": [
{
// This section affects kea-dhcp4, which is the base logger for DHCPv4 component. It tells
// DHCPv4 server to write all log messages (on severity INFO or higher) to a file. The file
// will be rotated once it grows to 2MB and up to 4 files will be kept. The debuglevel
// (range 0 to 99) is used only when logging on DEBUG level.
"name": "kea-dhcp4",
"output_options": [
{
"output": "stdout",
"maxsize": 2048000,
"maxver": 4
}
],
"severity": "INFO",
"debuglevel": 0
}
]
}
}

View File

@ -0,0 +1,18 @@
---
- name: restart isc-kea-dhcp4-server
service:
name: isc-kea-dhcp4-server.service
state: restarted
enabled: yes
- name: restart isc-kea-ctrl-agent
service:
name: isc-kea-ctrl-agent.service
state: restarted
enabled: yes
- name: restart mariadb-server
service:
name: mariadb-server
state: restarted
enabled: yes

View File

@ -0,0 +1,75 @@
---
- name: installation des dépendances
apt:
name:
- liblog4cplus-2.0.5
- libmariadb3
- libpq5
- mariadb-common
- mysql-common
state: present
- name: telechargement du paquet isc-kea-common
get_url:
url: "https://dl.cloudsmith.io/public/isc/kea-2-4/deb/debian/pool/bookworm/main/i/is/isc-kea-common_2.4.1-isc20231123184533/isc-kea-common_2.4.1-isc20231123184533_amd64.deb"
dest: "/tmp"
- name: telechargement du paquet isc-kea-dhcp4
get_url:
url: "https://dl.cloudsmith.io/public/isc/kea-2-4/deb/debian/pool/bookworm/main/i/is/isc-kea-dhcp4_2.4.1-isc20231123184533/isc-kea-dhcp4_2.4.1-isc20231123184533_amd64.deb"
dest: "/tmp"
- name: telechargement du paquet isc-kea-ctrl-agent
get_url:
url: "https://dl.cloudsmith.io/public/isc/kea-2-4/deb/debian/pool/bookworm/main/i/is/isc-kea-ctrl-agent_2.4.1-isc20231123184533/isc-kea-ctrl-agent_2.4.1-isc20231123184533_amd64.deb"
dest: "/tmp"
- name: telechargement du paquet isc-kea-hooks
get_url:
url: "https://dl.cloudsmith.io/public/isc/kea-2-4/deb/debian/pool/bookworm/main/i/is/isc-kea-hooks_2.4.1-isc20231123184533/isc-kea-hooks_2.4.1-isc20231123184533_amd64.deb"
dest: "/tmp"
- name: Update apt
apt:
update_cache: yes
- name: Installation paquet isc-kea-common
apt:
deb: "/tmp/isc-kea-common_2.4.1-isc20231123184533_amd64.deb"
state: present
- name: Installation isc-kea-dhcp4
apt:
deb: "/tmp/isc-kea-dhcp4_2.4.1-isc20231123184533_amd64.deb"
state: present
- name: Installation isc-kea-ctrl-agent
apt:
deb: "/tmp/isc-kea-ctrl-agent_2.4.1-isc20231123184533_amd64.deb"
state: present
- name: Installation isc-kea-hooks
apt:
deb: "/tmp/isc-kea-hooks_2.4.1-isc20231123184533_amd64.deb"
state: present
- name: Copie du repertoire des hooks dans le repertoire /usr/local/lib/kea
copy:
src: /usr/lib/x86_64-linux-gnu/kea/
dest: /usr/local/lib/kea/
- name: Copie du fichier de configuration kea-dhcp4.conf
copy:
src: kea-dhcp4.conf
dest: /etc/kea/kea-dhcp4.conf
notify:
- restart isc-kea-dhcp4-server
- name: Copie du fichier de configuration kea-ctrl-agent
copy:
src: kea-ctrl-agent.conf
dest: /etc/kea/kea-ctrl-agent.conf
notify:
- restart isc-kea-ctrl-agent
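After the role has run, a quick sanity check that both Kea services started with the copied configuration (unit names as used in the handlers):

```
# Both units should be active
systemctl --no-pager status isc-kea-dhcp4-server isc-kea-ctrl-agent

# Kea logs go to stdout, so startup errors end up in the journal
journalctl -u isc-kea-dhcp4-server -n 20 --no-pager
```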

View File

@ -0,0 +1,14 @@
# Role Kea
***
Role Kea provides high-availability DHCP.
## Table of contents
1. [What does the Kea role do?]
## What does the Kea role do?
It configures the kea servers in high-availability mode.
### Installation and configuration of kea
The kea role installs the kea dhcp4, hooks and admin packages. Once the packages are installed, the two kea servers are configured to hand out IP addresses on n-user and to run in high availability.

View File

@ -0,0 +1,8 @@
#variable kea
kea_ver: "2.4.1"
kea_dbname: ""
kea_dbuser: ""
kea_dbpasswd: ""
kea_dhcp4_dir: "/etc/kea/kea-dhcp4.conf"
kea_ctrl_dir: "/etc/kea/kea-ctrl-agent.conf"

View File

@ -0,0 +1,66 @@
// This is an example of a configuration for Control-Agent (CA) listening
// for incoming HTTP traffic. This is necessary for handling API commands,
// in particular lease update commands needed for HA setup.
{
"Control-agent":
{
// We need to specify where the agent should listen to incoming HTTP
// queries.
"http-host": "172.16.64.21",
// This specifies the port CA will listen on.
"http-port": 8000,
"control-sockets":
{
// This is how the Agent can communicate with the DHCPv4 server.
"dhcp4":
{
"comment": "socket to DHCPv4 server",
"socket-type": "unix",
"socket-name": "/tmp/kea4-ctrl-socket"
},
// Location of the DHCPv6 command channel socket.
# "dhcp6":
# {
# "socket-type": "unix",
# "socket-name": "/tmp/kea6-ctrl-socket"
# },
// Location of the D2 command channel socket.
# "d2":
# {
# "socket-type": "unix",
# "socket-name": "/tmp/kea-ddns-ctrl-socket",
# "user-context": { "in-use": false }
# }
},
// Similar to other Kea components, CA also uses logging.
"loggers": [
{
"name": "kea-ctrl-agent",
"output_options": [
{
"output": "stdout",
// Several additional parameters are possible in addition
// to the typical output. Flush determines whether logger
// flushes output to a file. Maxsize determines maximum
// filesize before the file is rotated. maxver
// specifies the maximum number of rotated files being
// kept.
"flush": true,
"maxsize": 204800,
"maxver": 4,
// We use pattern to specify custom log message layout
"pattern": "%d{%y.%m.%d %H:%M:%S.%q} %-5p [%c/%i] %m\n"
}
],
"severity": "INFO",
"debuglevel": 0 // debug level only applies when severity is set to DEBUG.
}
]
}
}

View File

@ -0,0 +1,226 @@
// This is an example configuration of the Kea DHCPv4 server 2:
//
// - uses High Availability hook library and Lease Commands hook library
// to enable High Availability function for the DHCP server. This config
// file is for the standby (the passive) server.
// - uses memfile, which stores lease data in a local CSV file
// - it assumes a single /24 addressing over a link that is directly reachable
// (no DHCP relays)
// - there is a handful of IP reservations
//
// It is expected to run with the primary (the active) server, which has a very similar
// configuration. The only difference is that "this-server-name" must be set to "s-kea1.gsb.lan" on the
// other server. Also, the interface configuration depends on the network settings of the
// particular machine.
{
"Dhcp4": {
// Add names of your network interfaces to listen on.
"interfaces-config": {
// The DHCPv4 server listens on this interface. When changing this to
// the actual name of your interface, make sure to also update the
// interface parameter in the subnet definition below.
"interfaces": [ "enp0s9" ]
},
// Control socket is required for communication between the Control
// Agent and the DHCP server. High Availability requires Control Agent
// to be running because lease updates are sent over the RESTful
// API between the HA peers.
"control-socket": {
"socket-type": "unix",
"socket-name": "/tmp/kea4-ctrl-socket"
},
// Use Memfile lease database backend to store leases in a CSV file.
// Depending on how Kea was compiled, it may also support SQL databases
// (MySQL and/or PostgreSQL). Those database backends require more
// parameters, like name, host and possibly user and password.
// There are dedicated examples for each backend. See Section 7.2.2 "Lease
// Storage" for details.
"lease-database": {
// Memfile is the simplest and easiest backend to use. It's an in-memory
// database with data being written to a CSV file. It is very similar to
// what ISC DHCP does.
"type": "memfile"
},
// Let's configure some global parameters. The home network is not very dynamic
// and there's no shortage of addresses, so no need to recycle aggressively.
"valid-lifetime": 43200, // leases will be valid for 12h
"renew-timer": 21600, // clients should renew every 6h
"rebind-timer": 32400, // clients should start looking for other servers after 9h
// Kea will clean up its database of expired leases once per hour. However, it
// will keep the leases in expired state for 2 days. This greatly increases the
// chances for returning devices to get the same address again. To guarantee that,
// use host reservation.
// If both "flush-reclaimed-timer-wait-time" and "hold-reclaimed-time" are
// not 0, when the client sends a release message the lease is expired
// instead of being deleted from lease storage.
"expired-leases-processing": {
"reclaim-timer-wait-time": 3600,
"hold-reclaimed-time": 172800,
"max-reclaim-leases": 0,
"max-reclaim-time": 0
},
// HA requires two hook libraries to be loaded: libdhcp_lease_cmds.so and
// libdhcp_ha.so. The former handles incoming lease updates from the HA peers.
// The latter implements high availability feature for Kea. Note the library name
// should be the same, but the path is OS specific.
"hooks-libraries": [
// The lease_cmds library must be loaded because HA makes use of it to
// deliver lease updates to the server as well as synchronize the
// lease database after failure.
{
"library": "/usr/local/lib/kea/hooks/libdhcp_lease_cmds.so"
},
{
// The HA hook library should be loaded.
"library": "/usr/local/lib/kea/hooks/libdhcp_ha.so",
"parameters": {
// Each server should have the same HA configuration, except for the
// "this-server-name" parameter.
"high-availability": [ {
// This parameter points to this server instance. The respective
// HA peers must have this parameter set to their own names.
"this-server-name": "s-kea2.gsb.lan",
// The HA mode is set to hot-standby. In this mode, the active server handles
// all the traffic. The standby takes over if the primary becomes unavailable.
"mode": "hot-standby",
// Heartbeat is to be sent every 10 seconds if no other control
// commands are transmitted.
"heartbeat-delay": 10000,
// Maximum time for partner's response to a heartbeat, after which
// failure detection is started. This is specified in milliseconds.
// If we don't hear from the partner in 60 seconds, it's time to
// start worrying.
"max-response-delay": 30000,
// The following parameters control how the server detects the
// partner's failure. The ACK delay sets the threshold for the
// 'secs' field of the received discovers. This is specified in
// milliseconds.
"max-ack-delay": 5000,
// This specifies the number of clients which send messages to
// the partner but appear to not receive any response.
"max-unacked-clients": 0,
// This specifies the maximum timeout (in milliseconds) for the server
// to complete sync. If you have a large deployment (high tens or
// hundreds of thousands of clients), you may need to increase it
// further. The default value is 60000ms (60 seconds).
"sync-timeout": 60000,
"peers": [
// This is the configuration of this server instance.
{
"name": "s-kea1.gsb.lan",
// This specifies the URL of this server instance. The
// Control Agent must run along with this DHCPv4 server
// instance and the "http-host" and "http-port" must be
// set to the corresponding values.
"url": "http://172.16.64.20:8000/",
// This server is primary. The other one must be
// secondary.
"role": "primary"
},
// This is the configuration of the secondary server.
{
"name": "s-kea2.gsb.lan",
// Specifies the URL on which the partner's control
// channel can be reached. The Control Agent is required
// to run on the partner's machine with "http-host" and
// "http-port" values set to the corresponding values.
"url": "http://172.16.64.21:8000/",
// The other server is secondary. This one must be
// primary.
"role": "standby"
}
]
} ]
}
}
],
// This example contains a single subnet declaration.
"subnet4": [
{
// Subnet prefix.
"subnet": "172.16.64.0/24",
// There are no relays in this network, so we need to tell Kea that this subnet
// is reachable directly via the specified interface.
"interface": "enp0s9",
// Specify a dynamic address pool.
"pools": [
{
"pool": "172.16.64.100-172.16.64.150"
}
],
// These are options that are subnet specific. In most cases, you need to define at
// least routers option, as without this option your clients will not be able to reach
// their default gateway and will not have Internet connectivity. If you have many
// subnets and they share the same options (e.g. DNS servers typically is the same
// everywhere), you may define options at the global scope, so you don't repeat them
// for every network.
"option-data": [
{
// For each IPv4 subnet you typically need to specify at least one router.
"name": "routers",
"data": "172.16.64.254"
},
{
// Using cloudflare or Quad9 is a reasonable option. Change this
// to your own DNS servers if you have them. Another popular
// choice is 8.8.8.8, owned by Google. Using third party DNS
// service raises some privacy concerns.
"name": "domain-name-servers",
"data": "172.16.0.1"
}
],
// Some devices should get a static address. Since the .100 - .150 range is dynamic,
// let's use the lower address space for this. There are many ways how reservation
// can be defined, but using MAC address (hw-address) is by far the most popular one.
// You can use client-id, duid and even custom defined flex-id that may use whatever
// parts of the packet you want to use as identifiers. Also, there are many more things
// you can specify in addition to just an IP address: extra options, next-server, hostname,
// assign device to client classes etc. See the Kea ARM, Section 8.3 for details.
// The reservations are subnet specific.
#"reservations": [
# {
# "hw-address": "1a:1b:1c:1d:1e:1f",
# "ip-address": "192.168.1.10"
# },
# {
# "client-id": "01:11:22:33:44:55:66",
# "ip-address": "192.168.1.11"
# }
#]
}
],
// logging configuration
"loggers": [
{
// This section affects kea-dhcp4, which is the base logger for DHCPv4 component. It tells
// DHCPv4 server to write all log messages (on severity INFO or higher) to a file. The file
// will be rotated once it grows to 2MB and up to 4 files will be kept. The debuglevel
// (range 0 to 99) is used only when logging on DEBUG level.
"name": "kea-dhcp4",
"output_options": [
{
"output": "stdout",
"maxsize": 2048000,
"maxver": 4
}
],
"severity": "INFO",
"debuglevel": 0
}
]
}
}

View File

@ -0,0 +1,18 @@
---
- name: restart isc-kea-dhcp4-server
service:
name: isc-kea-dhcp4-server.service
state: restarted
enabled: yes
- name: restart isc-kea-ctrl-agent
service:
name: isc-kea-ctrl-agent.service
state: restarted
enabled: yes
- name: restart mariadb-server
service:
name: mariadb-server
state: restarted
enabled: yes

Some files were not shown because too many files have changed in this diff.