Compare commits

..

94 Commits

Author SHA1 Message Date
167060157c Update Nextcloud playbook 2023-02-01 15:33:22 +01:00
30fd771045 update remote copy script 2023-02-01 15:24:16 +01:00
ca2f1ca8a1 add r-vp2 2023-02-01 15:08:19 +01:00
f185789e08 version change 2023-02-01 14:58:01 +01:00
2a8ecb7f18 remove apt upgrade 2023-02-01 14:36:59 +01:00
206291e753 typo 2023-02-01 14:28:40 +01:00
87c1d8eee6 mkvm syntax 2023-02-01 14:21:31 +01:00
5224ae00cd ssh-cli role 2023-02-01 13:54:13 +01:00
fa000f3116 update mkvm for s-adm and ssh-cli role 2023-02-01 13:53:04 +01:00
ad2dadb0b6 comment in mkvm.ps1 on running the script under Windows 2023-01-31 11:00:05 +01:00
abad0fcdbc mkvm.ps1 with proper functions, s-adm and s-infra OK 2023-01-31 10:56:10 +01:00
a7a9752aa0 it should work now 2023-01-31 10:08:48 +01:00
4490b84c15 add python3 script 2023-01-31 09:35:35 +01:00
46ad76af18 change fog file 2023-01-31 09:25:26 +01:00
019096fb7e update 2023-01-31 09:04:14 +01:00
993c34b934 Merge branch 'main' of https://gitea.lyc-lecastel.fr/gadmin/gsb2023 2023-01-31 08:54:45 +01:00
fe914d9894 help me 2023-01-31 08:53:25 +01:00
83bfd34e91 Merge branch 'main' of https://gitea.lyc-lecastel.fr/gadmin/gsb2023 2023-01-31 08:43:43 +01:00
00289e1bcb update 2023-01-31 08:43:14 +01:00
8f3f364152 please work fine 2023-01-31 08:35:13 +01:00
e36f8af7e6 Merge branch 'main' of https://gitea.lyc-lecastel.fr/gadmin/gsb2023 2023-01-31 08:30:44 +01:00
495546fae7 it has to work now 2023-01-31 08:29:37 +01:00
c29549a281 Merge branch 'main' of https://gitea.lyc-lecastel.fr/gadmin/gsb2023 2023-01-31 08:24:56 +01:00
761d3a00bc add remote copy 2023-01-31 08:23:21 +01:00
aadc552dc3 lb-web role 2023-01-31 08:21:05 +01:00
3cbae83a73 work please 2023-01-31 08:10:32 +01:00
e1323f22b8 cleanup 2023-01-31 00:55:38 +01:00
f541cebcc2 reorg., moved playbooks into old 2023-01-30 21:52:35 +01:00
ea4166590b mkvm.ps1 s-adm s-infra r-int r-ext, rough version 2023-01-30 11:53:59 +01:00
b04ae4302b yml error in nlb-web role 2023-01-30 11:51:44 +01:00
e328a1e13c Merge branch 'main' of https://gitea.lyc-lecastel.fr/gadmin/gsb2023 2023-01-30 11:48:46 +01:00
ac65be862f add vp1 2023-01-30 11:48:22 +01:00
83f3d14c2a Merge branch 'main' of http://gitea.lyc-lecastel.fr/gadmin/gsb2023 2023-01-30 11:46:03 +01:00
9bda971ff6 work please 2023-01-30 11:44:10 +01:00
038e41dd40 work please 2023-01-30 11:43:48 +01:00
a4ef2de7e8 update readme 2023-01-30 11:43:36 +01:00
0dbbaf0751 update README.md 2023-01-30 11:08:22 +01:00
64f1b74ba7 Merge branch 'main' of https://gitea.lyc-lecastel.fr/gadmin/gsb2023 2023-01-30 10:57:28 +01:00
5ddbedac97 add and change fw-vpn roles 2023-01-30 10:57:23 +01:00
05ddace1af lb-bd rev2 2023-01-30 10:44:18 +01:00
9019c0dbe7 update lb-bd 2023-01-30 10:36:52 +01:00
9fd18796a6 update README.md 2023-01-27 09:49:23 +01:00
6e33ccce33 Merge branch 'main' of https://gitea.lyc-lecastel.fr/gadmin/gsb2023 2023-01-27 09:45:16 +01:00
f38fca4561 add and update README.md 2023-01-27 09:45:11 +01:00
01c2b76936 add ferm.conf 2023-01-27 09:43:16 +01:00
a6a35324ba Merge branch 'main' of https://gitea.lyc-lecastel.fr/gadmin/gsb2023 2023-01-27 09:42:06 +01:00
70950f9e4e add README.md 2023-01-27 09:41:55 +01:00
a7f366a124 nmap rvp2 2023-01-27 09:37:04 +01:00
143c3878a3 add nmap test file 2023-01-27 09:15:49 +01:00
76b4ceabe3 add ferm.conf 2023-01-27 09:08:36 +01:00
0988c9729e yet another nfs update 2023-01-27 09:01:34 +01:00
9bcfcc6305 update nfs-server role again 2023-01-27 08:42:51 +01:00
4cb8aa49b9 Merge branch 'main' of https://gitea.lyc-lecastel.fr/gadmin/gsb2023 2023-01-26 11:56:37 +01:00
340333d5d1 readme update for s-backup 2023-01-26 11:55:44 +01:00
82f6fdc9c9 Merge branch 'main' of https://gitea.lyc-lecastel.fr/gadmin/gsb2023 2023-01-26 11:55:43 +01:00
17618a8c8e update nfs-server 2023-01-26 11:55:37 +01:00
3dac065600 crontab disabled by default, daily 5 a.m. s-backup of the s-win smb share 2023-01-26 11:43:00 +01:00
acb722461c Fix the GLPI installation script 2023-01-26 11:37:15 +01:00
6200de2cda fix nfs-server role 2023-01-26 11:23:29 +01:00
0074367972 wp attempt 2 2023-01-25 17:35:28 +01:00
3aa4a58252 update README.md 2023-01-25 17:08:49 +01:00
8fd183998e Merge branch 'main' of https://gitea.lyc-lecastel.fr/gadmin/gsb2023 2023-01-25 17:05:42 +01:00
f4b736847e Merge branch 'main' of https://gitea.lyc-lecastel.fr/gadmin/gsb2023 2023-01-25 17:02:52 +01:00
5c8efd5e62 update README.md 2023-01-25 17:02:49 +01:00
ab2cc8da96 Merge branch 'main' of https://gitea.lyc-lecastel.fr/gadmin/gsb2023 2023-01-25 17:02:42 +01:00
44c8fc32a5 attempt 1 at getting wp to work 2023-01-25 17:02:39 +01:00
385563b4f2 Update the GLPI installation playbook 2023-01-25 16:54:29 +01:00
fff62c5507 Merge branch 'main' of https://gitea.lyc-lecastel.fr/gadmin/gsb2023 2023-01-25 16:34:11 +01:00
6139095296 update lb-web role 2023-01-25 16:33:56 +01:00
9b609e6418 Merge branch 'main' of https://gitea.lyc-lecastel.fr/gadmin/gsb2023 2023-01-25 16:26:08 +01:00
332c8a2167 update goss for s-agence 2023-01-25 16:25:40 +01:00
a3c2d85952 error in lb-web 2023-01-25 16:09:44 +01:00
f8e3eabb9d Merge branch 'main' of https://gitea.lyc-lecastel.fr/gadmin/gsb2023 2023-01-25 15:59:38 +01:00
043a273589 new lb-web role 2023-01-25 15:59:35 +01:00
5981b67dd9 Merge branch 'main' of https://gitea.lyc-lecastel.fr/gadmin/gsb2023 2023-01-25 15:33:10 +01:00
36336384e6 haproxy FINAL fix 2023-01-25 15:31:26 +01:00
0da9fc0d5a update goss for r-vp2 2023-01-25 15:25:07 +01:00
62f9591c62 goss s-backup 2023-01-25 15:24:53 +01:00
c32cf92cf5 fix lb-front role 2023-01-25 15:17:18 +01:00
d0ba31e795 Merge branch 'main' of https://gitea.lyc-lecastel.fr/gadmin/gsb2023 2023-01-25 11:29:48 +01:00
69aa1ac739 update test goss 2023-01-25 11:29:45 +01:00
90222678ce fix haproxy 2023-01-25 11:26:54 +01:00
1fc84c8f19 fix goss s-mon 2023-01-25 11:21:09 +01:00
b17d0fbac1 fix s-elk ip to 99.11 in dns-master and friends 2023-01-25 11:07:20 +01:00
edbce48966 fix 2 2023-01-25 11:02:49 +01:00
56f3780480 Merge branch 'main' of https://gitea.lyc-lecastel.fr/gadmin/gsb2023 2023-01-25 10:45:47 +01:00
5eae26a67c fix lb roles 2023-01-25 10:45:36 +01:00
7711d023e8 Merge branch 'main' of https://gitea.lyc-lecastel.fr/gadmin/gsb2023 2023-01-25 10:43:19 +01:00
1777bec595 update 2023-01-25 10:43:14 +01:00
12621bb60a add readme 2023-01-25 10:28:22 +01:00
592843932c update README doc 2023-01-25 00:23:46 +01:00
abfe277180 s-backup backup.sh script trap 2023-01-24 10:49:32 +01:00
c2eb2b85a4 fix gsb share script 2023-01-24 10:13:40 +01:00
c20f44ec6e mkusr-backup windows 2023-01-24 09:34:23 +01:00
84 changed files with 962 additions and 615 deletions

View File

@ -1,35 +1,40 @@
# gsb2023
2023-01-18 ps
2023-01-30 ps
Environment and Ansible playbooks for the GSB 2023 project
## Quickstart
prerequisites:
Prerequisites:
* a Debian Bullseye machine
* VirtualBox
* virtual machine ova files:
* debian-bullseye-gsb-2023a.ova
* debian-buster-gsb-2023a.ova
* virtual machine **ova** files:
* **debian-bullseye-gsb-2023a.ova**
* **debian-buster-gsb-2023a.ova**
## The machines
* s-adm : adm router, DHCP + NAT, deployment, squid proxy
* s-infra : master DNS
* r-int : routing, DHCP
* r-ext : routing, NAT
* s-proxy : squid
* s-itil : GLPI server
* s-backup : slave DNS + s-win backup
* s-mon : monitoring with **Nagios4** and syslog
* s-fog : workstation deployment with **FOG**
* s-win : Windows Server 2019, AD, DNS, DHCP, file sharing
* s-nxc : NextCloud with **docker**
* s-elk : dockerised ELK stack
* s-lb : **HaProxy** load balancer for the Wordpress application
* r-vp1 : Wireguard VPN router, head-office side
* r-vp2 : Wireguard VPN router, agency side, DHCP
* **s-adm** : adm router, DHCP + NAT, deployment, squid proxy
* **s-infra** : master DNS, browser autoconfiguration with **wpad**
* **r-int** : routing, DHCP
* **r-ext** : routing, NAT
* **s-proxy** : squid
* **s-itil** : GLPI server
* **s-backup** : slave DNS + s-win backup (SMB)
* **s-mon** : monitoring with **Nagios4**, notifications and syslog
* **s-fog** : workstation deployment with **FOG**
* **s-win** : Windows Server 2019, AD, DNS, DHCP, file sharing
* **s-nxc** : NextCloud with **docker**
* **s-elk** : dockerised ELK stack
* **s-lb** : **HaProxy** load balancer for the Wordpress application (DMZ)
* **r-vp1** : Wireguard VPN router, head-office side
* **r-vp2** : Wireguard VPN router, agency side, DHCP
* **s-agence** : agency server
* **s-lb** : **HaProxy** load balancer for the Wordpress application
* **s-lb-web1** : Wordpress server 1 behind the load balancer
* **s-lb-web2** : Wordpress server 2 behind the load balancer
* **s-lb-db** : MariaDB server for Wordpress
* **s-nas** : NFS server for the load-balanced Wordpress application
## The playbooks
@ -39,7 +44,7 @@ prerequisites:
The following virtual machine image will be used:
* **debian-bullseye-2023a.ova** (2023-01-06)
* Debian Bullseye 11 - 2 NICs - 1 GB - 20 GB storage
* Debian Bullseye 11.6 - 2 NICs - 1 GB - 20 GB storage
### Machine s-adm

12
goss/list-goss Normal file
View File

@ -0,0 +1,12 @@
cd goss/
goss -g r-vp1.yaml v
goss -g r-vp1.yaml aa wireguard
goss add interface enp0s3
goss add interface enp0s8
goss add interface enp0s9
goss add interface wg0
goss aa wireguard
goss add package wireguard-tools
goss add service wg-quick@wg0
goss add command "ping -c4 10.0.0.2"
goss add file "/etc/wireguard/wg0.conf"
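For reference, `v` and `aa` in the log above are the goss shorthands for `validate` and `autoadd`. A minimal sketch of re-running the generated checks on the router (assuming goss is installed and the file lives in goss/):

```shell
# Re-run the checks recorded in r-vp1.yaml; GOSS_FILE avoids repeating -g on every call.
cd goss/
export GOSS_FILE=r-vp1.yaml
goss validate --format documentation   # same as "goss -g r-vp1.yaml v", with readable output
```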

View File

@ -1,67 +1,56 @@
file:
/etc/wireguard/wg0.conf:
exists: true
mode: "0644"
owner: root
group: root
filetype: file
contains:
- AllowedIPs = 10.0.0.2/32, 172.16.128.0/24
package:
# ferm:
# installed: true
strongswan:
wireguard:
installed: true
port:
udp:68:
listening: true
versions:
- 1.0.20210223-1
wireguard-tools:
installed: true
versions:
- 1.0.20210223-1
service:
# dnsmasq:
# enabled: true
# running: true
strongswan:
enabled: true
running: true
ssh:
wg-quick@wg0:
enabled: true
running: true
command:
sysctl net.ipv4.ip_forward:
host 192.168.99.99:
exit-status: 0
stdout:
- net.ipv4.ip_forward = 1
- 99.99.168.192.in-addr.arpa domain name pointer s-adm.gsb.adm.
stderr: []
timeout: 10000
command:
ping -c 4 192.168.0.52:
ping -c4 10.0.0.2:
exit-status: 0
stdout:
- 4 received = 1
- 0% packet loss
stderr: []
timeout: 10000
command:
ping -c 4 192.168.1.1:
exit-status: 0
stdout:
- 4 received = 1
stderr: []
timeout: 10000
command:
ping -c 4 192.168.200.254:
exit-status: 0
stdout:
- 4 received = 1
stderr: []
timeout: 10000
command:
ping -c 4 172.16.0.1:
exit-status: 0
stdout:
- 4 received = 1
stderr: []
timeout: 10000
#process:
# dnsmasq:
# running: true
# squid:
# running: true
interface:
enp0s3:
exists: true
addrs:
- 192.168.99.112/24
mtu: 1500
enp0s8:
exists: true
addrs:
- 192.168.0.51/24
- 192.168.1.2/24
mtu: 1500
enp0s9:
exists: true
addrs:
- 192.168.1.2/24
- 192.168.0.51/24
mtu: 1500
wg0:
exists: true
addrs:
- 10.0.0.1/32
mtu: 1420

52
goss/r-vp2.yaml Normal file
View File

@ -0,0 +1,52 @@
file:
/etc/wireguard/wg0.conf:
exists: true
mode: "0644"
owner: root
group: root
filetype: file
contains: []
package:
wireguard:
installed: true
versions:
- 1.0.20210223-1
wireguard-tools:
installed: true
versions:
- 1.0.20210223-1
service:
isc-dhcp-server:
enabled: true
running: true
wg-quick@wg0:
enabled: true
running: true
command:
ping -c4 10.0.0.1:
exit-status: 0
stdout:
- 0% packet loss
stderr: []
timeout: 10000
interface:
enp0s3:
exists: true
addrs:
- 192.168.99.102/24
mtu: 1500
enp0s8:
exists: true
addrs:
- 172.16.128.254/24
mtu: 1500
enp0s9:
exists: true
addrs:
- 192.168.0.52/24
mtu: 1500
wg0:
exists: true
addrs:
- 10.0.0.2/32
mtu: 1420

View File

@ -1,67 +0,0 @@
package:
ferm:
installed: true
ipsec:
installed: true
port:
tcp:53:
listening: true
udp:67:
listening: true
udp:68:
listening: true
service:
dnsmasq:
enabled: true
running: true
ferm:
enabled: true
running: true
ssh:
enabled: true
running: true
command:
sysctl net.ipv4.ip_forward:
exit-status: 0
stdout:
- net.ipv4.ip_forward = 1
stderr: []
timeout: 10000
sysctl ping -c 4 192.168.0.51:
exit-status: 0
stdout:
- 4 received = 1
stderr: []
timeout: 10000
sysctl ping -c 4 192.168.1.1:
exit-status: 0
stdout:
- 4 received = 1
stderr: []
timeout: 10000
sysctl ping -c 4 192.168.200.254:
exit-status: 0
stdout:
- 4 received = 1
stderr: []
timeout: 10000
sysctl ping -c 4 172.16.0.1:
exit-status: 0
stdout:
- 4 received = 1
stderr: []
timeout: 10000
process:
dnsmasq:
running: true
squid3:
running: true
interface:
enp0s8:
exists: true
addrs:
- 172.16.128.254/24
enp0s9:
exists: true
addrs:
- 192.168.0.52/24

View File

@ -1,39 +1,19 @@
command:
ip r:
ip route |grep default:
exit-status: 0
stdout:
- default via 172.16.128.254 dev enp0s8
- 172.16.128.0/24
- 192.168.99.0/24
stderr: []
timeout: 10000
ping -c 2 172.16.128.254:
ping -c4 172.16.0.1:
exit-status: 0
stdout:
- 0% packet loss
stderr: []
timeout: 10000
ping -c 2 192.168.1.2:
ping -c4 172.16.128.254:
exit-status: 0
stdout:
- 0% packet loss
stderr: []
timeout: 10000
ping -c 2 192.168.1.1:
exit-status: 0
stdout:
- 0% packet loss
stderr: []
timeout: 10000
ping -c 2 192.168.200.254:
exit-status: 0
stdout:
- 0% packet loss
stderr: []
timeout: 10000
ping -c 2 172.16.0.1:
exit-status: 0
stdout:
- 0% packet loss
- 0% packet loss
stderr: []
timeout: 10000

41
goss/s-backup.yaml Normal file
View File

@ -0,0 +1,41 @@
package:
bind9:
installed: true
cifs-utils:
installed: true
rsync:
installed: true
smbclient:
installed: true
service:
bind9:
enabled: true
running: true
rsync:
enabled: true
running: false
command:
ping -c4 ns.gsb.lan:
exit-status: 0
stdout:
- 0% packet loss
stderr: []
timeout: 10000
#check si partage windows accesible
smbclient -L //s-win --user=uBackup%Azerty1+ | grep 'public':
exit-status: 0
stdout:
- public
stderr: []
timeout: 10000
interface:
enp0s3:
exists: true
addrs:
- 192.168.99.4/24
mtu: 1500
enp0s8:
exists: true
addrs:
- 172.16.0.4/24
mtu: 1500

View File

@ -49,7 +49,7 @@ interface:
enp0s3:
exists: true
addrs:
- 192.168.99.104/24
- 192.168.99.8/24
enp0s8:
exists: true
addrs:

View File

@ -1,24 +1,32 @@
#!/bin/bash
## aa : 2023-04-18 15:25
## aa : 2023-01-18 15:25
## ps : 2023-02-01 15:25
set -o errexit
set -o pipefail
GITUSR=gitgsb
GITPRJ=gsb2023
apt update && apt upgrade
apt install -y apache2 git
apt-get update
apt-get install -y apache2 git
STOREREP="/var/www/html/gsbstore"
GLPIREL=10.0.5
GLPIREL=10.0.6
str="wget -nc https://github.com/glpi-project/glpi/releases/download/${GLPIREL}/glpi-${GLPIREL}.tgz"
FIREL=10.0.3+1.0
str2="https://github.com/fusioninventory/fusioninventory-for-glpi/releases/download/glpi${FIREL}/fusioninventory-${FIREL}.tar.bz2"
FIAGREL=2.6
str31="wget -nc https://github.com/fusioninventory/fusioninventory-agent/releases/download/${FIAGREL}/fusioninventory-agent_windows-x64_${FIAGREL}.exe"
#Fusion Inventory
#FIREL=10.0.3+1.0
#str2="https://github.com/fusioninventory/fusioninventory-for-glpi/releases/download/glpi${FIREL}/fusioninventory-${FIREL}.tar.bz2"
#GLPI Agent
GLPIAGVER=1.4
str31="wget -nc https://github.com/glpi-project/glpi-agent/releases/download/${GLPIAGVER}/GLPI-Agent-${GLPIAGVER}-x64.msi"
str32="wget -nc https://github.com/glpi-project/glpi-agent/releases/download/${GLPIAGVER}/GLPI-Agent-${GLPIAGVER}-x86.msi"
str32="wget -nc https://github.com/fusioninventory/fusioninventory-agent/releases/download/${FIAGREL}/fusioninventory-agent_windows-x86_${FIAGREL}.exe"
FOGREL=1.5.9
str4="wget -nc https://github.com/FOGProject/fogproject/archive/${FOGREL}.tar.gz -O fogproject-${FOGREL}.tar.gz"

View File

@ -13,8 +13,9 @@
- goss
# - snmp-agent
# - firewall-vpn-r
- post
- wireguard-r
# - x509-r
- fw-ferm
- ssh-cli
- syslog-cli
- post

View File

@ -18,6 +18,7 @@
# - firewall-vpn-l
- wireguard-l
# - x509-l
- fw-ferm
- ssh-cli
- syslog-cli
- post

View File

@ -10,18 +10,25 @@
192.168.99.3 s-appli.gsb.adm
192.168.99.4 s-backup.gsb.adm
192.168.99.5 s-puppet.gsb.adm
192.168.99.6 s-win.gsb.adm
192.168.99.6 s-win.gsb.adm
192.168.99.7 s-nxc.gsb.adm
192.168.99.8 s-mon.gsb.adm
192.168.99.9 s-itil.gsb.adm
192.168.99.10 s-sspec.gsb.adm
192.168.99.11 s-web-ext.gsb.adm
192.168.99.10 s-lb.gsb.adm
192.168.99.11 s-elk.gsb.adm
192.168.99.10 s-dns.gsb.adm
192.168.99.12 r-int.gsb.adm
192.168.99.13 r-ext.gsb.adm
192.168.99.14 s-nas.gsb.adm
192.168.99.15 s-san.gsb.adm
192.168.99.16 s-fog.gsb.adm
192.168.99.50 s-lb-bd.gsb.adm
192.168.99.101 s-lb-web1.gsb.adm
192.168.99.102 s-lb-web2.gsb.adm
192.168.99.103 s-lb-web3.gsb.adm
192.168.99.112 r-vp1.gsb.adm
192.168.99.102 r-vp2.gsb.adm
192.168.99.8 syslog.gsb.adm

View File

@ -11,16 +11,22 @@
192.168.99.3 s-appli.gsb.adm
192.168.99.4 s-backup.gsb.adm
192.168.99.5 s-puppet.gsb.adm
192.168.99.6 s-win.gsb.adm
192.168.99.6 s-win.gsb.adm
192.168.99.7 s-nxc.gsb.adm
192.168.99.8 s-mon.gsb.adm
192.168.99.9 s-itil.gsb.adm
192.168.99.10 s-sspec.gsb.adm
192.168.99.11 s-web-ext.gsb.adm
192.168.99.10 s-lb.gsb.adm
192.168.99.11 s-elk.gsb.adm
192.168.99.10 s-dns.gsb.adm
192.168.99.12 r-int.gsb.adm
192.168.99.13 r-ext.gsb.adm
192.168.99.14 s-nas.gsb.adm
192.168.99.50 s-lb-bd.gsb.adm
192.168.99.101 s-lb-web1.gsb.adm
192.168.99.102 s-lb-web2.gsb.adm
192.168.99.103 s-lb-web3.gsb.adm
192.168.99.112 r-vp1.gsb.adm
192.168.99.102 r-vp2.gsb.adm
192.168.99.8 syslog.gsb.adm

View File

@ -109,12 +109,12 @@ log-facility local7;
#DHCP pour le réseau wifi
#subnet 172.16.65.0 netmask 255.255.255.0 {
# range 172.16.65.1 172.16.65.100;
# option domain-name-servers ns1.internal.example.org;
# option domain-name "internal.example.org";
# option routers 10.5.5.1;
# option broadcast-address 10.5.5.31;
# default-lease-time 600;
# max-lease-time 7200;
# option domain-name-servers ns1.internal.example.org;
# option domain-name "internal.example.org";
# option routers 10.5.5.1;
# option broadcast-address 10.5.5.31;
# default-lease-time 600;
# max-lease-time 7200;
#}
#DHCP pour le réseau USER

View File

@ -10,5 +10,3 @@
copy: src=dhcpd.conf dest=/etc/dhcp/
notify:
- restart isc-dhcp-server

View File

@ -5,7 +5,7 @@
;
$TTL 604800
@ IN SOA s-infra.gsb.lan. root.s-infra.gsb.lan. (
2022041200 ; Serial
2023012500 ; Serial
7200 ; Refresh
86400 ; Retry
8419200 ; Expire
@ -25,7 +25,7 @@ s-nxc IN A 172.16.0.7
s-docker IN A 172.16.0.7
s-mon IN A 172.16.0.8
s-itil IN A 172.16.0.9
s-elk IN A 172.16.0.10
s-elk IN A 172.16.0.11
s-gestsup IN A 172.16.0.17
r-int IN A 172.16.0.254
r-int-lnk IN A 192.168.200.254

View File

@ -5,7 +5,7 @@
;
$TTL 604800
@ IN SOA s-infra.gsb.lan. root.s-infra.gsb.lan. (
2022041200 ; Serial
2023012500 ; Serial
7200 ; Refresh
86400 ; Retry
8419200 ; Expire
@ -20,12 +20,12 @@ $TTL 604800
6.0 IN PTR s-win.gsb.lan.
7.0 IN PTR s-nxc.gsb.lan.
8.0 IN PTR s-mon.gsb.lan.
9.0 IN PTR s-itil.gsb.lan.
9.0 IN PTR s-itil.gsb.lan.
101.1 IN PTR s-web1
101.2 IN PTR s-web2
100.10 IN PTR s-lb
100.10 IN PTR s-lb.gsb.lan
10.0 IN PTR s-elk.gsb.lan.
11.0 IN PTR s-elk.gsb.lan.
17.0 IN PTR s-gestsup.lan
254.0 IN PTR r-int.gsb.lan.

View File

@ -1,17 +1,15 @@
---
- name: creation d'un repertoire fog
file:
path: /root/tools/fog
state: directory
- name: recuperation de l'archive d'installation fog sur git
git:
repo: https://gitea.lyc-lecastel.fr/gadmin/fog.git
dest: /root/tools/fog/
clone: yes
update: yes
force: yes
#- name: Instructions
# tags: msg
# debug: msg='{{instructions}}'
- name: Modification fichier bash (desac UDPCast)
ansible.builtin.lineinfile:
path: /root/tools/fog/lib/common/functions.sh
regexp: '^configureUDPCast\(\).*'
line: "configureUDPCast() {\nreturn"
backup: yes

23
roles/fw-ferm/README.md Normal file
View File

@ -0,0 +1,23 @@
[Ferm](http://ferm.foo-projects.org/)
Change the iptables implementation in use ([more info here](https://wiki.debian.org/iptables)):
```shell
update-alternatives --set iptables /usr/sbin/iptables-legacy
```
To test, use [Nmap](https://nmap.org/man/fr/man-briefoptions.html):
### r-vp1
```shell
sudo nmap -p51820 192.168.0.51
```
### r-vp2
```shell
sudo nmap -p51820 192.168.0.52
```
### Output:
```
PORT STATE SERVICE
51820/tcp filtered unknown
```
Run some pings!
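Note that WireGuard listens on UDP, so the default TCP scans above will report the port as filtered whether or not the tunnel works. A hedged sketch of probing the UDP port instead (UDP scans require root):

```shell
# Probe the WireGuard port over UDP on both routers; "open|filtered" is the normal answer
# for a silent UDP service, so combine this with the pings suggested above.
sudo nmap -sU -p 51820 192.168.0.51   # r-vp1
sudo nmap -sU -p 51820 192.168.0.52   # r-vp2
```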

View File

@ -0,0 +1,63 @@
# -*- shell-script -*-
#
# Ferm script r-vp1
@def $DEV_PRIVATE = enp0s8;
@def $DEV_WORLD = enp0s9;
@def $NET_PRIVATE = 172.16.0.0/24;
table filter {
chain (INPUT OUTPUT){
# allow VPN
proto udp dport 51820 ACCEPT;
}
chain INPUT {
policy DROP;
# connection tracking
mod state state INVALID DROP;
mod state state (ESTABLISHED RELATED) ACCEPT;
# allow local connections
interface lo ACCEPT;
# respond to ping
proto icmp icmp-type echo-request ACCEPT;
# allow SSH connections from the private network and from some
# well-known internet hosts
saddr ($NET_PRIVATE 81.209.165.42) proto tcp dport ssh ACCEPT;
# we provide DNS and SMTP services for the internal net
interface $DEV_PRIVATE saddr $NET_PRIVATE {
proto (udp tcp) dport domain ACCEPT;
proto udp dport bootps ACCEPT;
}
# interface réseau
interface $DEV_WORLD {
}
# the rest is dropped by the above policy
}#FIN INPUT
# outgoing connections are not limited
chain OUTPUT policy ACCEPT;
chain FORWARD {
policy ACCEPT;
# connection tracking
mod state state INVALID DROP;
mod state state (ESTABLISHED RELATED) ACCEPT;
# connections from the internal net to the internet or to other
# internal nets are allowed
interface $DEV_PRIVATE ACCEPT;
# the rest is dropped by the above policy
}
}

View File

@ -0,0 +1,62 @@
# -*- shell-script -*-
#
# Ferm script r-vp2
@def $DEV_PRIVATE = enp0s9;
@def $DEV_WORLD = enp0s8;
@def $NET_PRIVATE = 172.16.0.0/24;
table filter {
chain (INPUT OUTPUT){
# allow VPN
proto udp dport 51820 ACCEPT;
}
chain INPUT {
policy DROP;
# connection tracking
mod state state INVALID DROP;
mod state state (ESTABLISHED RELATED) ACCEPT;
# allow local connections
interface lo ACCEPT;
# respond to ping
proto icmp icmp-type echo-request ACCEPT;
# allow SSH connections from the private network and from some
# well-known internet hosts
saddr ($NET_PRIVATE 81.209.165.42) proto tcp dport ssh ACCEPT;
# we provide DNS and SMTP services for the internal net
interface $DEV_PRIVATE saddr $NET_PRIVATE {
proto (udp tcp) dport domain ACCEPT;
proto udp dport bootps ACCEPT;
}
# interface réseau
interface $DEV_WORLD {
}
# the rest is dropped by the above policy
}#FIN INPUT
# outgoing connections are not limited
chain OUTPUT policy ACCEPT;
chain FORWARD {
policy ACCEPT;
# connection tracking
mod state state INVALID DROP;
mod state state (ESTABLISHED RELATED) ACCEPT;
# connections from the internal net to the internet or to other
# internal nets are allowed
interface $DEV_PRIVATE ACCEPT;
# the rest is dropped by the above policy
}
}

View File

@ -0,0 +1,15 @@
---
- name: installation de ferm
apt:
name: ferm
state: present
- name: copie du ferm.conf
copy:
src: ferm.conf.{{ ansible_hostname }}
dest: /etc/ferm/ferm.conf
- name: redemarage service ferm
ansible.builtin.service:
name: ferm.service
state: restarted
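A small sketch for sanity-checking the copied ferm.conf before the restart task runs (assuming the ferm package from the first task is installed; --noexec/--lines prints the generated iptables commands without applying them):

```shell
# Dry run: show the iptables rules ferm would generate from the new configuration.
ferm --noexec --lines /etc/ferm/ferm.conf
```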

View File

@ -14,22 +14,6 @@ password: glpi
Select the glpi database
Do not send usage statistics
## Fusion Inventory :
Install the plugin under Configuration > Plugins
Enable the plugin
For the agent inventory to come up, a crontab entry (crontab -e) must be added on s-itil: * * * * * /usr/bin/php7.4 /var/www/glpi/front/cron.php &>/dev/null
Then run the taskscheduler under Configuration > Automatic actions > taskscheduler
For the Windows agent, fetch the agent from http://s-itil/ficlients
Do a fresh installation from scratch
Select the full installation type
In server mode set the URL http://s-itil/plugins/fusioninventory and tick the quick installation box
For the Debian agent, install the fusioninventory-agent package
Add the line server = http://s-itil/plugins/fusioninventory to the /etc/fusioninventory/agent.cfg file
Restart the fusioninventory-agent service, then do a reload
Run the command pkill -USR1 -f -P 1 fusioninventory-agent
## Postfix :

View File

@ -0,0 +1,6 @@
depl_url: "http://s-adm.gsb.adm/gsbstore"
#depl_glpi: "glpi-9.5.6.tgz"
depl_glpi: "glpi-10.0.6.tgz"
#depl_fusioninventory: "fusioninventory-9.5+3.0.tar.bz2"
depl_glpi_agentx64: "GLPI-Agent-1.4-x64.msi"
depl_glpi_agentx86: "GLPI-Agent-1.4-x86.msi"

View File

@ -105,12 +105,12 @@
# - name: copy .my.cnf file with root password credentials
# copy: src=.my.cnf dest=/root/tools/ansible/.my.cnf owner=root mode=0600
- name: Installation de Fusioninventory pour Linux
unarchive:
src: "{{ depl_url }}/{{ depl_fusioninventory }}"
#src: http://depl/gsbstore/fusioninventory-{{ fd_version }}.tar.bz2
dest: /var/www/html/glpi/plugins
remote_src: yes
# - name: Installation de Fusioninventory pour Linux
# unarchive:
# src: "{{ depl_url }}/{{ depl_fusioninventory }}"
#src: http://depl/gsbstore/fusioninventory-{{ fd_version }}.tar.bz2
# dest: /var/www/html/glpi/plugins
# remote_src: yes
- name: Creation de ficlient
file:
@ -127,23 +127,15 @@
group: www-data
mode: 0775
- name: Installation de FusionInventory windows x64
- name: Installation de GLPI Agent windows x64
get_url:
url: "{{ depl_url }}/{{ depl_fusioninventory_agentx64 }}"
url: "{{ depl_url }}/{{ depl_glpi_agentx64 }}"
dest: "/var/www/html/ficlients"
- name: Installation de FusionInventory windows x86
get_url:
url: "{{ depl_url }}/{{ depl_fusioninventory_agentx86 }}"
dest: "/var/www/html/ficlients"
- name: Attribution des permissions sur repertoire /plugins/fusioninventory
file:
path: /var/www/html/glpi/plugins/fusioninventory
owner: www-data
group: www-data
recurse: yes
state: directory
# - name: Installation de GLPI Agent windows x86
# get_url:
# url: "{{ depl_url }}/{{ depl_glpi_agentx86 }}"
# dest: "/var/www/html/ficlients"
- name: Copie du script dbdump
copy:

View File

@ -1,6 +0,0 @@
depl_url: "http://s-adm.gsb.adm/gsbstore"
#depl_glpi: "glpi-9.5.6.tgz"
depl_glpi: "glpi-10.0.5.tgz"
depl_fusioninventory: "fusioninventory-9.5+3.0.tar.bz2"
depl_fusioninventory_agentx64: "fusioninventory-agent_windows-x64_2.6.exe"
depl_fusioninventory_agentx86: "fusioninventory-agent_windows-x86_2.6.exe"

View File

@ -44,7 +44,6 @@ backend fermeweb
#option httpchk HEAD / HTTP/1.0
server s-lb-web1 192.168.101.1:80 check
server s-lb-web2 192.168.101.2:80 check
#server s-lb-web3 192.168.101.3:80 check
listen stats

View File

@ -8,18 +8,18 @@
path: /etc/haproxy/haproxy.cfg
block: |
frontend proxypublic
bind 192.168.56.2:80
bind 192.168.100.10:80
default_backend fermeweb
backend fermeweb
balance roundrobin
option httpclose
#option httpchk HEAD / HTTP/1.0
server web1.test 192.168.56.3:80 check
#server web2.test 192.168.56.4:80 check
server s-lb-web1 192.168.101.1:80 check
server s-lb-web2 192.168.101.2:80 check
- name: redemarre haproxy
service:
name: haproxy
state: restarted
# state: restarted
enabled: yes
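Since the new version of this task only enables the service and comments out the restart, a manual syntax check and restart may be needed after the block is inserted; a sketch, assuming haproxy is already installed by the role:

```shell
# Validate the assembled configuration, then restart only if it is clean.
haproxy -c -f /etc/haproxy/haproxy.cfg && systemctl restart haproxy
```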

View File

@ -10,4 +10,4 @@
dest: /etc/fstab
regexp: ''
insertafter: EOF
line: '192.168.102.253:/home/wordpress /var/www/html/wordpress nfs soft,timeo=5,intr,rsize=8192,wsize=8192,wsize=8192 0 0'
line: '192.168.102.253:/home/ /var/www/html/wordpress nfs soft,timeo=5,intr,rsize=8192,wsize=8192,wsize=8192 0 0'

View File

@ -5,6 +5,6 @@ This role:
* installs **nfs-server**
* copies the **exports** configuration file to export the **/home/wordpress** directory
* restarts the **nfs-server** service
* unpacks wordpress
### Goal
The **/home/wordpress** directory is exported over **nfs** on the **n-dmz-db** network

View File

@ -7,4 +7,4 @@
# Example for NFSv4:
# /srv/nfs4 gss/krb5i(rw,sync,fsid=0,crossmnt,no_subtree_check)
# /srv/nfs4/homes gss/krb5i(rw,sync,no_subtree_check)
/home/wordpress 192.168.102.0/255.255.255.0 (rw,no_root_squash,subtree_check)
/home/wordpress 192.168.102.0/255.255.255.0(rw,no_root_squash,subtree_check)
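The removed space before the option list matters: written as `host (options)`, NFS reads it as the host with default options plus a wildcard entry carrying the options for every client, instead of applying them to 192.168.102.0/24 only. A sketch for re-reading and checking the export after such an edit (exportfs ships with the NFS server installed by this role):

```shell
# Re-export everything declared in /etc/exports, then list what is actually exported and with which options.
exportfs -ra
exportfs -v
```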

View File

@ -1,18 +1,70 @@
---
- name: installation des paquets
apt:
name:
- nfs-kernel-server
state: latest
- name: 00 - cree repertoire wordpress pour export nfs
file:
path: /home/wordpress
state: directory
- name: copie exports pour partage nfs wordpress
copy:
src: exports
dest: /etc
- name: 05 - Install nfs-server
apt:
name: nfs-server
state: present
- name: redemarrage du service rpcbind requis pour le service nfs
shell: service rpcbind restart
- name: 10 - creation fichier exports nfs
ansible.builtin.blockinfile:
path: /etc/exports
block: |
/home/wordpress 192.168.102.0/255.255.255.0(rw,no_root_squash,subtree_check)
- name: redemarrage du service nfs-kernel-server
shell: service nfs-kernel-server restart
- name: 20 - decompresse wordpress
unarchive:
src: https://fr.wordpress.org/latest-fr_FR.tar.gz
dest: /home/
remote_src: yes
- name: 22 - change owner et group pour repertoire wordpress
file:
path: /home/wordpress
state: directory
recurse: yes
owner: www-data
group: www-data
- name: 30 - genere fichier de config wordpress
copy:
src: /home/wordpress/wp-config-sample.php
dest: /home/wordpress/wp-config.php
remote_src: yes
- name: 35 - ajuste variable dbname dans fichier de config wp-config.php
replace:
path: /home/wordpress/wp-config.php
regexp: "votre_nom_de_bdd"
replace: "wordpressdb"
backup: yes
- name: 40 ajuste variable dbusername dans fichier de config wp-config.php
replace:
path: /home/wordpress/wp-config.php
regexp: "votre_utilisateur_de_bdd"
replace: "wordpressuser"
backup: yes
- name: 45 - ajuste variable mdp dans fichier de config wp-config.php
replace:
path: /home/wordpress/wp-config.php
regexp: "votre_mdp_de_bdd"
replace: "wordpresspasswd"
backup: yes
- name: 50 - ajuste hostname fichier wp-config.php
replace:
path: /home/wordpress/wp-config.php
regexp: "localhost"
replace: "192.168.102.254"
backup: yes
- name: 55 - relance nfs
service:
name: nfs-server
state: restarted
enabled: yes
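A hedged check that the export is reachable from a client on n-dmz-db once this role has run (IP as used above; nfs-common assumed on the client):

```shell
showmount -e 192.168.102.253                               # list the exports offered by s-nas
sudo mount -t nfs 192.168.102.253:/home/wordpress /mnt     # temporary mount for inspection
ls /mnt/wp-config.php && sudo umount /mnt                  # the config generated above should be present
```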

View File

@ -1,102 +0,0 @@
<?php
/**
* La configuration de base de votre installation WordPress.
*
* Ce fichier est utilisé par le script de création de wp-config.php pendant
* le processus dinstallation. Vous navez pas à utiliser le site web, vous
* pouvez simplement renommer ce fichier en « wp-config.php » et remplir les
* valeurs.
*
* Ce fichier contient les réglages de configuration suivants :
*
* Réglages MySQL
* Préfixe de table
* Clés secrètes
* Langue utilisée
* ABSPATH
*
* @link https://fr.wordpress.org/support/article/editing-wp-config-php/.
*
* @package WordPress
*/
// ** Réglages MySQL - Votre hébergeur doit vous fournir ces informations. ** //
/** Nom de la base de données de WordPress. */
define( 'DB_NAME', 'wordpress' );
/** Utilisateur de la base de données MySQL. */
define( 'DB_USER', 'wp' );
/** Mot de passe de la base de données MySQL. */
define( 'DB_PASSWORD', 'wp' );
/** Adresse de lhébergement MySQL. */
define( 'DB_HOST', '192.168.102.254' );
/** Jeu de caractères à utiliser par la base de données lors de la création des tables. */
define( 'DB_CHARSET', 'utf8' );
/**
* Type de collation de la base de données.
* Ny touchez que si vous savez ce que vous faites.
*/
define( 'DB_COLLATE', '' );
/**#@+
* Clés uniques dauthentification et salage.
*
* Remplacez les valeurs par défaut par des phrases uniques !
* Vous pouvez générer des phrases aléatoires en utilisant
* {@link https://api.wordpress.org/secret-key/1.1/salt/ le service de clés secrètes de WordPress.org}.
* Vous pouvez modifier ces phrases à nimporte quel moment, afin dinvalider tous les cookies existants.
* Cela forcera également tous les utilisateurs à se reconnecter.
*
* @since 2.6.0
*/
define( 'AUTH_KEY', 'mettez une phrase unique ici' );
define( 'SECURE_AUTH_KEY', 'mettez une phrase unique ici' );
define( 'LOGGED_IN_KEY', 'mettez une phrase unique ici' );
define( 'NONCE_KEY', 'mettez une phrase unique ici' );
define( 'AUTH_SALT', 'mettez une phrase unique ici' );
define( 'SECURE_AUTH_SALT', 'mettez une phrase unique ici' );
define( 'LOGGED_IN_SALT', 'mettez une phrase unique ici' );
define( 'NONCE_SALT', 'mettez une phrase unique ici' );
/**#@-*/
/**
* Préfixe de base de données pour les tables de WordPress.
*
* Vous pouvez installer plusieurs WordPress sur une seule base de données
* si vous leur donnez chacune un préfixe unique.
* Nutilisez que des chiffres, des lettres non-accentuées, et des caractères soulignés !
*/
$table_prefix = 'wp_';
/**
* Pour les développeurs : le mode déboguage de WordPress.
*
* En passant la valeur suivante à "true", vous activez laffichage des
* notifications derreurs pendant vos essais.
* Il est fortement recommandé que les développeurs dextensions et
* de thèmes se servent de WP_DEBUG dans leur environnement de
* développement.
*
* Pour plus dinformation sur les autres constantes qui peuvent être utilisées
* pour le déboguage, rendez-vous sur le Codex.
*
* @link https://fr.wordpress.org/support/article/debugging-in-wordpress/
*/
define( 'WP_DEBUG', false );
/* Cest tout, ne touchez pas à ce qui suit ! Bonne publication. */
/** Chemin absolu vers le dossier de WordPress. */
if ( ! defined( 'ABSPATH' ) )
define( 'ABSPATH', dirname( __FILE__ ) . '/' );
/** Réglage des variables de WordPress et de ses fichiers inclus. */
require_once( ABSPATH . 'wp-settings.php' );
define('DB_NAME', 'wordpress');
define('DB_HOST', '192.168.102.254');
define('DB_USER', 'wp');
define('DB_PASSWORD', 'wp');

View File

@ -1,38 +1,28 @@
---
- name: creation repertoir
file:
path: /home/
state: directory
- name: download and extract wordpress
unarchive:
src: "{{ depl_url }}/{{ depl_wordpress }}"
dest: /home/
remote_src: yes
owner: www-data
group: www-data
---
- name: installation des paquets web
apt:
name:
- apache2
- php
- php-mbstring
- php-mysql
- mariadb-client
state: present
- name: Copy sample config file
command: mv /home/wordpress/wp-config-sample.php /home/wordpress/wp-config.php creates=/home/wordpress/wp-config.php
- name: install nfs-common
apt:
name: nfs-common
state: present
- name: Changement du fichier de conf
copy:
src: wp-config.php
dest: /home/wordpress/wp-config.php
- name: montage nfs pour word press
blockinfile:
path: /etc/fstab
block: |
192.168.102.253:/home/wordpress /var/www/html nfs soft,timeo=5,intr,rsize=8192,wsize=8192,wsize=8192 0 0
- name: Attributions des permissions
file:
path: /home/wordpress
recurse: yes
owner: 33
group: 33
# - name: Fix permissions
# shell: chown -R www-data /var/www/wordpress/*
#
# - name: Update default Apache site
# lineinfile:
# dest=/etc/apache2/sites-enabled/000-default.conf
# regexp="(.)+DocumentRoot /var/www/html"
# line="DocumentRoot /var/www/wordpress"
# notify:
# - restart apache2
#- name: monte export wordpress
# ansible.posix.mount:
# path: /var/www/html
# state: mounted
# fstype: nfs
# src: 192.168.102.253:/exports/wordpress

View File

@ -0,0 +1,80 @@
version: '3'
volumes:
nextcloud:
db:
networks:
proxy:
external: true
nxc:
external: false
services:
reverse-proxy:
# The official v2 Traefik docker image
image: traefik:latest
container_name: traefik
# Enables the web UI and tells Traefik to listen to docker
command: --api.insecure=true --providers.docker
ports:
# The HTTP port
- "80:80"
- "443:443"
# The Web UI (enabled by --api.insecure=true)
- "8080:8080"
volumes:
# So that Traefik can listen to the Docker events
- /var/run/docker.sock:/var/run/docker.sock:ro
# Map the static configuration into the container
- ./config/static.yml:/etc/traefik/traefik.yml:ro
# Map the dynamic configuration into the container
- ./config/dynamic.yml:/etc/traefik/dynamic.yml:ro
# Map the certificats into the container
- ./certs:/etc/certs:ro
networks:
- proxy
db:
image: mariadb:10.5
container_name: db
restart: always
command: --transaction-isolation=READ-COMMITTED --binlog-format=ROW
volumes:
- db:/var/lib/mysql
networks:
- nxc
environment:
- MYSQL_ROOT_PASSWORD=Azerty1+
- MYSQL_PASSWORD=Azerty1+
- MYSQL_DATABASE=nextcloud
- MYSQL_USER=nextcloud
app:
image: nextcloud
container_name: app
restart: always
ports:
- 8081:80
#links:
depends_on:
- db
volumes:
- ./nextcloud:/var/www/html
networks:
- proxy
- nxc
labels:
# - "traefik.enable=true"
- "traefik.http.routers.app.rule=Host(`s-nxc.gsb.lan`)"
- "traefik.http.routers.app.tls=true"
- "traefik.enable=true"
- "traefik.docker.network=proxy"
# - "traefik.http.routers.app.entrypoints=websecure"
# - "traefik.http.routers.app.rule=Host(`mon.nxc`)"
- "traefik.http.routers.app.service=app-service"
- "traefik.http.services.app-service.loadbalancer.server.port=80"
environment:
- MYSQL_PASSWORD=Azerty1+
- MYSQL_DATABASE=nextcloud
- MYSQL_USER=nextcloud
- MYSQL_HOST=db

View File

@ -1,58 +0,0 @@
version: '2'
volumes:
# nextcloud:
db:
services:
db:
image: mariadb
container_name: db
restart: always
#command: --transaction-isolation=READ-COMMITTED --binlog-format=ROW
command: --innodb-read-only-compressed=OFF
volumes:
- db:/var/lib/mysql
networks:
- nxc-db
environment:
- MYSQL_ROOT_PASSWORD=blabla
- MYSQL_PASSWORD=blabla
- MYSQL_DATABASE=nextcloud
- MYSQL_USER=nextcloud
nxc:
image: nextcloud
restart: always
container_name: nxc
# ports:
# - 8080:80
# links:
depends_on:
- db
volumes:
- ./nextcloud:/var/www/html
environment:
- MYSQL_PASSWORD=blabla
- MYSQL_DATABASE=nextcloud
- MYSQL_USER=nextcloud
- MYSQL_HOST=db
labels:
# Enable this container to be mapped by traefik
# For more information, see: https://docs.traefik.io/providers/docker/#exposedbydefault
- "traefik.enable=true"
# URL to reach this container
- "traefik.http.routers.nxc.rule=Host(`s-nxc.gsb.lan`)"
# Activation of TLS
- "traefik.http.routers.nxc.tls=true"
# If port is different than 80, use the following service:
#- "traefik.http.services.<service_name>.loadbalancer.server.port=<port>"
# - "traefik.http.services.app.loadbalancer.server.port=8080"
networks:
- proxy
- nxc-db
networks:
proxy:
external: true
nxc-db:
external: false

View File

@ -1,6 +1,4 @@
#!/bin/bash
docker-compose -f nextcloud.yml down
docker-compose -f traefik.yml down
docker compose down -v
sleep 1
docker-compose -f traefik.yml up -d --remove-orphans
docker-compose -f nextcloud.yml up -d
docker compose up -d

View File

@ -1,4 +1,6 @@
#!/bin/bash
docker volume prune -f
docker container prune -f
docker image prune -f
docker compose down -v
#docker volume prune -f
#docker container prune -f
#docker image prune -f

View File

@ -1,3 +1,2 @@
#!/bin/bash
docker-compose -f traefik.yml up -d
docker-compose -f nextcloud.yml up -d
docker compose up -d

View File

@ -1,3 +1,2 @@
#!/bin/bash
docker-compose -f nextcloud.yml down
docker-compose -f traefik.yml down
docker compose down

View File

@ -1,28 +0,0 @@
version: '3'
services:
reverse-proxy:
#image: traefik:v2.5
image: traefik
container_name: traefik
restart: always
security_opt:
- no-new-privileges:true
ports:
# Web
- 80:80
- 443:443
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
# Map the static configuration into the container
- ./config/static.yml:/etc/traefik/traefik.yml:ro
# Map the dynamic configuration into the container
- ./config/dynamic.yml:/etc/traefik/dynamic.yml:ro
# Map the certificats into the container
- ./certs:/etc/certs:ro
networks:
- proxy
networks:
proxy:
external: true

View File

@ -24,14 +24,9 @@
src: dynamic.yml
dest: /root/nxc/config
- name: Copie de nextcloud.yml
- name: Copie de docker-compose.yml
copy:
src: nextcloud.yml
dest: /root/nxc
- name: Copie de traefik.yml
copy:
src: traefik.yml
src: docker-compose.yml
dest: /root/nxc
- name: Copie de nxc-stop.sh
@ -76,3 +71,8 @@
- name: Creation reseau docker proxy
command: docker network create proxy
- name: Démarrage du docker-compose...
command: /bin/bash docker-compose up -d
args:
chdir: /root/nxc
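The nxc-start/nxc-stop helpers earlier in this diff use the Compose v2 plugin syntax; the equivalent manual start from the directory this role populates would be (a sketch, assuming the docker compose plugin is installed):

```shell
cd /root/nxc
docker network create proxy   # only needed once; the role also creates it
docker compose up -d          # brings up traefik, mariadb and nextcloud from docker-compose.yml
docker compose ps             # check that the three containers are running
```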

View File

@ -8,13 +8,13 @@ iface lo inet loopback
# cote N-adm
allow-hotplug enp0s3
iface enp0s3 inet static
address 192.168.99.10
address 192.168.99.11
netmask 255.255.255.0
gateway 192.168.99.99
# cote N-infra
allow-hotplug enp0s8
iface enp0s8 inet static
address 172.16.0.10
address 172.16.0.11
netmask 255.255.255.0
post-up route add -net 172.16.64.0/24 gw 172.16.0.254

View File

@ -6,6 +6,13 @@ s-backup retrieves the folders and files present in the directory
# Running the backup.sh script
Once the installation of s-backup and the configuration of s-win are finished, you can
run backup.sh to retrieve the full contents of the gsb.lan share from s-win
run backupsmb.sh to retrieve the full contents of the gsb.lan share from s-win
onto the s-backup machine.
# Crontab
A crontab entry has been added but is disabled by default (backupsmb.sh runs every day at 05:00)
# Leads
- Consolidate the traps

View File

@ -1,27 +0,0 @@
#!/bin/bash
BDIR=/home/backup
SWIN=/tmp/s-win
[ -d "${BDIR}" ] || mkdir "${BDIR}"
[ -d "${BDIR}" ] || mkdir "${BDIR}/s-win"
[ -d "${SWIN}" ] || mkdir "${SWIN}"
mount -t cifs -o ro,vers=3.0,username=u-backup,password=Azerty1+ //s-win/commun "${SWIN}"
if [ $? != 0 ] ; then
echo "$0 : erreur montage ${SWIN}"
exit 1
fi
rsync -av "${SWIN}/" "${BDIR}/s-win/commun"
umount "${SWIN}"
mount -t cifs -o ro,vers=3.0,username=u-backup,password=Azerty1+ //s-win/public "${SWIN}"
if [ $? != 0 ] ; then
echo "$0 : erreur montage"
exit 2
fi
rsync -av "${SWIN}/" "${BDIR}/s-win/public"
umount "${SWIN}"
exit 0

View File

@ -0,0 +1,51 @@
#!/bin/bash
BDIR=/home/backup
SWIN=/tmp/s-win
LOCK=/tmp/s-backup.lock
#Fonction cleanup pour sortir propre dans tout les cas
cleanup()
{
rm "${LOCK}"
umount "${SWIN}"
echo "nettoyage effectue, sortie tout propre ..."
exit 3
}
#check si pas deja en cours d execution > sortie si fichier de lock existe
if [ -e "${LOCK}" ] ; then
echo "$0 : Verrouillage, deja en cours d execution"
trap cleanup 1 2 3 6
fi
#prepartion des dossiers qui vont accueillir les donnees à sauvegarder
[ -d "${BDIR}" ] || mkdir "${BDIR}"
[ -d "${BDIR}/s-win" ] || mkdir "${BDIR}/s-win"
[ -d "${SWIN}" ] || mkdir "${SWIN}"
#etablissement du lock
touch "${LOCK}"
mount -t cifs -o ro,vers=3.0,username=uBackup,password=Azerty1+ //s-win/commun "${SWIN}"
if [ $? != 0 ] ; then
echo "$0 : erreur montage ${SWIN}"
rm "${LOCK}"
trap cleanup 1 2 3 6
fi
rsync -av "${SWIN}/" "${BDIR}/s-win/commun"
umount "${SWIN}"
mount -t cifs -o ro,vers=3.0,username=uBackup,password=Azerty1+ //s-win/public "${SWIN}"
if [ $? != 0 ] ; then
echo "$0 : erreur montage ${SWIN}"
trap cleanup 1 2 3 6
fi
rsync -av "${SWIN}/" "${BDIR}/s-win/public"
umount "${SWIN}"
#libere le verrou
rm "${LOCK}"
exit 0
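The s-backup README earlier in this diff lists consolidating the traps as a lead: as written, `trap cleanup 1 2 3 6` only registers the handler for those signals, so a failed mount falls through without running `cleanup`. A minimal sketch of the usual pattern, reusing the names from this script:

```shell
#!/bin/bash
# Sketch only: register the handler once, then let every failure path exit through it.
LOCK=/tmp/s-backup.lock
SWIN=/tmp/s-win
cleanup() {
    rm -f "${LOCK}"
    mountpoint -q "${SWIN}" && umount "${SWIN}"
}
trap cleanup EXIT INT TERM
touch "${LOCK}"
# ... mounts and rsyncs as in backupsmb.sh; any "exit N" now goes through cleanup automatically ...
```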

View File

@ -2,7 +2,22 @@
apt:
name:
- rsync
- smbclient
- smbclient
- cifs-utils
state: present
- name: copie script backupsmb dans /usr/local/bin
copy:
src: backupsmb.sh
dest: /usr/local/bin
owner: root
group: root
mode: '0755'
- name: crontab backupsmb ( commentee par defaut )
cron:
name: backupsmb
disabled: true
minute: "0"
hour: "5"
job: "/usr/local/bin/backupsmb.sh"

View File

@ -1,10 +1,14 @@
---
- name: Creation de .ssh
file: path=/root/.ssh mode=0700 state=directory
file:
path: /root/.ssh
mode: 0700
state: directory
- name: Copie cle public s-adm
shell: curl 192.168.99.99/id_rsa.pub > ~/.ssh/authorized_keys
- name: Copie cle publiique depuis s-adm
ansible.posix.authorized_key:
user: root
state: present
key: http://s-adm.gsb.adm/id_rsa.pub
#- name: Copie cle public s-spec
# shell: curl 192.168.99.10/id_rsa.pub >> ~/.ssh/authorized_keys
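The new task relies on `ansible.posix.authorized_key`, which ships in the ansible.posix collection rather than in ansible-core; a sketch of making sure it is available on the control node:

```shell
# Install the collection that provides the authorized_key module (skip if it is already bundled).
ansible-galaxy collection install ansible.posix
```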

View File

@ -0,0 +1,5 @@
# add a sleep 5
edit "/etc/init.d/isc-dhcp-server"
go to the "case \"$1\" in" block and add "sleep 5" before the "if"

View File

@ -4,18 +4,17 @@
name: wireguard
state: present
- name: installation de ferm
apt:
name: ferm
state: present
- name: installation de wireguard-tools
apt:
name: wireguard-tools
state: present
#- name: installation de sshpass
# apt:
# name: sshpass
# state: present
#- name: copie du fichier de configuration depuis r-vp1
# command: "sshpass -p 'root' scp -r root@192.168.99.112:/root/confwg/wg0-b.conf /etc/wireguard/"
#- name: renommage du fichier de configuration
# command: "mv /etc/wireguard/wg0-b.conf /etc/wireguard/wg0.conf"

View File

@ -1,14 +1,13 @@
# Installing r-vp1 (Wireguard)
Installation procedure for r-vp1 and for copying the wg0-b.conf file.
***
***
This file describes the installation of r-vp1
***
From r-vp1, change to the **/tools/ansible/gsb2023** directory to run the playbook:
**"ansible-playbook -i localhost, -c local r-vp1.yml"**, then reboot r-vp1.
Wait for the installation to finish. Then make a remote copy of the
wg0-b.conf file to r-vp2: **"scp /confwg/wg0-b.conf root@'r-vp2 ip':/etc/wireguard/"**.
Go to the gsb2022 directory and run the following command:
_"ansible-playbook -i localhost, -c local r-vp1.yml"_
Wait for the installation to finish, then go to the confwg directory
Make a remote copy of wg0-b.conf to r-vp2 and move wg0-a.conf locally into /etc/wireguard
Rename both files to wg0.conf
Run _"systemctl enable wg-quick@wg0"_ then _"systemctl start wg-quick@wg0"_ on r-vp1 and r-vp2
Enter the _"wg"_ command; if packets are being sent and received, your VPN is working.
When your infrastructure is ready, go to gsb2022 and run the **ping-sagence file** to check that everything works.
Rename the files to **wg0.conf**
Run **"systemctl enable wg-quick@wg0"** then **"systemctl start wg-quick@wg0"** on r-vp1 and r-vp2.
Enter the **"wg"** command to check that the wg0 interface is up correctly.

View File

@ -4,6 +4,11 @@
name: wireguard
state: present
- name: installation de ferm
apt:
name: ferm
state: present
- name: installation de wireguard-tools
apt:
name: wireguard-tools
@ -27,12 +32,11 @@
- name: copie du fichier de configuration
copy:
src: /root/confwg/wg0-a.conf
dest: /etc/wireguard
dest: /etc/wireguard/wg0.conf
- name: renommage fichier de configuration
command: "mv /etc/wireguard/wg0-a.conf /etc/wireguard/wg0.conf"
- name: Restart service httpd, in all cases
ansible.builtin.service:
name: wg-quick@wg0
enabled: yes
state: restarted
- name: demarrage du service wireguard
tags: aaaa
command: "systemctl enable wg-quick@wg0"
command: "systemctl restart wg-quick@wg0"

View File

@ -4,6 +4,7 @@
roles:
- base
- goss
# - proxy3
- snmp-agent
# - ssh-cli

View File

@ -3,11 +3,7 @@
connection: local
vars:
glpi_version: "9.4.5"
fd_version: "9.4+1.1"
fd_version64: "x64_2.5.2"
fd_version86: "x86_2.5.2"
glpi_version: "10.0.6"
glpi_dir: "/var/www/html/glpi"
glpi_dbhost: "127.0.0.1"
glpi_dbname: "glpi"
@ -18,7 +14,7 @@
- base
- goss
- snmp-agent
- itil
- glpi
- ssh-cli
- syslog-cli
- post

View File

@ -1,24 +1,49 @@
---
- hosts: localhost
connection: local
vars:
maria_dbhost: "192.168.102.254"
maria_dbname: "wordpress"
maria_dbuser: "wp"
maria_dbpasswd: "wp"
- hosts: all
become: true
tasks:
- name: modules python pour
apt:
name: python3-pymysql
state: present
roles:
- base
- goss
- post
#- s-lb-bd-ab
- mariadb-ab
# - role: db-user
# cli_ip: "192.168.102.1"
# - role: db-user
# cli_ip: "192.168.102.2"
# - role: db-user
# cli_ip: "192.168.102.3"
- snmp-agent
# - post
- name: install mariadb-server
apt:
name: mariadb-server
state: present
- name: Cree Bd wordpress
mysql_db:
db: wordpressdb
login_unix_socket: /var/run/mysqld/mysqld.sock
state: present
- name: Ouvre port 3306 mariadb-server
replace:
path: /etc/mysql/mariadb.conf.d/50-server.cnf
regexp: '^bind-address.*'
replace: '#bind-adress = 127.0.0.1'
backup: yes
notify: restart mariadb
- name: Create MySQL user for wordpress
mysql_user:
name: wordpressuser
password: wordpresspasswd
priv: "wordpressdb.*:ALL"
host: '%'
state: present
login_unix_socket: /var/run/mysqld/mysqld.sock
handlers:
- name: restart mariadb
ansible.builtin.service:
name: mariadb
state: restarted
roles:
- base
- goss
- post
- snmp-agent
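A quick check, from one of the web servers, that the database and grants created above are reachable (host, user and password are the values defined in this playbook; the mariadb-client package installed by the lb-web role provides the mysql command):

```shell
# From s-lb-web1 or s-lb-web2: list the databases visible to the wordpress user.
mysql -h 192.168.102.254 -u wordpressuser -pwordpresspasswd -e 'SHOW DATABASES;'
```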

View File

@ -4,8 +4,6 @@
roles:
- base
- s-lb-web-ab
- snmp-agent
- s-nas-client
- post
- lb-web
- snmp-agent

View File

@ -4,8 +4,6 @@
roles:
- base
- s-lb-web-ab
- snmp-agent
- s-nas-client
- post
- lb-web
- snmp-agent

View File

@ -5,7 +5,7 @@
roles:
- base
- goss
- s-lb-ab
- lb-front
- snmp-agent
- post

View File

@ -10,8 +10,7 @@
roles:
- base
- snmp-agent
- s-lb-wordpress
- s-nas-server
- lb-nfs-server
- ssh-cli
- syslog-cli
- post

View File

@ -1,6 +1,6 @@
#!/bin/bash
mkvmrelease="v1.2"
mkvmrelease="v1.2.1"
ovarelease="2023a"
ovafogrelease="2023a"
@ -11,7 +11,7 @@ deletemode=0
usage () {
echo "$0 - version ${mkvmrelease} - Ova version ${ovarelease}"
echo "$0 : creation VM et parametrage interfaces"
echo "usage : $0 [-r] <s-infra|r-int|r-ext|s-proxy|s-mon|s-appli|s-backup|s-itil|s-ncx|s-fog>"
echo "usage : $0 [-r] <s-adm|s-infra|r-int|r-ext|s-proxy|s-mon|s-appli|s-backup|s-itil|s-ncx|s-fog>"
echo " option -r : efface vm existante avant creation nouvelle"
exit 1
}
@ -33,7 +33,6 @@ create_vm () {
}
setif () {
VBoxManage modifyvm "$1" --nic"${2}" intnet
VBoxManage modifyvm "$1" --intnet"${2}" "$3"
VBoxManage modifyvm "$1" --nictype"${2}" 82540EM
@ -66,7 +65,9 @@ fi
vm="$1"
create_vm "${vm}"
if [[ "${vm}" == "s-infra" ]] ; then
if [[ "${vm}" == "s-adm" ]] ; then
bash addint.s-adm
elif [[ "${vm}" == "s-infra" ]] ; then
create_if "${vm}" "n-adm" "n-infra"
elif [[ "${vm}" == "s-proxy" ]] ; then
create_if "${vm}" "n-adm" "n-infra"
@ -97,11 +98,11 @@ elif [[ "${vm}" == "s-nxc" ]] ; then
create_if "${vm}" "n-adm" "n-infra"
elif [[ "${vm}" == "s-lb" ]] ; then
create_if "${vm}" "n-adm" "n-dmz" "n-dmz-lb"
elif [[ "${vm}" == "s-web1" ]] ; then
elif [[ "${vm}" == "s-lb-web1" ]] ; then
create_if "${vm}" "n-adm" "n-dmz-lb" "n-dmz-db"
elif [[ "${vm}" == "s-web2" ]] ; then
elif [[ "${vm}" == "s-lb-web2" ]] ; then
create_if "${vm}" "n-adm" "n-dmz-lb" "n-dmz-db"
elif [[ "${vm}" == "s-web3" ]] ; then
elif [[ "${vm}" == "s-lb-web3" ]] ; then
create_if "${vm}" "n-adm" "n-dmz-lb" "n-dmz-db"
elif [[ "${vm}" == "s-lb-bd" ]] ; then
create_if "${vm}" "n-adm" "n-dmz-db"

158
scripts/mkvm.ps1 Normal file
View File

@ -0,0 +1,158 @@
# POUR POUVOIR EXECUTER DES SCRIPTS POWERSHELL SOUS WINDOWS LANCER COMMANDE SUIVANTE EN ADMIN SOUS POWERSHELL
# set-executionpolicy unrestricted
#mkvm pour toutes les vms
$mkvmrelease="v1.2"
$ovarelease="2023a"
$ovafogrelease="2023a"
$ovafile="$HOME\Downloads\debian-bullseye-gsb-${ovarelease}.ova"
$ovafilefog="$HOME\Downloads\debian-buster-gsb-${ovafogrelease}.ova"
$vboxmanage="C:\Program Files\Oracle\VirtualBox\VBoxManage.exe"
$deletemode=0
#FONCTIONS
function create_vm{ param([string]$nomvm)
#Importation depuis l'ova
& "$vboxmanage" import "$ovafile" --vsys 0 --vmname "$nomvm"
Write-Host "Machine $nomvm importée"
}
function create_if{ param([string]$nomvm, [string]$nic, [int]$rang, [string]$reseau)
#Création d'une interface
if ($nomvm -and $nic -and $rang -and $reseau) {
#if ("1" -eq "1") {
if ($nic -eq "bridge") {
#Création d'une interface en pont
& "$vboxmanage" modifyvm "$nomvm" --nic"$rang" bridged
& "$vboxmanage" modifyvm "$nomvm" --nictype"$rang" 82540EM
& "$vboxmanage" modifyvm "$nomvm" --cableconnected"$rang" on
& "$vboxmanage" modifyvm "$nomvm" --nicpromisc"$rang" allow-all
Write-Host "$nomvm : IF$rang $nic"
}
elseif ($nic -eq "int") {
#Création d'une interface en reseau interne
& "$vboxmanage" modifyvm "$nomvm" --nic"$rang" intnet
& "$vboxmanage" modifyvm "$nomvm" --intnet"$rang" "$reseau"
& "$vboxmanage" modifyvm "$nomvm" --nictype"$rang" 82540EM
& "$vboxmanage" modifyvm "$nomvm" --cableconnected"$rang" on
& "$vboxmanage" modifyvm "$nomvm" --nicpromisc"$rang" allow-all
Write-Host "$nomvm : IF$rang $nic $reseau"
}
}
}
if ($args[0] -eq "s-adm") {
create_vm $args[0]
create_if $args[0] "bridge" 1 "null"
create_if $args[0] "int" 2 "n-adm"
}
elseif ($args[0] -eq "s-infra") {
create_vm $args[0]
create_if $args[0] "int" 1 "n-adm"
create_if $args[0] "int" 2 "n-infra"
#création de la première interface
& "$vboxmanage" modifyvm "s-infra" --nic1 intnet
& "$vboxmanage" modifyvm "s-infra" --intnet1 "n-adm"
& "$vboxmanage" modifyvm "s-infra" --nictype1 82540EM
& "$vboxmanage" modifyvm "s-infra" --cableconnected1 on
& "$vboxmanage" modifyvm "s-infra" --nicpromisc1 allow-all
#création de la deuxième interface
& "$vboxmanage" modifyvm "s-infra" --nic2 intnet
& "$vboxmanage" modifyvm "s-infra" --intnet2 "n-infra"
& "$vboxmanage" modifyvm "s-infra" --nictype2 82540EM
& "$vboxmanage" modifyvm "s-infra" --cableconnected2 on
& "$vboxmanage" modifyvm "s-infra" --nicpromisc2 allow-all
}
elseif ($args[0] -eq "r-int") {
create_vm($args[0])
#interface 1
& "$vboxmanage" modifyvm "r-int" --nic1 intnet
& "$vboxmanage" modifyvm "r-int" --intnet1 "n-adm"
& "$vboxmanage" modifyvm "r-int" --nictype1 82540EM
& "$vboxmanage" modifyvm "r-int" --cableconnected1 on
& "$vboxmanage" modifyvm "r-int" --nicpromisc1 allow-all
#interface 2
& "$vboxmanage" modifyvm "r-int" --nic2 intnet
& "$vboxmanage" modifyvm "r-int" --intnet2 "n-link"
& "$vboxmanage" modifyvm "r-int" --nictype2 82540EM
& "$vboxmanage" modifyvm "r-int" --cableconnected2 on
& "$vboxmanage" modifyvm "r-int" --nicpromisc2 allow-all
#interface 3
& "$vboxmanage" modifyvm "r-int" --nic3 intnet
& "$vboxmanage" modifyvm "r-int" --intnet3 "n-wifi"
& "$vboxmanage" modifyvm "r-int" --nictype3 82540EM
& "$vboxmanage" modifyvm "r-int" --cableconnected3 on
& "$vboxmanage" modifyvm "r-int" --nicpromisc3 allow-all
#interface 4
& "$vboxmanage" modifyvm "r-int" --nic4 intnet
& "$vboxmanage" modifyvm "r-int" --intnet4 "n-user"
& "$vboxmanage" modifyvm "r-int" --nictype4 82540EM
& "$vboxmanage" modifyvm "r-int" --cableconnected4 on
& "$vboxmanage" modifyvm "r-int" --nicpromisc4 allow-all
#interface 5
& "$vboxmanage" modifyvm "r-int" --nic5 intnet
& "$vboxmanage" modifyvm "r-int" --intnet5 "n-infra"
& "$vboxmanage" modifyvm "r-int" --nictype5 82540EM
& "$vboxmanage" modifyvm "r-int" --cableconnected5 on
& "$vboxmanage" modifyvm "r-int" --nicpromisc5 allow-all
}
elseif ($args[0] -eq "r-ext") {
create_vm($args[0])
#interface 1
& "$vboxmanage" modifyvm "r-ext" --nic1 intnet
& "$vboxmanage" modifyvm "r-ext" --intnet1 "n-adm"
& "$vboxmanage" modifyvm "r-ext" --nictype1 82540EM
& "$vboxmanage" modifyvm "r-ext" --cableconnected1 on
& "$vboxmanage" modifyvm "r-ext" --nicpromisc1 allow-all
#interface 2
& "$vboxmanage" modifyvm "r-ext" --nic2 intnet
& "$vboxmanage" modifyvm "r-ext" --intnet2 "n-dmz"
& "$vboxmanage" modifyvm "r-ext" --nictype2 82540EM
& "$vboxmanage" modifyvm "r-ext" --cableconnected2 on
& "$vboxmanage" modifyvm "r-ext" --nicpromisc2 allow-all
#interface 3
& "$vboxmanage" modifyvm "r-ext" --nic3 bridged
& "$vboxmanage" modifyvm "r-ext" --nictype3 82540EM
& "$vboxmanage" modifyvm "r-ext" --cableconnected3 on
& "$vboxmanage" modifyvm "r-ext" --nicpromisc3 allow-all
#interface 4
& "$vboxmanage" modifyvm "r-ext" --nic4 intnet
& "$vboxmanage" modifyvm "r-ext" --intnet4 "n-linkv"
& "$vboxmanage" modifyvm "r-ext" --nictype4 82540EM
& "$vboxmanage" modifyvm "r-ext" --cableconnected4 on
& "$vboxmanage" modifyvm "r-ext" --nicpromisc4 allow-all
#interface 5
& "$vboxmanage" modifyvm "r-ext" --nic5 intnet
& "$vboxmanage" modifyvm "r-ext" --intnet5 "n-link"
& "$vboxmanage" modifyvm "r-ext" --nictype5 82540EM
& "$vboxmanage" modifyvm "r-ext" --cableconnected5 on
& "$vboxmanage" modifyvm "r-ext" --nicpromisc5 allow-all
}
elseif ($args[0] -eq "test") {
fonction1 $args[0] $args[1]
}

5
scripts/r-vp1-post.sh Normal file
View File

@ -0,0 +1,5 @@
#!/bin/bash
#stoper le fw
systemctl stop ferm
#ouverture du service web pour copie distante
cd /root/confwg/ && python3 -m http.server 8000 &

5
scripts/r-vp2-post.sh Normal file
View File

@ -0,0 +1,5 @@
#!/bin/bash
#recuperation du fichier de config
wget http://r-vp1.gsb.adm:8000/wg0-b.qconf
#renomage fichier et mv
mv ./wg0-b.conf /etc/wireguard/wg0.conf

View File

@ -2,14 +2,14 @@ mkdir C:\gsb\partages
cd C:\gsb\partages
mkdir compta
mkdir compta
mkdir ventes
mkdir public
mkdir commun
mkdir users
cd C:\gsb
mkdir users

3
windows/mkusr-backup.cmd Normal file
View File

@ -0,0 +1,3 @@
net group gg-backup /ADD
call mkusr uBackup "u-backup" gg-backup
icacls "C:\gsb\partages\public" /Grant:r uBackup:M /T