Compare commits


31 Commits

SHA1 Message Date
36336384e6 haproxy FINAL fix 2023-01-25 15:31:26 +01:00
62f9591c62 goss s-backup 2023-01-25 15:24:53 +01:00
c32cf92cf5 fix lb-front role 2023-01-25 15:17:18 +01:00
d0ba31e795 Merge branch 'main' of https://gitea.lyc-lecastel.fr/gadmin/gsb2023 2023-01-25 11:29:48 +01:00
69aa1ac739 update goss test 2023-01-25 11:29:45 +01:00
90222678ce haproxy fix 2023-01-25 11:26:54 +01:00
1fc84c8f19 goss s-mon fix 2023-01-25 11:21:09 +01:00
b17d0fbac1 fix s-elk IP to 99.11, master DNS and related 2023-01-25 11:07:20 +01:00
edbce48966 fix2 2023-01-25 11:02:49 +01:00
56f3780480 Merge branch 'main' of https://gitea.lyc-lecastel.fr/gadmin/gsb2023 2023-01-25 10:45:47 +01:00
5eae26a67c fix lb roles 2023-01-25 10:45:36 +01:00
7711d023e8 Merge branch 'main' of https://gitea.lyc-lecastel.fr/gadmin/gsb2023 2023-01-25 10:43:19 +01:00
1777bec595 update 2023-01-25 10:43:14 +01:00
12621bb60a add readme 2023-01-25 10:28:22 +01:00
592843932c README doc update 2023-01-25 00:23:46 +01:00
abfe277180 s-backup backup.sh script: trap 2023-01-24 10:49:32 +01:00
c2eb2b85a4 fix gsb share script 2023-01-24 10:13:40 +01:00
c20f44ec6e mkusr-backup windows 2023-01-24 09:34:23 +01:00
0c7d48caf3 Merge branch 'main' of https://gitea.lyc-lecastel.fr/gadmin/gsb2023 2023-01-24 09:23:40 +01:00
12de1c8891 comment out error 2023-01-24 09:23:21 +01:00
5fffbc77e2 add echo for ping 2023-01-24 08:50:27 +01:00
b1e87cdd1e update infra ping 2023-01-23 11:32:54 +01:00
7f7207cf46 spelling 2023-01-21 17:37:36 +01:00
1187a5e28d doc... 2023-01-21 17:36:02 +01:00
8cef3cbf6b doc ... 2023-01-21 17:08:20 +01:00
49ca8325e8 doc ... 2023-01-21 16:57:44 +01:00
08973e83b3 doc role nxc-traefik 2023-01-21 16:44:56 +01:00
79c7bd34e6 Update lb-front 2023-01-20 09:46:08 +01:00
0cd9f1bb4c Fix interfaces file 2023-01-20 09:38:46 +01:00
4dd5b711e2 cleanup 2023-01-20 09:15:54 +01:00
f4361d9ecb doc update 2023-01-19 12:48:44 +01:00
71 changed files with 403 additions and 182 deletions

README.md
View File

@@ -1,35 +1,40 @@
 # gsb2023
-2023-01-18 ps
+2023-01-25 ps
 Ansible environment and playbooks for the GSB 2023 project
 ## Quickstart
-prerequisites:
+Prerequisites:
 * a Debian Bullseye machine
 * VirtualBox
-* virtual machine ova files:
-  * debian-bullseye-gsb-2023a.ova
-  * debian-buster-gsb-2023a.ova
+* virtual machine **ova** files:
+  * **debian-bullseye-gsb-2023a.ova**
+  * **debian-buster-gsb-2023a.ova**
 ## The machines
-* s-adm : adm router, DHCP + NAT, deployment, squid proxy
-* s-infra : master DNS
-* r-int : routing, DHCP
-* r-ext : routing, NAT
-* s-proxy : squid
-* s-itil : GLPI server
-* s-backup : slave DNS + s-win backup
-* s-mon : monitoring with **Nagios4** and syslog
-* s-fog : workstation deployment with **FOG**
-* s-win : Windows Server 2019, AD, DNS, DHCP, file sharing
-* s-nxc : NextCloud with **docker**
-* s-elk : dockerized ELK stack
-* s-lb : **HaProxy** load balancer for the Wordpress application
-* r-vp1 : Wireguard VPN router, head-office side
-* r-vp2 : Wireguard VPN router, agency side, DHCP
+* **s-adm** : adm router, DHCP + NAT, deployment, squid proxy
+* **s-infra** : master DNS, browser autoconfiguration with **wpad**
+* **r-int** : routing, DHCP
+* **r-ext** : routing, NAT
+* **s-proxy** : squid
+* **s-itil** : GLPI server
+* **s-backup** : slave DNS + s-win backup (SMB)
+* **s-mon** : monitoring with **Nagios4**, notifications and syslog
+* **s-fog** : workstation deployment with **FOG**
+* **s-win** : Windows Server 2019, AD, DNS, DHCP, file sharing
+* **s-nxc** : NextCloud with **docker**
+* **s-elk** : dockerized ELK stack
+* **s-lb** : **HaProxy** load balancer for the Wordpress application (DMZ)
+* **r-vp1** : Wireguard VPN router, head-office side
+* **r-vp2** : Wireguard VPN router, agency side, DHCP
+* **s-agence** : agency server
+* **s-lb-web1** : Wordpress server 1, behind the load balancer
+* **s-lb-web2** : Wordpress server 2, behind the load balancer
+* **s-lb-db** : Mariadb server for Wordpress
+* **s-lb-nfs** : NFS server for the Wordpress application
 ## The playbooks
@@ -39,7 +44,7 @@ Prerequisites:
 The following virtual machine image is used:
 * **debian-bullseye-2023a.ova** (2023-01-06)
-  * Debian Bullseye 11 - 2 NICs - 1 GB RAM - 20 GB storage
+  * Debian Bullseye 11.6 - 2 NICs - 1 GB RAM - 20 GB storage
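To bring a machine up by hand from the OVA (the mkvm scripts further down automate this), a minimal sketch using the download path that mkvm.pl assumes:

```bash
# Import the OVA and name the VM; file name and VM name follow the Quickstart above
VBoxManage import ~/Téléchargements/debian-bullseye-gsb-2023a.ova --vsys 0 --vmname s-adm
```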
### Machine s-adm

View File

@@ -1,67 +1,56 @@
 file:
   /etc/wireguard/wg0.conf:
     exists: true
     mode: "0644"
     owner: root
     group: root
     filetype: file
     contains:
     - AllowedIPs = 10.0.0.2/32, 172.16.128.0/24
 package:
 # ferm:
 #   installed: true
-  strongswan:
+  wireguard:
     installed: true
-port:
-  udp:68:
-    listening: true
+    versions:
+    - 1.0.20210223-1
+  wireguard-tools:
+    installed: true
+    versions:
+    - 1.0.20210223-1
 service:
 # dnsmasq:
 #   enabled: true
 #   running: true
-  strongswan:
-    enabled: true
-    running: true
-  ssh:
+  wg-quick@wg0:
     enabled: true
     running: true
 command:
-  sysctl net.ipv4.ip_forward:
+  host 192.168.99.99:
     exit-status: 0
     stdout:
-    - net.ipv4.ip_forward = 1
+    - 99.99.168.192.in-addr.arpa domain name pointer s-adm.gsb.adm.
     stderr: []
     timeout: 10000
-command:
-  ping -c 4 192.168.0.52:
+  ping -c4 10.0.0.2:
     exit-status: 0
     stdout:
-    - 4 received = 1
+    - 0% packet loss
     stderr: []
     timeout: 10000
-command:
-  ping -c 4 192.168.1.1:
-    exit-status: 0
-    stdout:
-    - 4 received = 1
-    stderr: []
-    timeout: 10000
-command:
-  ping -c 4 192.168.200.254:
-    exit-status: 0
-    stdout:
-    - 4 received = 1
-    stderr: []
-    timeout: 10000
-command:
-  ping -c 4 172.16.0.1:
-    exit-status: 0
-    stdout:
-    - 4 received = 1
-    stderr: []
-    timeout: 10000
 #process:
 #  dnsmasq:
 #    running: true
 #  squid:
 #    running: true
 interface:
   enp0s3:
     exists: true
     addrs:
     - 192.168.99.112/24
     mtu: 1500
   enp0s8:
     exists: true
     addrs:
-    - 192.168.0.51/24
+    - 192.168.1.2/24
     mtu: 1500
   enp0s9:
     exists: true
     addrs:
-    - 192.168.1.2/24
+    - 192.168.0.51/24
     mtu: 1500
+  wg0:
+    exists: true
+    addrs:
+    - 10.0.0.1/32
+    mtu: 1420
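A manual counterpart to these checks once wg0 is up on r-vp1:

```bash
wg show wg0        # tunnel and peer state; AllowedIPs should match the config above
ping -c4 10.0.0.2  # the peer address the goss file also pings
```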

goss/s-backup.yaml Normal file
View File

@@ -0,0 +1,41 @@
+package:
+  bind9:
+    installed: true
+  cifs-utils:
+    installed: true
+  rsync:
+    installed: true
+  smbclient:
+    installed: true
+service:
+  bind9:
+    enabled: true
+    running: true
+  rsync:
+    enabled: true
+    running: false
+command:
+  ping -c4 ns.gsb.lan:
+    exit-status: 0
+    stdout:
+    - 0% packet loss
+    stderr: []
+    timeout: 10000
+  # check that the windows share is accessible
+  smbclient -L //s-win --user=uBackup%Azerty1+ | grep 'public':
+    exit-status: 0
+    stdout:
+    - public
+    stderr: []
+    timeout: 10000
+interface:
+  enp0s3:
+    exists: true
+    addrs:
+    - 192.168.99.4/24
+    mtu: 1500
+  enp0s8:
+    exists: true
+    addrs:
+    - 172.16.0.4/24
+    mtu: 1500
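These checks run on s-backup itself; a typical invocation, assuming the goss binary is on PATH and the YAML was copied to /root (the destination the deleted role below used for goss.yaml):

```bash
goss -g /root/s-backup.yaml validate --format documentation
```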

View File

@@ -49,7 +49,7 @@ interface:
   enp0s3:
     exists: true
     addrs:
-    - 192.168.99.104/24
+    - 192.168.99.8/24
   enp0s8:
     exists: true
     addrs:

View File

@@ -10,18 +10,23 @@
 192.168.99.3 s-appli.gsb.adm
 192.168.99.4 s-backup.gsb.adm
 192.168.99.5 s-puppet.gsb.adm
-192.168.99.6 s-win.gsb.adm
+192.168.99.6 s-win.gsb.adm
 192.168.99.7 s-nxc.gsb.adm
 192.168.99.8 s-mon.gsb.adm
 192.168.99.9 s-itil.gsb.adm
-192.168.99.10 s-sspec.gsb.adm
-192.168.99.11 s-web-ext.gsb.adm
+192.168.99.10 s-lb.gsb.adm
+192.168.99.11 s-elk.gsb.adm
+192.168.99.10 s-dns.gsb.adm
 192.168.99.12 r-int.gsb.adm
 192.168.99.13 r-ext.gsb.adm
 192.168.99.14 s-nas.gsb.adm
 192.168.99.15 s-san.gsb.adm
 192.168.99.16 s-fog.gsb.adm
+192.168.99.50 s-lb-bd.gsb.adm
+192.168.99.101 s-lb-web1.gsb.adm
+192.168.99.102 s-lb-web2.gsb.adm
+192.168.99.103 s-lb-web3.gsb.adm
+192.168.99.8 syslog.gsb.adm

View File

@@ -11,16 +11,20 @@
 192.168.99.3 s-appli.gsb.adm
 192.168.99.4 s-backup.gsb.adm
 192.168.99.5 s-puppet.gsb.adm
-192.168.99.6 s-win.gsb.adm
+192.168.99.6 s-win.gsb.adm
 192.168.99.7 s-nxc.gsb.adm
 192.168.99.8 s-mon.gsb.adm
 192.168.99.9 s-itil.gsb.adm
-192.168.99.10 s-sspec.gsb.adm
-192.168.99.11 s-web-ext.gsb.adm
+192.168.99.10 s-lb.gsb.adm
+192.168.99.11 s-elk.gsb.adm
+192.168.99.10 s-dns.gsb.adm
 192.168.99.12 r-int.gsb.adm
 192.168.99.13 r-ext.gsb.adm
 192.168.99.14 s-nas.gsb.adm
+192.168.99.50 s-lb-bd.gsb.adm
+192.168.99.101 s-lb-web1.gsb.adm
+192.168.99.102 s-lb-web2.gsb.adm
+192.168.99.103 s-lb-web3.gsb.adm
+192.168.99.8 syslog.gsb.adm

View File

@@ -5,7 +5,7 @@
 ;
 $TTL 604800
 @ IN SOA s-infra.gsb.lan. root.s-infra.gsb.lan. (
-2022041200 ; Serial
+2023012500 ; Serial
 7200 ; Refresh
 86400 ; Retry
 8419200 ; Expire
@@ -25,7 +25,7 @@ s-nxc IN A 172.16.0.7
 s-docker IN A 172.16.0.7
 s-mon IN A 172.16.0.8
 s-itil IN A 172.16.0.9
-s-elk IN A 172.16.0.10
+s-elk IN A 172.16.0.11
 s-gestsup IN A 172.16.0.17
 r-int IN A 172.16.0.254
 r-int-lnk IN A 192.168.200.254

View File

@@ -5,7 +5,7 @@
 ;
 $TTL 604800
 @ IN SOA s-infra.gsb.lan. root.s-infra.gsb.lan. (
-2022041200 ; Serial
+2023012500 ; Serial
 7200 ; Refresh
 86400 ; Retry
 8419200 ; Expire
@@ -20,12 +20,12 @@ $TTL 604800
 6.0 IN PTR s-win.gsb.lan.
 7.0 IN PTR s-nxc.gsb.lan.
 8.0 IN PTR s-mon.gsb.lan.
-9.0 IN PTR s-itil.gsb.lan.
+9.0 IN PTR s-itil.gsb.lan.
 101.1 IN PTR s-web1
 101.2 IN PTR s-web2
-100.10 IN PTR s-lb
+100.10 IN PTR s-lb.gsb.lan
-10.0 IN PTR s-elk.gsb.lan.
+11.0 IN PTR s-elk.gsb.lan.
 17.0 IN PTR s-gestsup.lan
 254.0 IN PTR r-int.gsb.lan.
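After a zone edit, bumping the date-based serial (here to 2023012500) is what tells the s-backup slave to transfer; a quick syntax check and reload, with file paths assumed from a standard bind9 layout:

```bash
named-checkzone gsb.lan /etc/bind/db.gsb.lan                  # forward zone
named-checkzone 16.172.in-addr.arpa /etc/bind/db.gsb.lan.rev  # reverse zone
rndc reload
```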

View File

@@ -44,7 +44,6 @@ backend fermeweb
     #option httpchk HEAD / HTTP/1.0
     server s-lb-web1 192.168.101.1:80 check
     server s-lb-web2 192.168.101.2:80 check
-    #server s-lb-web3 192.168.101.3:80 check
 listen stats

View File

@@ -0,0 +1,25 @@
+- name: install haproxy
+  apt:
+    name: haproxy
+    state: present
+
+- name: configure frontend and backend
+  blockinfile:
+    path: /etc/haproxy/haproxy.cfg
+    block: |
+      frontend proxypublic
+        bind 192.168.100.10:80
+        default_backend fermeweb
+
+      backend fermeweb
+        balance roundrobin
+        option httpclose
+        #option httpchk HEAD / HTTP/1.0
+        server s-lb-web1 192.168.101.1:80 check
+        server s-lb-web2 192.168.101.2:80 check
+
+- name: restart haproxy
+  service:
+    name: haproxy
+#    state: restarted
+    enabled: yes
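With `state: restarted` commented out, the role never bounces the service after rewriting the configuration; one safe manual follow-up (a sketch, not part of the role):

```bash
# restart only if the generated configuration parses
haproxy -c -f /etc/haproxy/haproxy.cfg && systemctl restart haproxy
```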

View File

@@ -6,5 +6,5 @@ This role:
 * copies the **exports** configuration file to export the **/home/wordpress** directory
 * restarts the **nfs-server** service
-*** Objective
+### Objective
 The **/home/wordpress** directory is exported over **nfs** to the **n-dmz-db** network
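A quick verification from one of the web servers (the NFS server name is assumed from the machine list above):

```bash
showmount -e s-lb-nfs                        # the export list should show /home/wordpress
mount -t nfs s-lb-nfs:/home/wordpress /mnt   # then check the wordpress files are visible
```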

View File

@@ -2,34 +2,36 @@
 Nextcloud and Traefik run on docker. For this playbook to work, docker must already be installed.
-## First
+## 1.
-The playbook will create the nxc directory at the root of root's home directory. Two docker-compose files, "nextcloud.yml" and "traefik.yml", will be copied into it from the playbook's "files" directory.
-Finally, the certs and config directories will be created in the nxc directory.
+The playbook creates the **nxc** directory at the root of root's home directory.
+The files "nextcloud.yml" and "traefik.yml" are copied into it from the playbook's "files" directory.
+Finally, the **certs** and **config** directories are created in the nxc directory.
-### Second
-The playbook will copy the files placed in "files" and put them in the right directories.
+## 2. Copying the files
+The playbook copies the files placed in "files" and puts them in the right directories.
-#### Third
-The playbook will create an x509 certificate using mkcert, a tool for creating
-self-signed certificates. For that it downloads mkcert on s-adm (use the getall).
-mkcert will be placed in: /usr/local/bin/
-To create the certificate, the playbook will execute command lines (run from nxc/):
+## 3. Generating the certificate
+The playbook creates an **x509** certificate using **mkcert**, a tool for creating self-signed certificates. For that, it downloads **mkcert** on **s-adm** (use the **getall** script).
+**mkcert** is placed in: /usr/local/bin/
+To create the certificate, the playbook executes these commands (run from nxc/):
 ```
 /usr/local/bin/mkcert -install # Installs mkcert
 /usr/local/bin/mkcert -key-file key.pem -cert-file cert.pem "hôte.domaine.local" "*.domaine.local" # Creates the certificate for the specified DNS names
 ```
-##### Fourth
+## 4. Launch
-The playbook will launch the "docker-compose" files, namely nextcloud.yml and traefik.yml.
-This installs the solutions automatically. Nextcloud is then functional with
-a reverse proxy redirecting to HTTPS.
+The playbook launches the "docker-compose" files, nextcloud.yml and traefik.yml, which start the two **docker** stacks.
+Nextcloud is then functional, with the **traefik** reverse proxy handling the redirect to HTTPS.
 WARNING: after restarting the VM, run the "nxc-start.sh" script to bring the application stacks back up.
-Once the script finishes, go to the site:
-https://s-nxc.gsb.lan
+Once the script finishes, the site is available at: https://s-nxc.gsb.lan
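Going by the description above, nxc-start.sh presumably boils down to something like this (a sketch; the compose file names come from the doc, the path from step 1):

```bash
cd /root/nxc
docker-compose -f traefik.yml up -d
docker-compose -f nextcloud.yml up -d
```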

View File

@@ -8,37 +8,30 @@ iface lo inet loopback
 # N-adm network
 allow-hotplug enp0s3
 iface enp0s3 inet static
-  address 192.168.99.12
-  netmask 255.255.255.0
+  address 192.168.99.12/24
 # link network to r-ext
 allow-hotplug enp0s8
 iface enp0s8 inet static
-  address 192.168.200.254
-  netmask 255.255.255.0
+  address 192.168.200.254/24
   gateway 192.168.200.253
-  up ip route add default via 192.168.200.253
 # wifi network
 allow-hotplug enp0s9
 iface enp0s9 inet static
-  address 172.16.65.254
-  netmask 255.255.255.0
+  address 172.16.65.254/24
 # user network
 allow-hotplug enp0s10
 iface enp0s10 inet static
-  address 172.16.64.254
-  netmask 255.255.255.0
+  address 172.16.64.254/24
 # infra network
 allow-hotplug enp0s16
 iface enp0s16 inet static
-  address 172.16.0.254
-  netmask 255.255.255.0
-  up /root/routagenat
+  address 172.16.0.254/24
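After switching the stanzas to CIDR form, the change can be applied and checked without a reboot (standard ifupdown tooling):

```bash
systemctl restart networking   # or: ifdown enp0s8 && ifup enp0s8
ip -br addr show               # confirm the /24 addresses above
```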

View File

@@ -8,13 +8,13 @@ iface lo inet loopback
 # N-adm side
 allow-hotplug enp0s3
 iface enp0s3 inet static
-  address 192.168.99.10
+  address 192.168.99.11
   netmask 255.255.255.0
   gateway 192.168.99.99
 # N-infra side
 allow-hotplug enp0s8
 iface enp0s8 inet static
-  address 172.16.0.10
+  address 172.16.0.11
   netmask 255.255.255.0
   post-up route add -net 172.16.64.0/24 gw 172.16.0.254

View File

@@ -3,11 +3,11 @@
 - name: copy the sysctl.conf file
   copy: src=sysctl.conf dest=/etc/
-- name: copy the routing script
-  copy: src=routagenat dest=/root/
+#- name: copy the routing script
+#  copy: src=routagenat dest=/root/
-- name: make the script executable
-  shell: chmod +x /root/routagenat
+#- name: make the script executable
+#  shell: chmod +x /root/routagenat
 #- name: run the script
 #  script: /root/routagenat
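With the script steps commented out, forwarding now comes only from the copied sysctl.conf; it can be confirmed the same way the goss files do:

```bash
sysctl net.ipv4.ip_forward   # expect: net.ipv4.ip_forward = 1
```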

View File

@@ -1,11 +0,0 @@
----
-- name: install HAproxy
-  apt: pkg=haproxy state=present update_cache=yes
-- name: configuration file
-  copy: src=haproxy.cfg dest=/etc/haproxy/haproxy.cfg
-  notify:
-    - restart haproxy
-- name: copy goss
-  copy: src=goss.yaml dest=/root

View File

@@ -1,27 +1,51 @@
 #!/bin/bash
 BDIR=/home/backup
 SWIN=/tmp/s-win
+LOCK=/tmp/s-backup.lock
+# cleanup function: release the lock and unmount so we exit cleanly in every case
+cleanup()
+{
+  rm "${LOCK}"
+  umount "${SWIN}"
+  echo "cleanup done, exiting cleanly ..."
+  exit 3
+}
+# already running? exit if the lock file exists
+if [ -e "${LOCK}" ] ; then
+  echo "$0 : lock present, already running"
+  exit 4
+fi
+# clean up on HUP, INT, QUIT and ABRT
+trap cleanup 1 2 3 6
+# prepare the directories receiving the data to back up
 [ -d "${BDIR}" ] || mkdir "${BDIR}"
-[ -d "${BDIR}" ] || mkdir "${BDIR}/s-win"
+[ -d "${BDIR}/s-win" ] || mkdir "${BDIR}/s-win"
 [ -d "${SWIN}" ] || mkdir "${SWIN}"
+# take the lock
+touch "${LOCK}"
-mount -t cifs -o ro,vers=3.0,username=u-backup,password=Azerty1+ //s-win/commun "${SWIN}"
+mount -t cifs -o ro,vers=3.0,username=uBackup,password=Azerty1+ //s-win/commun "${SWIN}"
 if [ $? != 0 ] ; then
-  echo "$0 : mount error"
-  exit 1
+  echo "$0 : mount error ${SWIN}"
+  cleanup
 fi
 rsync -av "${SWIN}/" "${BDIR}/s-win/commun"
 umount "${SWIN}"
-mount -t cifs -o ro,vers=3.0,username=u-backup,password=Azerty1+ //s-win/public "${SWIN}"
+mount -t cifs -o ro,vers=3.0,username=uBackup,password=Azerty1+ //s-win/public "${SWIN}"
 if [ $? != 0 ] ; then
-  echo "$0 : mount error"
-  exit 2
+  echo "$0 : mount error ${SWIN}"
+  cleanup
 fi
 rsync -av "${SWIN}/" "${BDIR}/s-win/public"
 umount "${SWIN}"
+# release the lock
+rm "${LOCK}"
 exit 0
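A manual dry run on s-backup (same credentials as the script; smbclient is one of the packages the goss file above checks):

```bash
smbclient -L //s-win --user=uBackup%Azerty1+   # commun and public must be listed
./backup.sh && ls -R /home/backup/s-win        # then inspect the copied trees
```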

View File

@@ -0,0 +1,5 @@
+# add a sleep 5
+edit "/etc/init.d/isc-dhcp-server"
+go to the "case \"$1\" in" block and add a "sleep 5" before the "if"
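The resulting fragment of /etc/init.d/isc-dhcp-server would look roughly like this (only the sleep is new; the existing start logic is unchanged):

```bash
case "$1" in
  start)
    sleep 5   # let the interfaces come up before dhcpd binds to them
    # ... existing start logic ...
    ;;
esac
```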

View File

@@ -17,5 +17,5 @@
 #- name: copy the configuration file from r-vp1
 #  command: "sshpass -p 'root' scp -r root@192.168.99.112:/root/confwg/wg0-b.conf /etc/wireguard/"
-- name: rename the configuration file
-  command: "mv /etc/wireguard/wg0-b.conf /etc/wireguard/wg0.conf"
+#- name: rename the configuration file
+#  command: "mv /etc/wireguard/wg0-b.conf /etc/wireguard/wg0.conf"
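With these tasks commented out, the copy and rename are manual; the equivalent, reusing the address and path from the commented tasks:

```bash
# on r-vp2 (r-vp1 is 192.168.99.112 per the task above)
scp root@192.168.99.112:/root/confwg/wg0-b.conf /etc/wireguard/wg0.conf
systemctl enable --now wg-quick@wg0
```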

View File

@@ -4,6 +4,7 @@
   roles:
     - base
     - goss
+#   - proxy3
     - snmp-agent
 #   - ssh-cli

View File

@@ -4,8 +4,8 @@
   roles:
     - base
-    - s-lb-web-ab
+    - lb-web
     - snmp-agent
-    - s-nas-client
+    - lb-nfs-client
     - post

View File

@@ -4,8 +4,8 @@
   roles:
     - base
-    - s-lb-web-ab
+    - lb-web
     - snmp-agent
-    - s-nas-client
+    - lb-nfs-client
     - post

View File

@@ -1,11 +0,0 @@
----
-- hosts: localhost
-  connection: local
-  roles:
-    - base
-    - s-lb-web-ab
-    - snmp-agent
-    - s-nas-client
-    - post

View File

@@ -1,18 +0,0 @@
----
-- hosts: localhost
-  connection: local
-  vars:
-    wp_mysql_db: "wordpress"
-    wp_mysql_user: "wp"
-    wp_mysql_password: "wp"
-    wp_mysql_host: "192.168.102.50"
-  roles:
-    - base
-    - goss
-    - apache2
-    - s-lb-wordpress
-    - snmp-agent
-    - post
-    - mysql
-    - php-fpm

View File

@@ -5,7 +5,7 @@
   roles:
     - base
     - goss
-    - s-lb-ab
+    - lb-front
     - snmp-agent
     - post

View File

@@ -10,8 +10,8 @@
   roles:
     - base
     - snmp-agent
-    - s-lb-wordpress
-    - s-nas-server
+    - lb-web
+    - lb-nfs-server
     - ssh-cli
     - syslog-cli
     - post
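All of these playbooks target localhost with a local connection, so each is presumably run on its own VM after cloning the repo (the playbook file name here is assumed):

```bash
cd gsb2023
ansible-playbook s-lb-nfs.yml   # hypothetical file name for this role set
```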

View File

@@ -97,11 +97,11 @@ elif [[ "${vm}" == "s-nxc" ]] ; then
   create_if "${vm}" "n-adm" "n-infra"
 elif [[ "${vm}" == "s-lb" ]] ; then
   create_if "${vm}" "n-adm" "n-dmz" "n-dmz-lb"
-elif [[ "${vm}" == "s-web1" ]] ; then
+elif [[ "${vm}" == "s-lb-web1" ]] ; then
   create_if "${vm}" "n-adm" "n-dmz-lb" "n-dmz-db"
-elif [[ "${vm}" == "s-web2" ]] ; then
+elif [[ "${vm}" == "s-lb-web2" ]] ; then
   create_if "${vm}" "n-adm" "n-dmz-lb" "n-dmz-db"
-elif [[ "${vm}" == "s-web3" ]] ; then
+elif [[ "${vm}" == "s-lb-web3" ]] ; then
   create_if "${vm}" "n-adm" "n-dmz-lb" "n-dmz-db"
 elif [[ "${vm}" == "s-lb-bd" ]] ; then
   create_if "${vm}" "n-adm" "n-dmz-db"

scripts/mkvm.pl Executable file
View File

@ -0,0 +1,135 @@
#!/usr/pbin/perl
use strict;
use warnings;
use v5.10;
my $ovarelease = "2023a";
my $ovafogrelease = "2023a";
my $home=$ENV{'HOME'};
my $ovafile="$home/Téléchargements/debian-bullseye-gsb-".$ovarelease.".ova";
my $ovafilefog="$home/Téléchargements/debian-buster-gsb-i".$ovafogrelease.".ova";
my %vmtab = (
#name => "s-adm", params => ":1024:"
name => "s-adm", params => ":1024:n-adm:n-infra",
name => "s-infra", params => ":1024:n-adm:n-infra",
name => "s-proxy", params => ":1024:n-adm:n-infra",
name => "r-int", params => ":1024:n-adm:n-link:n-wifi:n-user:n-infra",
name => "r-ext", params => ":1024:n-adm:n-dmz:eno1:n-linkvi:n-link",
name => "s-mon", params => ":1024:n-adm:n-infra",
name => "s-itil", params => ":1024:n-adm:n-infra",
name => "s-appli", params => ":1024:n-adm:n-infra",
name => "s-backup", params => ":1024:n-adm:n-infra",
name => "s-fog", params => ":1024:n-adm:n-infra:n-user",
name => "s-nxc", params => ":1024:n-adm:n-infra",
)
sub usage () {
say "$0 - version ${ovarelease}";
say "$0 : creation VM et parametrage interfaces";
say "usage : $0 <s-infra|r-int|r-ext|s-proxy|s-mon>";
exit 1 ;
}
sub create_vm () {
my $nom = shift ;
my $nomova = shift ;
if ( $nom == "s-fog" ) {
$nomova = $ovafilefog;
}
if ( ! -r $nomova ) {
say "$0 : erreur ouverture fichier $nomova ..."
exit 3
}
qx (vboxmanage import $nomova --vsys 0 --vmname $nom);
}
sub setif () {
my ($vm, $interf, $parm) = @ARGV;
qx(VBoxManage modifyvm $vm --nic"${2}" intnet);
qx(VBoxManage modifyvm $vm --intnet"${2}" "$3");
qx(VBoxManage modifyvm $vm --nictype"${2}" 82540EM);
qx(VBoxManage modifyvm $vm --cableconnected"${2}" on);
qx(VBoxManage modifyvm $vm --nicpromisc"${2}" allow-all);
}
sub create_if () {
# enp0s3
setif "$1" 1 "$2"
setif "$1" 2 "$3"
#(enp0s8)
}
create_vm "${vm}"
if [[ "${vm}" == "s-infra" ]] ; then
create_if "${vm}" "n-adm" "n-infra"
elif [[ "${vm}" == "s-proxy" ]] ; then
create_if "${vm}" "n-adm" "n-infra"
elif [[ "${vm}" == "r-int" ]] ; then
# n-adm, n-link, n-wifi, n-user, n-infra
create_if "${vm}" "n-adm" "n-infra"
setif "${vm}" 2 "n-link"
setif "${vm}" 3 "n-wifi"
setif "${vm}" 4 "n-user"
setif "${vm}" 5 "n-infra"
elif [[ "${vm}" == "r-ext" ]] ; then
./addint.r-ext
elif [[ "${vm}" == "s-mon" ]] ; then
create_if "${vm}" "n-adm" "n-infra"
elif [[ "${vm}" == "s-appli" ]] ; then
create_if "${vm}" "n-adm" "n-infra"
elif [[ "${vm}" == "s-backup" ]] ; then
create_if "${vm}" "n-adm" "n-infra"
elif [[ "${vm}" == "s-itil" ]] ; then
create_if "${vm}" "n-adm" "n-infra"
elif [[ "${vm}" == "s-nxc" ]] ; then
create_if "${vm}" "n-adm" "n-infra"
elif [[ "${vm}" == "s-fog" ]] ; then
create_if "${vm}" "n-adm" "n-infra"
setif "${vm}" 3 "n-user"
elif [[ "${vm}" == "s-DNS-ext" ]] ; then
create_if "${vm}" "n-adm" "n-dmz"
elif [[ "${vm}" == "s-web-ext" ]] ; then
create_if "${vm}" "n-adm" "n-dmz"
elif [[ "${vm}" == "s-nxc" ]] ; then
create_if "${vm}" "n-adm" "n-infra"
elif [[ "${vm}" == "s-lb" ]] ; then
create_if "${vm}" "n-adm" "n-dmz" "n-dmz-lb"
elif [[ "${vm}" == "s-web1" ]] ; then
create_if "${vm}" "n-adm" "n-dmz-lb" "n-dmz-db"
# setif "${vm}" 3 "n-dmz-lb"
elif [[ "${vm}" == "s-web2" ]] ; then
create_if "${vm}" "n-adm" "n-dmz-lb" "n-dmz-db"
elif [[ "${vm}" == "s-web3" ]] ; then
create_if "${vm}" "n-adm" "n-dmz-lb" "n-dmz-db"
# setif "${vm}" 3 "n-dmz-lb"
elif [[ "${vm}" == "s-lb-bd" ]] ; then
create_if "${vm}" "n-adm" "n-dmz-db"
elif [[ "${vm}" == "s-nas" ]] ; then
create_if "${vm}" "n-adm" "n-dmz-db"
elif [[ "${vm}" == "r-vp1" ]] ; then
./addint.r-vp1
elif [[ "${vm}" == "r-vp2" ]] ; then
./addint.r-vp2
elif [[ "${vm}" == "s-agence" ]] ; then
create_if "${vm}" "n-adm" "n-agence"
else
echo "$0 : vm ${vm} non prevu"
exit 2
fi
sub usage () {
print "usage : mkvm.pl <vm-name>\n";
print "cree une VM a partir d'un fichier OVA et parametre les interfaces";
exit 1 ;
}
sub main () {
my $vm = shift;
usage unless ( $vm);
}
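Typical use on the VirtualBox host, per the usage() text:

```bash
./scripts/mkvm.pl s-infra   # imports the OVA and wires n-adm + n-infra
```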

View File

@@ -2,14 +2,14 @@ mkdir C:\gsb\partages
 cd C:\gsb\partages
-mkdir compta
+mkdir compta
 mkdir ventes
 mkdir public
 mkdir commun
-mkdir users
 cd C:\gsb
+mkdir users

windows/mkusr-backup.cmd Normal file
View File

@@ -0,0 +1,3 @@
+net group gg-backup /ADD
+call mkusr uBackup "u-backup" gg-backup
+icacls "C:\gsb\partages\public" /Grant:r uBackup:M /T

View File

@@ -1,14 +1,22 @@
 #!/bin/bash
+echo ping r-vp2 gateway interface
 ping -c3 172.16.128.254
+echo ping r-vp1 n-linkv interface
 ping -c3 192.168.1.2
+echo ping r-ext n-linkv interface
 ping -c3 192.168.1.1
+echo ping r-ext n-link interface
 ping -c3 192.168.200.253
+echo ping r-int n-link interface
 ping -c3 192.168.200.254
+echo ping r-int infra-side interface
 ping -c3 172.16.0.254
+echo ping s-infra
 ping -c3 172.16.0.1

View File

@@ -1,14 +1,22 @@
 #!/bin/bash
+echo ping s-infra
 ping -c3 172.16.0.1
+echo ping r-int n-infra interface
 ping -c3 172.16.0.254
+echo ping r-int n-link interface
 ping -c3 192.168.200.254
+echo ping r-ext n-linkv interface
 ping -c3 192.168.1.1
+echo ping r-vp1 n-linkv interface
 ping -c3 192.168.1.2
+echo ping r-vp2 n-ag interface
 ping -c3 172.16.128.254
+echo ping s-agence
+ping -c3 172.16.128.10

View File

@@ -1,12 +1,19 @@
 #!/bin/bash
+echo ping s-infra
 ping -c3 172.16.0.1
+echo ping r-ext n-link interface
 ping -c3 192.168.200.253
+echo ping r-ext n-linkv interface
 ping -c3 192.168.1.1
+echo ping r-vp1 n-link interface
 ping -c3 192.168.1.2
+echo ping r-vp2 n-ag interface
 ping -c3 172.16.128.254
+echo ping s-agence
+ping -c3 172.16.128.10

View File

@@ -1,14 +1,21 @@
 #!/bin/bash
+echo ping r-int
 ping -c3 172.16.0.254
+echo ping r-int external interface
 ping -c3 192.168.200.254
+echo ping r-ext internal interface
 ping -c3 192.168.200.253
+echo ping r-ext link interface
 ping -c3 192.168.1.1
+echo ping r-vp1 n-linkv link interface
 ping -c3 192.168.1.2
-ping -c3 172.16.125.254
+echo ping r-vp2 internal interface
+ping -c3 172.16.128.254
-ping -c3 172.16.128.10
+echo ping s-agence
+ping -c3 172.16.128.11