Compare commits


No commits in common. "main" and "v0.0.5s-em" have entirely different histories.

60 changed files with 164 additions and 532 deletions

View File

@ -1,6 +1,6 @@
# gsb2023
-2023-02-02 ps
+2023-02-01 ps
Environnement et playbooks ansible pour le projet GSB 2023
@ -53,15 +53,15 @@ On utilsera le script (bash) **mkvm** ou (PowerShell) **mkvm.ps1** pour créeer
```shell
gsb2023>
-cd scripts
+cd pre
$ mkvm -r s-adm
```
### Machine s-adm
-* créer la machine virtuelle **s-adm** avec **mkvm** comme décrit plus haut.
-* utiliser le script de renommage comme suit --> `bash chname <nouveau_nom_de_machine>` , puis redémarrer
-* utiliser le script **s-adm-start** : `bash s-adm-start` , puis redémarrer
+* créer la machine virtuelle **s-adm** avec **mkvm * comme décrit plus haut.
+* utiliser le script de renommage comme suit --> bash chname [nouveau_nom_de_machine] puis redémarrer
+* utiliser le script s-adm-start --> bash s-adm-start, redémarrer
* ou sinon :
```shell
mkdir -p tools/ansible ; cd tools/ansible
@ -79,9 +79,9 @@ $ mkvm -r s-adm
### Pour chaque machine
- créer la machine avec **mkvm -r**, les cartes réseau sont paramétrées par **mkvm** selon les spécifications
-- utiliser le script de renommage comme suit : `bash chname <nouveau_nom_de_machine>`
+- utiliser le script de renommage comme suit --> bash chname [nouveau_nom_de_machine]
- redémarrer
-- utiliser le script **gsb-start** : `bash gsb-start`
+- utiliser le script gsb-start --> bash gsb-start
- ou sinon:
```shell
mkdir -p tools/ansible ; cd tools/ansible
@ -94,16 +94,3 @@ bash pull-config
```
- redémarrer
- **Remarque** : une machine doit avoir été redémarrée pour prendre en charge la nouvelle configuration
## Les tests
Il peuvent êtres mis en oeuvre avec **goss** de la façon suivante : chaque machine installée dispose d'un fichier de test ad-hoc portant le nom de la machine elle-même (machine.yml).
```
cd tools/ansible/gsb2023
bash agoss # lance le test portant le nom de la machine
```
`bash agoss -f tap` permet de lancer le test avec le détail d'exécution
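For reference, `agoss` is the repository's wrapper around the upstream **goss** tool; a direct invocation would look roughly like this (a sketch only, the exact wrapper behaviour is an assumption, and the test-file extension varies between `.yml` and `.yaml` in this diff):
```shell
# run the test file named after the current machine, with TAP-formatted detail
cd tools/ansible/gsb2023
goss -g goss/$(hostname).yaml validate --format tap
```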

View File

@ -1,26 +0,0 @@
port:
tcp:5044:
listening: true
ip:
- 0.0.0.0
tcp:5601:
listening: true
ip:
- 0.0.0.0
tcp:9200:
listening: true
ip:
- 0.0.0.0
service:
docker:
enabled: true
running: true
interface:
enp0s3:
exists: true
addrs:
- 192.168.99.11/24
enp0s8:
exists: true
addrs:
- 172.16.0.11/24

View File

@ -1,13 +0,0 @@
---
- hosts: localhost
connection: local
vars:
- ip1: 192.168.0.51
- remip: 192.168.0.52
- mynet: 192.168.1.0
- remnet: 172.16.128.0
roles:
- fw-ferm

View File

@ -12,8 +12,10 @@
- base
- goss
# - snmp-agent
+# - firewall-vpn-r
- post
- wireguard-r
+- fw-ferm
- ssh-cli
- syslog-cli

View File

@ -1,12 +0,0 @@
---
- hosts: localhost
connection: local
vars:
- ip1: 192.168.0.52
- remip: 192.168.0.51
- mynet: 172.16.128.0
- remnet: 192.168.1.0
roles:
- fw-ferm

View File

@ -15,7 +15,10 @@
- dns-agence
- ssh-root-access
# - snmp-agent
+# - firewall-vpn-l
- wireguard-l
-- post
+# - x509-l
+- fw-ferm
- ssh-cli
- syslog-cli
+- post

View File

@ -28,7 +28,7 @@
192.168.99.103 s-lb-web3.gsb.adm
192.168.99.112 r-vp1.gsb.adm
192.168.99.102 r-vp2.gsb.adm
-192.168.99.120 s-peertube.gsb.adm
192.168.99.8 syslog.gsb.adm

View File

@ -27,6 +27,6 @@
192.168.99.103 s-lb-web3.gsb.adm
192.168.99.112 r-vp1.gsb.adm
192.168.99.102 r-vp2.gsb.adm
-192.168.99.120 s-peertube.gsb.adm
192.168.99.8 syslog.gsb.adm

View File

@ -0,0 +1,23 @@
// 0.2 - putconf - vendredi 12 avril 2013, 08:54:33 (UTC+0200)
options {
directory "/var/cache/bind";
// If there is a firewall between you and nameservers you want
// to talk to, you may need to fix the firewall to allow multiple
// ports to talk. See http://www.kb.cert.org/vuls/id/800113
// If your ISP provided one or more IP addresses for stable
// nameservers, you probably want to use them as forwarders.
// Uncomment the following block, and insert the addresses replacing
// the all-0's placeholder.
forwarders {
172.16.0.1;
};
auth-nxdomain no; # conform to RFC1035
listen-on-v6 { any; };
};

View File

@ -0,0 +1,4 @@
---
- name: restart bind9
service: name=bind9 state=restarted

View File

@ -0,0 +1,11 @@
---
- name: Installation bind9
apt: name=bind9 state=present update_cache=yes
- name: Copie named.conf.options
copy: src=named.conf.options dest=/etc/bind
notify:
- restart bind9

View File

@ -5,7 +5,7 @@
;
$TTL 604800
@ IN SOA s-infra.gsb.lan. root.s-infra.gsb.lan. (
-2023051000 ; Serial
+2023012500 ; Serial
7200 ; Refresh
86400 ; Retry
8419200 ; Expire
@ -36,5 +36,3 @@ s-web2 IN A 192.168.101.2
s-lb.gsb.lan IN A 192.168.100.10
ns IN CNAME s-infra.gsb.lan.
wpad IN CNAME s-infra.gsb.lan.
-s-peertube IN A 192.168.100.20
-peertube IN CNAME s-peertube

View File

@ -5,7 +5,7 @@
;
$TTL 604800
@ IN SOA s-infra.gsb.lan. root.s-infra.gsb.lan. (
-2023040501 ; Serial
+2023012500 ; Serial
7200 ; Refresh
86400 ; Retry
8419200 ; Expire
@ -28,3 +28,4 @@ $TTL 604800
11.0 IN PTR s-elk.gsb.lan.
17.0 IN PTR s-gestsup.lan
254.0 IN PTR r-int.gsb.lan.

View File

@ -1,16 +1,11 @@
# Fog
Ce rôle permet l'installation et la modification de Fog.
## Fog, c'est quoi ?
Fog permet le déploiement d'images disque tel que Windows ou bien Linux en utilisant PXE (Preboot Execution Environment).
## Comment l'installer ?
-Avant toute chose, lancer le fichier goss de s-fog ( présent dans gsb2023/goss/s-fog.yaml ) pour vérifier que la configuration réseau est correct et opérationnel. Une fois l'installation principale effectuée, il faut lancer le playbook ansible s-fog.yaml.
-Il faudra se rendre dans le dossier **fog** pour lancer le script **installfog.sh** ( fog/bin/ ). La configuration sera déjà établie via le fichier **.fogsettings**
+Avant toute chose, lancer le fichier goss de s-fog ( présent dans gsb2023/goss/s-fog.yaml ) pour vérifier que la configuration réseau est correct et opérationel. Une fois l'installation principal effectué, il faut lancé le playbook ansible s-fog.yaml. Il faudra se rendre dans le dossier **fog** pour lancer le script **installfog.sh** ( fog/bin/ ). La configuration sera déjà établi via le fichier **.fogsettings**
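Concretely, the sequence described above amounts to something like the following (a sketch: the playbook file name, the fog checkout directory and the playbook invocation are assumptions modelled on the r-vp1 example later in this diff):
```shell
# 1. verify the network configuration of s-fog with its goss file
cd /root/tools/ansible/gsb2023
bash agoss                                    # runs the goss/s-fog test on this machine

# 2. run the s-fog playbook locally
ansible-playbook -i localhost, -c local s-fog.yml

# 3. launch the Fog installer; it picks up the pre-seeded .fogsettings
cd fog/bin && bash installfog.sh
```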

View File

@ -22,5 +22,5 @@
command: "cp /root/tools/ansible/roles/fog/files/fogsettings /opt/fog/" command: "cp /root/tools/ansible/roles/fog/files/fogsettings /opt/fog/"
- name: fichier fogsettings en .fogsettings - name: fichier fogsettings en .fogsettings
command: "mv /opt/fog/fogsettings /opt/fog/.fogsettings" command: "mv /opt/fog/fogsettings /opt/fog/.fogsettings"

View File

@ -4,12 +4,10 @@
@def $DEV_PRIVATE = enp0s8;
@def $DEV_WORLD = enp0s9;
-@def $DEV_WORLD = enp0s9;
-@def $DEV_VPN= wg0;
@def $NET_PRIVATE = 172.16.0.0/24;
table filter {
chain (INPUT OUTPUT){
# allow VPN
proto udp dport 51820 ACCEPT;
@ -30,7 +28,7 @@ table filter {
# allow SSH connections from the private network and from some
# well-known internet hosts
-saddr ($NET_PRIVATE) proto tcp dport ssh ACCEPT;
+saddr ($NET_PRIVATE 81.209.165.42) proto tcp dport ssh ACCEPT;
# we provide DNS and SMTP services for the internal net
interface $DEV_PRIVATE saddr $NET_PRIVATE {
@ -38,21 +36,20 @@ table filter {
proto udp dport bootps ACCEPT;
}
-# the rest is dropped by the above policy
+# interface réseau
+interface $DEV_WORLD {
+}
+# the rest is dropped by the above policy
}#FIN INPUT
# outgoing connections are not limited
-chain OUTPUT {
-policy ACCEPT;
-}#FIN OUTPUT
+chain OUTPUT policy ACCEPT;
chain FORWARD {
policy ACCEPT;
-proto icmp icmp-type echo-request ACCEPT;
# connection tracking
mod state state INVALID DROP;
mod state state (ESTABLISHED RELATED) ACCEPT;
@ -61,9 +58,6 @@ table filter {
# internal nets are allowed
interface $DEV_PRIVATE ACCEPT;
-interface $DEV_VPN daddr $NET_PRIVATE {
-proto tcp dport ssh DROP;
-}
# the rest is dropped by the above policy
}
}

View File

@ -4,7 +4,7 @@
@def $DEV_PRIVATE = enp0s9;
@def $DEV_WORLD = enp0s8;
-@def $DEV_VPN= wg0;
@def $NET_PRIVATE = 172.16.0.0/24;
table filter {
@ -27,7 +27,7 @@ table filter {
# allow SSH connections from the private network and from some
# well-known internet hosts
-saddr ($NET_PRIVATE) proto tcp dport ssh ACCEPT;
+saddr ($NET_PRIVATE 81.209.165.42) proto tcp dport ssh ACCEPT;
# we provide DNS and SMTP services for the internal net
interface $DEV_PRIVATE saddr $NET_PRIVATE {
@ -35,7 +35,6 @@ table filter {
proto udp dport bootps ACCEPT;
}
# interface réseau
interface $DEV_WORLD {
@ -45,13 +44,11 @@ table filter {
}#FIN INPUT
# outgoing connections are not limited
-chain OUTPUT {
-policy ACCEPT;
-}
+chain OUTPUT policy ACCEPT;
chain FORWARD {
policy ACCEPT;
-proto icmp icmp-type echo-request ACCEPT;
# connection tracking
mod state state INVALID DROP;
mod state state (ESTABLISHED RELATED) ACCEPT;
@ -60,9 +57,6 @@ table filter {
# internal nets are allowed
interface $DEV_PRIVATE ACCEPT;
-interface $DEV_VPN daddr $NET_PRIVATE {
-proto tcp dport ssh DROP;
-}
# the rest is dropped by the above policy
}
}

View File

@ -13,3 +13,16 @@
apt:
name: nfs-common
state: present
- name: montage nfs pour word press
blockinfile:
path: /etc/fstab
block: |
192.168.102.253:/home/wordpress /var/www/html nfs soft,timeo=5,intr,rsize=8192,wsize=8192,wsize=8192 0 0
#- name: monte export wordpress
# ansible.posix.mount:
# path: /var/www/html
# state: mounted
# fstype: nfs
# src: 192.168.102.253:/exports/wordpress

View File

@ -26,7 +26,6 @@ define host {
host_name localhost
alias localhost
address 127.0.0.1
-parents r-int
}

View File

@ -9,6 +9,5 @@ define host{
host_name s-adm
alias debian-servers
address 192.168.99.99
-parents r-int
}

View File

@ -9,6 +9,5 @@ define host{
host_name s-appli
alias debian-servers
address 172.16.0.3
-parents r-int
}

View File

@ -9,6 +9,5 @@ define host{
host_name s-backup
alias serveur proxy
address 172.16.0.4
-parents r-int
}

View File

@ -9,7 +9,6 @@ define host{
host_name s-fog
alias serveur proxy
address 172.16.0.16
-parents r-int
}

View File

@ -9,6 +9,5 @@ define host{
host_name s-infra
alias debian-servers
address 172.16.0.1
-parents r-int
}

View File

@ -9,7 +9,6 @@ define host{
host_name s-itil
alias serveur proxy
address 172.16.0.9
-parents r-int
}

View File

@ -9,6 +9,5 @@ define host{
host_name s-nxc
alias debian-servers
address 172.16.0.7
-parents r-int
}

View File

@ -9,7 +9,6 @@ define host{
host_name s-proxy
alias serveur proxy
address 172.16.0.2
-parents r-int
}

View File

@ -9,7 +9,6 @@ define host{
host_name s-win
alias serveur proxy
address 172.16.0.6
-parents r-int
}

View File

@ -35,24 +35,3 @@ Nextcloud est alors fonctionnel avec le proxy inverse **traefik** assurant la re
ATTENTION : Après avoir relancé la VM, executez le script "nxc-start.sh" afin d'installer les piles applicatives.
Une fois le script terminé, le site est disponible ici : https://s-nxc.gsb.lan
## 5. Ajout authentification LDAP
Pour ajouter l'authentification LDAP au Nextcloud, il faut :
* Une fois l'installation de Nextcloud terminé, cliquez sur le profil et Application
* Dans vos applications, descendre et activer "LDAP user and group backend"
* Puis cliquer sur le profil, puis Paramètres d'administration et dans Administration cliquer sur Intégration LDAP/AD
* Une fois sur la page d'intégration LDAP/AD :
* Dans Hôte mettre :
> ldap://s-win.gsb.lan
* Cliquer sur Détecter le port (normalement le port 389 apparait)
* Dans DN Utilisateur mettre :
> CN=nextcloud,CN=Users,DC=GSB,DC=LAN
* Mot de passe :
> Azerty1+
* Et dans Un DN de base par ligne :
> DC=GSB,DC=LAN
* Après la configuration passe OK
* Une fois la configuration finie, cliquer 3 fois sur continuer
* Une fois arrivé sur Groupes, vous pouvez vous déconnecter du compte Admin et vous connecter avec un compte qui est dans l'AD.
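The removed section above goes through the LDAP setup in the web UI; the same settings can also be scripted with Nextcloud's `occ ldap:*` commands (a sketch using the values quoted above; the `occ` path, the `www-data` user and the `s01` config ID are assumptions to verify with `occ ldap:show-config`):
```shell
occ() { sudo -u www-data php /var/www/nextcloud/occ "$@"; }   # adjust the path for the docker/traefik setup

occ app:enable user_ldap
occ ldap:create-empty-config                                  # first configuration is usually s01
occ ldap:set-config s01 ldapHost "ldap://s-win.gsb.lan"
occ ldap:set-config s01 ldapPort 389
occ ldap:set-config s01 ldapAgentName "CN=nextcloud,CN=Users,DC=GSB,DC=LAN"
occ ldap:set-config s01 ldapAgentPassword "Azerty1+"
occ ldap:set-config s01 ldapBase "DC=GSB,DC=LAN"
occ ldap:test-config s01
```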

View File

@ -1,22 +0,0 @@
---
- name: mise a jour de resolv.conf...
copy:
src: /root/tools/ansible/gsb2023/roles/peertube/files/resolv.conf
dest: /etc/
mode: '0644'
- name: installation de docker...
shell: curl https://releases.rancher.com/install-docker/20.10.sh | sh
- name: attente de l'installation de docker...
wait_for:
timeout: 30
host: localhost
- name: installation de k3s...
shell: curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--node-ip=192.168.100.20 --flannel-iface=enp0s8" sh -s - --docker
- name: attente de l'installation de k3s...
wait_for:
timeout: 25
host: localhost

View File

@ -1,9 +0,0 @@
MYHOST=peertube.gsb.lan;
export KUBECONFIG=/etc/rancher/k3s/k3s.yaml;
openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.cert -subj /CN="${MYHOST}"/O="${MYHOST}" -addext "subjectAltName = DNS:${MYHOST}";
kubectl create secret tls tls-peertube --key tls.key --cert tls.cert;
helm repo add postgresql https://charts.bitnami.com/bitnami;
helm repo add redis https://charts.bitnami.com/bitnami;
helm repo add mail https://bokysan.github.io/docker-postfix;
helm install --create-namespace -n peertube peertube-gsb /root/tools/peertube/helm/ ;
kubectl config view --raw > ~/.kube/config

View File

@ -1,4 +0,0 @@
domain gsb.lan
search gsb.lan
nameserver 172.16.0.1
nameserver 192.168.99.99

View File

@ -1,139 +0,0 @@
replicaCount: 1
image:
repository: chocobozzz/peertube
pullPolicy: IfNotPresent
tag: "v5.0.1-bullseye"
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
create: false
annotations: {}
name: ""
podAnnotations: {}
podSecurityContext: {}
securityContext: {}
service:
type: ClusterIP
port: 9000
nginxPort: 9001
## default config for postgresql should work, but feel free to modify it if required.
# must stay consistent with peertube configuration, otherwise peertube will crash
postgresql:
enabled: true
primary:
persistence:
enabled: true
existingClaim: "pvc-postgres"
global:
postgresql:
auth:
postgrePassword: "admin"
username: "user"
password: "user"
database: "peertube"
## the main list of variables tha will be applied in the peertube container
# any error or misconfiguration will make peertube crash.
peertube:
env:
dbUser: user # must be consistent with postgresql configuration
dbPasswd: user # must be consistent with postgresql configuration
dbSsl: false # disabled by default WARNING: ssl connection feature not tested, use at your own risk
dbHostname: peertube-gsb-postgresql # must be consistent with postgresql configuration
webHostname: peertube.gsb.lan # must be changed to your local setup
secret: b2753b0f37444974de0e81f04815e6a889fcf8960bd203a01b624d8fa8a37683
smtpHostname: peertube-gsb-mail # must be consistent with mail configuration
smtpPort: 587 # must be consistent with mail configuration
smtpFrom: noreply@lan.lan # not configured by default, add something meaningfull if you want
smtpTls: false # disabled by default WARNING: tls connection feature not tested, use at your own risk
smtpDisableStartTls: false # unless crashes related to tls/ssl, this should be unchanged
adminEmail: root@localhost.lan # use this if you want peopleto be able to reach you
redisHostname: peertube-gsb-redis-master # must be consistent with redis configuration
redisAuth: peertube # must be consistent with redis configuration
app:
userCanRegister: true # control if people can register by themselves
rootPasswd: rootroot # CHANGE THIS! the default admin username is 'root' this variable define the password
## the next section configure at wich quality videos will be transcoded
transcoding360: true
transcoding480: true
transcoding720: true
transcoding1080: false
transcoding2160: false
## the configuration of the postfix server called 'mail' here
# change these settings if you know what you are doing
mail:
enbled: true
config:
general:
ALLOWED_SENDER_DOMAINS: "yes"
DKIM_AUTOGENERATE: "yes"
opendkim:
RequireSafeKeys: "no"
postfix:
smtp_tls_security_level: "secure" # works by default, any other tls level is untested
persistence:
enabled: false
service:
port: 587
## the configuration of the redis server
redis:
master:
persistence:
enabled: true
existingClaim: "pvc-redis"
replica:
persistence:
enabled: true
existingClaim: "pvc-redis"
auth:
enbled: true
password: "peertube"
## ingress configuration is very specific this part must be configured or else you'll get 503 or 404 errors
ingress:
enabled: true
className: ""
annotations:
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/proxy-body-size: 6G # this caps the size of imported videos, if set low this might prevent you from uploading videos
# kubernetes.io/tls-acme: "true"
hosts:
- host: peertube.gsb.lan
paths:
- path: /
pathType: ImplementationSpecific
tls:
- secretName: tls-peertube
- hosts:
- peertube.gsb.lan
resources: {}
autoscaling:
enabled: false
minimumReplicas: 3
maximumReplicas: 20
targetCPUUtilizationPercentage: 90
targetMemoryUtilizationPercentage: 75
windowSeconds: 120
minCPUPercentage: 20
minMemoryPercentage: 30
## this section should be configured to match your needs and available ressources
persistence:
enabled: true
reclaimPolicy: Retain
redisVolumeStorage: 1Gi
peertubeVolumeStorage: 5Gi
postgresqlVolumeStorage: 1Gi
accessMode: ReadWriteOnce
nodeSelector: {}
tolerations: []
affinity: {}
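For context, a values file like the one above is consumed by the `helm install` command from the `finish` script earlier in this diff; individual settings can be overridden at install time (a sketch: the chart path mirrors that script, and the exact nesting of this custom chart's values is an assumption):
```shell
helm install --create-namespace -n peertube peertube-gsb /root/tools/peertube/helm/ \
  --set peertube.env.webHostname=peertube.gsb.lan   # override a value without editing values.yaml
kubectl -n peertube get pods                        # check that the release comes up
```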

View File

@ -1,28 +0,0 @@
---
- name: création du répertoire du dépot peertube...
file:
path: /root/tools/peertube
state: directory
mode: '0755'
- name: clonage du dépot peertube...
git:
repo: https://github.com/Elam-Monnot/Peertube-helm.git
dest: /root/tools/peertube
clone: yes
force: yes
- name: copie de values.yaml...
copy:
src: /root/tools/ansible/gsb2023/roles/peertube/files/values.yaml
dest: /root/tools/peertube/helm/
mode: '0644'
- name: copie du script finish...
copy:
src: /root/tools/ansible/gsb2023/roles/peertube/files/finish
dest: /root
mode: '0644'
- name: installation de helm...
shell: curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash

View File

@ -1,7 +0,0 @@
# Rôle Post
Le rôle "post" copie la configuration des interfaces des cartes réseaux nécessaires selon la machine sur laquelle on exécute le rôle. Il place cette configuration dans /etc/network/interfaces.
Ensuite, on copie le fichier "resolv.conf" dans /etc/ lorsque que la machine qui exécute le rôle n'est pas "s-adm", "s-proxy" ou "r-vp2".
Cependant, si la machine qui exécute le rôle est "s-proxy", on copie le fichier "resolv.conf.s-proxy" dans /etc/resolv.conf
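In shell terms, the behaviour described above boils down to roughly this (a sketch only; the real implementation is the Ansible tasks file shown a few files below, and the file names come from the README text):
```shell
#!/bin/bash
# Rough equivalent of the "post" role's logic (illustrative only).
h="$(hostname)"
cp "interfaces.${h}" /etc/network/interfaces        # per-machine interfaces file
if [ "${h}" = "s-proxy" ]; then
  cp resolv.conf.s-proxy /etc/resolv.conf           # s-proxy gets its own resolver configuration
elif [ "${h}" != "s-adm" ] && [ "${h}" != "r-vp2" ]; then
  cp resolv.conf /etc/                              # all other machines except s-adm and r-vp2
fi
```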

View File

@ -1,21 +0,0 @@
### 0.2 - putconf - jeudi 7 janvier 2016, 16:18:49 (UTC+0100)
# The loopback network interface
auto lo
iface lo inet loopback
# carte n-adm
allow-hotplug enp0s3
iface enp0s3 inet static
address 192.168.99.101/24
# Réseau n-dmz-lb
allow-hotplug enp0s8
iface enp0s8 inet static
address 192.168.101.1/24
# réseau n-dmz-db
allow-hotplug enp0s9
iface enp0s9 inet static
address 192.168.102.1/24
post-up mount -o rw 192.168.102.253:/home/wordpress /var/www/html

View File

@ -1,21 +0,0 @@
### 0.2 - putconf - jeudi 7 janvier 2016, 16:18:49 (UTC+0100)
# The loopback network interface
auto lo
iface lo inet loopback
# carte n-adm
allow-hotplug enp0s3
iface enp0s3 inet static
address 192.168.99.101/24
# Réseau n-dmz-lb
allow-hotplug enp0s8
iface enp0s8 inet static
address 192.168.101.1/24
# réseau n-dmz-db
allow-hotplug enp0s9
iface enp0s9 inet static
address 192.168.102.1/24
post-up mount -o rw 192.168.102.253:/home/wordpress /var/www/html

View File

@ -1,4 +0,0 @@
search gsb.lan
domain gsb.lan
nameserver 172.16.0.1

View File

@ -1,24 +0,0 @@
---
- name: Copie interfaces
copy: src=interfaces.{{ ansible_hostname }} dest=/etc/network/interfaces
- name: Copie resolv.conf
copy: src=resolv.conf dest=/etc/
when: ansible_hostname != "s-adm" and ansible_hostname != "s-proxy"
- name: pas de chgt resolv.conf pour r-vp2
meta: end_play
when: ansible_hostname == "r-vp2"
- name: Copie resolv.conf pour s-proxy
copy: src=resolv.conf.s-proxy dest=/etc/resolv.conf
when: ansible_hostname == "s-proxy"
#- name: Confirm
# prompt: "<Entree> pour redemarrer ..."
#- name: Reboot
# shell: reboot

View File

@ -34,4 +34,4 @@ iface enp0s10 inet static
allow-hotplug enp0s16
iface enp0s16 inet static
address 172.16.0.254/24
-post-up sleep 10 && systemctl restart isc-dhcp-server

View File

@ -0,0 +1,26 @@
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
#auto lo
#iface lo inet loopback
#cote N-adm
allow-hotplug enp0s3
iface enp0s3 inet dhcp
# reseau entre vpn
allow-hotplug enp0s8
iface enp0s8 inet static
address 192.168.0.51
netmask 255.255.255.0
# reseau interne n-linkv
allow-hotplug enp0s9
iface enp0s9 inet static
address 192.168.1.2
netmask 255.255.255.0
up route add -net 172.16.128.0/24 gw 192.168.1.2
up route add default gw 192.168.1.1
# post-up /bin/bash /root/iptables-vpn
post-up /etc/init.d/ipsec restart

View File

@ -0,0 +1,25 @@
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
#auto lo
#iface lo inet loopback
# cote N-adm
allow-hotplug enp0s3
iface enp0s3 inet dhcp
# cote Agence
allow-hotplug enp0s8
iface enp0s8 inet static
address 172.16.128.254
netmask 255.255.255.0
# cote VPN
allow-hotplug enp0s9
iface enp0s9 inet static
address 192.168.0.52
netmask 255.255.255.0
up route add -net 192.168.1.0/24 gw 172.16.128.254
# post-up /bin/bash /root/iptables-vpn
post-up /etc/init.d/ipsec restart

View File

@ -1,17 +0,0 @@
### 0.1 - putconf - jeudi 30 mars 2023, 8:11:30 (UTC+0100)
# The loopback network interface
auto lo
iface lo inet loopback
# carte n-adm
allow-hotplug enp0s3
iface enp0s3 inet static
address 192.168.99.120/24
gateway 192.168.99.99
# Réseau n-dmz
allow-hotplug enp0s8
iface enp0s8 inet static
address 192.168.100.20/24
post-up systemctl start k3s

View File

@ -21,3 +21,4 @@
#- name: Reboot
# shell: reboot

View File

@ -17,13 +17,3 @@
#- name: extraction fog.tar.gz
#unarchive: src=/tmp/fog.tar.gz dest=/var/www/ copy=no
#- name: delais 2 secondes isc-dhcp-service
# become: yes
# lineinfile:
# path: /etc/init.d/isc-dhcp-server
# insertafter: '^\s+start\)$'
# line: " sleep 2"
# firstmatch: yes
# state: present
# backup: yes

View File

@ -1,5 +1,5 @@
-# ajout du sleep 5
-~~éditer "/etc/init.d/isc-dhcp-server"~~
-~~aller au "case \"$1\" in" et rajouter "sleep 5" avant le "if"~~
+#ajout du sleep 5
+éditer "/etc/init.d/isc-dhcp-server"
+aller au "case \"$1\" in" et rajouter "sleep 5" avant le "if"

View File

@ -14,15 +14,7 @@
name: wireguard-tools
state: present
-- name: delais 2 secondes isc-dhcp-service
-become: yes
-lineinfile:
-path: /etc/init.d/isc-dhcp-server
-insertafter: '^\s+start\)$'
-line: " sleep 2"
-firstmatch: yes
-state: present
-backup: yes
#- name: renommage du fichier de configuration
# command: "mv /etc/wireguard/wg0-b.conf /etc/wireguard/wg0.conf"

View File

@ -1,32 +1,19 @@
-# <p align="center">Procédure d'installation </p>
-de **r-vp1** et de copie du fichier wg0-b.conf.
+Procédure d'installation de **r-vp1** et de copie du fichier wg0-b.conf.
***
-## Sur **r-vp1**:
-Attendre la fin de l'installation. Ensuite lancer un serveur http avec python3 pour récuperer le fichier wg0-b.conf sur **r-vp2** .
-### 🛠️ Lancer le script
-```bash
-cd /tools/ansible/gsb2023/Scripts
-```
-```bash
-bash r-vp1-post.sh
-```
-## Sur **r-vp2**:
-Lancer le script r-vp2-post.sh pour récuperer le fichier de configuration et activer l'interface wg0. Depuis **r-vp1** se deplacer dans le repertoire **/tools/ansible/gsb2023** pour executer le playbook:
-### 🛠️ Lancer le script **"ansible-playbook -i localhost, -c local r-vp1.yml"** puis reboot **r-vp1**.
-```bash
-cd /tools/ansible/gsb2023/Scripts
-```
-```bash
-bash r-vp2-post.sh
-```
-## Fin
+Sur **r-vp1**:
+Attendre la fin de l'installation. Ensuite lancer un serveur http avec python3 pour récuperer le fichier
+wg0-b.conf sur **r-vp2** . Lancer le script **r-vp1-post.sh** dans **/tools/ansible/gsb2023/Scripts**.
+Sur **r-vp2**:
+Lancer le script r-vp2-post.sh dans **/tools/ansible/gsb2023/Scripts** pour recuperer wg0-b.conf
+et qui renomme le fichier en **wg0.conf** . Il redémarre et active le service **wg-quick@wg0**.
redemarer les machines
```bash
reboot
```
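The file transfer both versions describe can also be done by hand (a sketch: the r-vp1-post.sh and r-vp2-post.sh scripts presumably automate it, the wg0-b.conf location on r-vp1 is an assumption, and the port must match the wget URL in r-vp2-post.sh, 8000 on main versus 8800 in v0.0.5s-em):
```shell
# on r-vp1: serve the generated configuration over HTTP
cd /etc/wireguard && python3 -m http.server 8000

# on r-vp2: fetch it, install it as wg0.conf and enable the tunnel
wget http://r-vp1.gsb.adm:8000/wg0-b.conf -O /etc/wireguard/wg0.conf
systemctl enable --now wg-quick@wg0
```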

View File

@ -4,7 +4,7 @@
roles:
- base
-- post-lb
+- post
- lb-web
- snmp-agent
- ssh-cli

View File

@ -4,8 +4,7 @@
roles:
- base
-- post-lb
+- post
- lb-web
- snmp-agent
- ssh-cli

View File

@ -1,11 +0,0 @@
---
- hosts: localhost
connection: local
roles:
- base
- post
- snmp-agent
- ssh-cli
- peertube-k3s
- peertube

View File

@ -1,18 +0,0 @@
#!/bin/bash
nom=s-peertube
# N-adm (enp0s3)
VBoxManage modifyvm $nom --nic1 intnet
VBoxManage modifyvm $nom --intnet1 "n-adm"
VBoxManage modifyvm $nom --nictype1 82540EM
VBoxManage modifyvm $nom --cableconnected1 on
VBoxManage modifyvm $nom --nicpromisc1 allow-all
# N-dmz (enp0s8)
VBoxManage modifyvm $nom --nic2 intnet
VBoxManage modifyvm $nom --intnet2 "n-dmz"
VBoxManage modifyvm $nom --nictype2 82540EM
VBoxManage modifyvm $nom --cableconnected2 on
VBoxManage modifyvm $nom --nicpromisc2 allow-all

View File

@ -110,8 +110,6 @@ elif [[ "${vm}" == "s-lb-bd" ]] ; then
create_if "${vm}" "n-adm" "n-dmz-db" create_if "${vm}" "n-adm" "n-dmz-db"
elif [[ "${vm}" == "s-nas" ]] ; then elif [[ "${vm}" == "s-nas" ]] ; then
create_if "${vm}" "n-adm" "n-dmz-db" create_if "${vm}" "n-adm" "n-dmz-db"
elif [[ "${vm}" == "s-peertube" ]] ; then
./addint.s-peertube
elif [[ "${vm}" == "r-vp1" ]] ; then elif [[ "${vm}" == "r-vp1" ]] ; then
./addint.r-vp1 ./addint.r-vp1
elif [[ "${vm}" == "r-vp2" ]] ; then elif [[ "${vm}" == "r-vp2" ]] ; then

View File

@ -1,6 +1,6 @@
#!/bin/bash
#recuperation du fichier de config
-wget http://r-vp1.gsb.adm:8000/wg0-b.conf
+wget http://r-vp1.gsb.adm:8800/wg0-b.conf
#renomage fichier et mv
mv ./wg0-b.conf /etc/wireguard/wg0.conf
#activation interface wg0

View File

@ -1,4 +1,3 @@
-@echo off
net group gg-backup /ADD
call mkusr uBackup "u-backup" gg-backup
icacls "C:\gsb\partages\public" /Grant:r uBackup:M /T

View File

@ -1,4 +1,3 @@
-@echo off
call mkusr aDupont "Albert Dupon" gg-compta
call mkusr cSeum "Claire Seum" gg-compta
call mkusr nPaul "Nicolas Paul" gg-compta

View File

@ -1,2 +0,0 @@
@echo off
call mkusr nextcloud "nextcloud" nextcloud