Compare commits: 4069d8a77a ... 450d8fcb7a

7 commits:
450d8fcb7a
01c0e21f94
c11e492f8f
e298d5afa2
bcbdd75185
8ac7e0f5a3
197157b830
@@ -1,7 +1,7 @@
 ---
 all:
   vars:
-    skylab_state_dir: /var/run/skylab
+    skylab_state_dir: /var/lib/skylab
     skylab_ansible_venv: "{{ skylab_state_dir }}/ansible-runtime"
     skylab_pip_version: 19.3.1
     ansible_user: ansible
@@ -34,6 +34,13 @@ en1:
       - server
       - docker-swarm
       - datastore
+    skylab_direct_peers:
+      - hostname: pegasus.local jupiter.svr.local
+        address: 192.168.42.10
+      - hostname: saturn.local remus.svr.local
+        address: 192.168.42.20
+      - hostname: orion.local
+        address: 192.168.42.30
 
   hosts:
     pegasus: # jupiter
@@ -50,6 +57,7 @@ en1:
           bond: bond0
         bond0:
           type: bond
+          firewall: internal
           gateway: 10.42.101.1/24
           dns:
             - 10.42.101.1
@@ -59,6 +67,7 @@ en1:
           dhcp: false
         bond0.99:
           type: vlan
+          firewall: trusted
           addresses:
             - 192.168.42.10/24
           dhcp: false
@@ -77,6 +86,7 @@ en1:
           bond: bond0
         bond0:
           type: bond
+          firewall: internal
           dhcp: false
           gateway: 10.42.101.1/24
           addresses:
@@ -86,6 +96,7 @@ en1:
             - 10.42.101.1
         bond0.99:
           type: vlan
+          firewall: trusted
           dhcp: false
           addresses:
             - 192.168.42.20/24
@@ -97,6 +108,7 @@ en1:
       skylab_cluster:
         address: 10.42.101.12/24
         interface: bond0
+      skylab_datastore_device: sdb
       skylab_networking:
         eno1:
           bond: bond0
@@ -104,6 +116,7 @@ en1:
           bond: bond0
         bond0:
           type: bond
+          firewall: internal
           gateway: 10.42.101.1/24
           dns:
             - 10.42.101.1
@@ -113,6 +126,7 @@ en1:
           dhcp: false
         bond0.99:
           type: vlan
+          firewall: trusted
           addresses:
             - 192.168.42.30/24
           dhcp: false
@@ -21,3 +21,5 @@
   roles:
     - role: server
       when: "'server' in skylab_roles | default([])"
+    - role: datastore
+      when: "'datastore' in skylab_roles | default([])"
roles/datastore/meta/argument_specs.yaml (new file, 67 lines)
@@ -0,0 +1,67 @@
---
argument_specs:
  main:
    description: >-
      This role makes several assumptions about the local storage configuration of the server:

      1. There is one block device on the server that will be used for data storage
      2. That block device will be joined to a glusterfs volume
      3. The block device is encrypted with LUKS

      This role mostly serves to perform housekeeping tasks and validation of expected configs.
      Automating disk configuration seems like a really good way to lose all my data, so I decided
      to leave that to the much more reliable manual configuration for the time being.

      To that end, here is a quick cheatsheet of commands that might be useful in setting up
      storage device(s) for this role (replace `DEVICE` with the block device for storage):

      ```bash
      # Encrypt a block device, provide encryption key when prompted
      cryptsetup luksFormat --type luks2 /dev/DEVICE

      # Unlock encrypted block device and mount under a mapper
      cryptsetup luksOpen /dev/DEVICE LABEL

      # Lock an encrypted block device
      cryptsetup luksClose LABEL

      # Create and format a partition on the encrypted block device
      mkfs.xfs /dev/mapper/LABEL -L LABEL

      # Run from an existing server already in the gluster pool
      # Add server to the gluster pool
      gluster peer probe HOSTNAME

      # To replace a brick from an already offline'd node, the old brick first needs to be force
      # removed, replication reduced, and (if arbiter is enabled) any arbiter nodes removed
      #
      # Remove arbiter brick
      gluster volume remove-brick VOLUME replica 2 HOSTNAME:/EXPORT force
      # Remove dead data brick
      gluster volume remove-brick VOLUME replica 1 HOSTNAME:/EXPORT force
      # Remove dead node
      gluster peer detach HOSTNAME
      # Add new data brick
      gluster volume add-brick VOLUME replica 2 HOSTNAME:/EXPORT start
      #
      # To re-add the arbiter you might need to clean up the `.glusterfs` directory and remove
      # directory parameters from the old brick. These next commands need to be run on the host
      # with the arbiter brick physically attached
      #
      rm -rf /EXPORT/.glusterfs
      setfattr -x trusted.gfid /EXPORT
      setfattr -x trusted.glusterfs.volume-id /EXPORT
      # Re-add arbiter brick
      gluster volume add-brick VOLUME replica 3 arbiter 1 HOSTNAME:/EXPORT
      # Trigger a resync
      gluster volume heal datastore

      # General gluster debug info
      gluster volume info VOLUME
      gluster volume status VOLUME
      ```
    options:
      skylab_datastore_device:
        description: The block device under `/dev/` that should be configured as datastore storage
        type: str
        required: true
roles/datastore/tasks/gluster.yaml (new file, 45 lines)
@@ -0,0 +1,45 @@
---
- name: Allow gluster through firewall
  become: true
  ansible.posix.firewalld:
    service: glusterfs
    state: enabled
    zone: trusted
    immediate: true
    permanent: true

- name: Create datastore directory
  become: true
  ansible.builtin.file:
    path: /mnt/brick/datastore
    state: directory

- name: Fetch peer status
  become: true
  ansible.builtin.command:
    cmd: gluster peer status
  changed_when: false
  register: _gluster_peer_status_raw

- name: Check peer status
  ansible.builtin.assert:
    that:
      - not _gluster_peer_status_raw.stdout_lines[0].strip().endswith('0')
    fail_msg: >-
      ERROR: Datastore host '{{ inventory_hostname }}' is not joined to the gluster pool. Run the
      command 'gluster peer probe {{ inventory_hostname }}.local' from another datastore host to
      add it.
    success_msg: >-
      Datastore host {{ inventory_hostname }} is joined to the gluster pool

- name: Mount gluster volume
  become: true
  ansible.posix.mount:
    path: /mnt/datastore
    src: localhost:/datastore
    state: mounted
    fstype: glusterfs
    # Note that this just needs to be any path *other* than the actual
    # fstab. This is done just to prevent the devices from being
    # automatically mounted at boot
    fstab: "{{ skylab_state_dir }}/mounts"
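The "Check peer status" assert leans on the first line of `gluster peer status`, which reads `Number of Peers: N`; a host that has never been probed reports `0`. A rough shell sketch of the same check (the peer output shown is illustrative, not captured from these hosts):

```bash
# Typical `gluster peer status` output on a joined host (illustrative):
#   Number of Peers: 1
#   Hostname: saturn.local
#   Uuid: ...
#   State: Peer in Cluster (Connected)
# Fail when the first line ends in "0", i.e. the host has no peers.
if gluster peer status | head -n 1 | grep -q '0$'; then
    echo "ERROR: this host is not joined to the gluster pool" >&2
    exit 1
fi
```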
roles/datastore/tasks/main.yaml (new file, 9 lines)
@@ -0,0 +1,9 @@
---
- name: Install datastore packages
  ansible.builtin.import_tasks: packages.yaml

- name: Configure mounting
  ansible.builtin.import_tasks: mounts.yaml

- name: Configure glusterfs
  ansible.builtin.import_tasks: gluster.yaml
roles/datastore/tasks/mounts.yaml (new file, 108 lines)
@@ -0,0 +1,108 @@
---
- name: Create mount points
  become: true
  ansible.builtin.file:
    path: "{{ item }}"
    state: directory
    mode: 0755
    owner: root
    group: "{{ ansible_user }}"
  loop:
    - /mnt/datastore
    - /mnt/brick

- name: Determine current mounts
  vars:
    _current_mounts: []
  ansible.builtin.set_fact:
    _current_mounts: "{{ _current_mounts + [item.mount] }}"
  loop: "{{ ansible_mounts }}"
  loop_control:
    label: "{{ item.mount }}"

- name: Ensure mount points are empty when unmounted
  when: item not in _current_mounts
  ansible.builtin.command:
    cmd: "/usr/bin/ls {{ item }}"
  changed_when: false
  failed_when: _mountpoint_ls_raw.stdout
  register: _mountpoint_ls_raw
  loop:
    - /mnt/datastore
    - /mnt/brick

- name: Fetch block device information
  ansible.builtin.command:
    cmd: lsblk /dev/{{ skylab_datastore_device }} --fs --json
  changed_when: false
  register: _lsblk_info_raw

- name: Process block device information
  ansible.builtin.set_fact:
    _datastore_device_info: "{{ (_lsblk_info_raw.stdout | from_json).blockdevices[0] }}"

- name: Check state of the datastore device
  ansible.builtin.assert:
    that: _datastore_device_info.fstype == "crypto_LUKS"
    fail_msg: >-
      ERROR: Datastore block device {{ inventory_hostname }}:/dev/{{ skylab_datastore_device }}
      must be LUKS encrypted
    success_msg: >-
      Datastore block device {{ inventory_hostname }}:/dev/{{ skylab_datastore_device }} is
      LUKS encrypted

- name: Determine whether datastore block is decrypted
  ansible.builtin.set_fact:
    _datastore_device_is_decrypted: "{{ _datastore_device_info.children is defined }}"

- name: Decrypt datastore block
  when: not _datastore_device_is_decrypted
  block:
    - name: Prompt for decryption key
      no_log: true
      ansible.builtin.pause:
        prompt: >-
          Datastore device {{ inventory_hostname }}:/dev/{{ skylab_datastore_device }} is not
          decrypted. Enter decryption passphrase to continue GlusterFS brick configuration
        echo: false
      register: _luks_decryption_key

    - name: Open LUKS device
      become: true
      community.crypto.luks_device:
        device: /dev/{{ skylab_datastore_device }}
        state: opened
        name: brick
        passphrase: "{{ _luks_decryption_key.user_input }}"

    - name: Fetch updated block device information
      ansible.builtin.command:
        cmd: lsblk /dev/{{ skylab_datastore_device }} --fs --json
      changed_when: false
      register: _lsblk_info_raw

    - name: Process updated block device information
      ansible.builtin.set_fact:
        _datastore_device_info: "{{ (_lsblk_info_raw.stdout | from_json).blockdevices[0] }}"

- name: Create dummy fstab
  ansible.builtin.file:
    state: touch
    path: "{{ skylab_state_dir }}/mounts"
    owner: "{{ ansible_user }}"
    group: "{{ ansible_user }}"
    mode: 0644
    access_time: preserve
    modification_time: preserve

- name: Mount datastore block
  become: true
  ansible.posix.mount:
    path: /mnt/brick
    src: UUID={{ _datastore_device_info.children[0].uuid }}
    state: mounted
    fstype: "{{ _datastore_device_info.children[0].fstype }}"
    # Note that this just needs to be any path *other* than the actual
    # fstab. This is done just to prevent the devices from being
    # automatically mounted at boot
    fstab: "{{ skylab_state_dir }}/mounts"
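These tasks depend on the shape of `lsblk --fs --json` output: the LUKS container is the top-level block device, and a `children` entry only appears once the mapping has been opened. A sketch of the assumed output (device name, mapper name, UUID, and filesystem are illustrative, not taken from a live host):

```bash
lsblk /dev/sdb --fs --json
# Assumed shape once the LUKS mapping "brick" is open (abridged, values illustrative):
# {
#   "blockdevices": [
#     {
#       "name": "sdb", "fstype": "crypto_LUKS",
#       "children": [
#         { "name": "brick", "fstype": "xfs", "uuid": "0a1b2c3d-..." }
#       ]
#     }
#   ]
# }
# The "Mount datastore block" task mounts /mnt/brick from children[0].uuid
# using children[0].fstype; no children means the device is still locked.
```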
roles/datastore/tasks/packages.yaml (new file, 31 lines)
@@ -0,0 +1,31 @@
---
- name: Install gluster repository
  become: true
  ansible.builtin.dnf:
    name: centos-release-gluster9
    state: present
  register: _datastore_repo_gluster

- name: Enable required repositories
  become: true
  ansible.builtin.lineinfile:
    path: /etc/yum.repos.d/{{ item }}.repo
    line: enabled=1
    state: present
    regexp: "#?enabled=(0|1)"
  loop:
    - Rocky-AppStream
    - Rocky-PowerTools
  register: _datastore_repo_powertools

- name: Install datastore packages
  become: true
  when: ansible_distribution == "Rocky"
  ansible.builtin.dnf:
    state: present
    update_cache: "{{ _datastore_repo_powertools.changed or _datastore_repo_gluster.changed }}"
    name:
      - cryptsetup-luks
      - glusterfs
      - glusterfs-fuse
      - glusterfs-server
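For orientation, a rough shell equivalent of these three tasks on a Rocky host; the `config-manager` call and the `appstream`/`powertools` repo ids are my assumption (the tasks above edit the `Rocky-AppStream`/`Rocky-PowerTools` .repo files directly instead):

```bash
# Pull in the Gluster 9 SIG repo, enable the extra repos, then install the packages
dnf install -y centos-release-gluster9
dnf config-manager --set-enabled appstream powertools   # assumed repo ids
dnf install -y cryptsetup-luks glusterfs glusterfs-fuse glusterfs-server
```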
roles/server/tasks/firewalld.yaml (new file, 29 lines)
@@ -0,0 +1,29 @@
---
- name: Enable systemd-firewalld
  become: true
  ansible.builtin.systemd:
    name: firewalld
    state: started
    enabled: true

- name: Configure firewall interface zones
  become: true
  when: item.value.firewall is defined
  ansible.posix.firewalld:
    interface: "{{ item.key }}"
    zone: "{{ item.value.firewall }}"
    state: enabled
    permanent: true
    immediate: true
  loop: "{{ skylab_networking | dict2items }}"
  loop_control:
    label: "{{ item.key }}"

- name: Configure firewall for docker interface
  become: true
  when: "'docker0' in ansible_interfaces"
  ansible.posix.firewalld:
    interface: docker0
    zone: dmz
    permanent: true
    immediate: true
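The zone assignments come from the `firewall:` keys added to `skylab_networking` in the inventory hunks above. A roughly equivalent set of firewall-cmd calls for one host (interface names assumed from that inventory, shown only to illustrate what the loop amounts to):

```bash
# Put the bonded interface in the internal zone and its VLAN in the trusted zone
firewall-cmd --permanent --zone=internal --change-interface=bond0
firewall-cmd --permanent --zone=trusted --change-interface=bond0.99
firewall-cmd --reload
```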
roles/server/tasks/hosts.yaml (new file, 34 lines)
@@ -0,0 +1,34 @@
---
- name: Retrieve current hostsfile contents
  ansible.builtin.command:
    cmd: cat /etc/hosts
  changed_when: false
  register: _existing_hostsfile_raw

- name: Assemble hostsfile lines
  vars:
    _hostsfile_lines: []
  ansible.builtin.set_fact:
    _hostsfile_lines: "{{ _hostsfile_lines + [item.address + ' ' + item.hostname] }}"
  loop: "{{ skylab_direct_peers }}"
  loop_control:
    label: "{{ item.hostname }}"

- name: Configure directly connected peers
  become: true
  ansible.builtin.lineinfile:
    path: /etc/hosts
    line: "{{ item }}"
    state: present
  loop: "{{ _hostsfile_lines }}"
  loop_control:
    label: "{{ item.partition(' ')[0] }}"

- name: Remove unmanaged peer aliases
  become: true
  when: "'localhost' not in item and item not in _hostsfile_lines"
  ansible.builtin.lineinfile:
    path: /etc/hosts
    line: "{{ item }}"
    state: absent
  loop: "{{ _existing_hostsfile_raw.stdout_lines }}"
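With the `skylab_direct_peers` entries from the inventory diff, each managed /etc/hosts line is a plain `address hostname [alias]` record, and any non-localhost line not in that set gets removed. A quick way to see the expected result (addresses and names taken from the inventory hunk above, not from a live host):

```bash
grep -v localhost /etc/hosts
# Expected managed entries:
# 192.168.42.10 pegasus.local jupiter.svr.local
# 192.168.42.20 saturn.local remus.svr.local
# 192.168.42.30 orion.local
```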
@@ -8,3 +8,11 @@
 - name: Configure network settings
   when: skylab_networking is defined
   ansible.builtin.include_tasks: networkd.yaml
+
+- name: Configure firewall settings
+  when: skylab_networking is defined
+  ansible.builtin.include_tasks: firewalld.yaml
+
+- name: Configure hostsfile
+  when: skylab_direct_peers is defined
+  ansible.builtin.include_tasks: hosts.yaml
@@ -1,11 +1,11 @@
[ASCII-art MOTD banner, garbled in extraction; the hunk only adjusts the banner artwork]
 ✨ {{ skylab_description }} @{{ skylab_location }}
 {{ ' ' }}
@@ -9,6 +9,12 @@
     - python3-policycoreutils
     - python3-firewall
 
+- name: Remove legacy state directory
+  become: true
+  ansible.builtin.file:
+    path: /var/run/skylab
+    state: absent
+
 - name: Create state directory
   become: true
   ansible.builtin.file: