---
####################
#
# run with
# ANSIBLE_HOST_KEY_CHECKING=False AWS_PROFILE=myaws ap aws.yml --ssh-common-args="-i /home/m/.ssh/myaws.pem"
#
####################

# Play 1: provision AWS resources (security group, workstation instance,
# 3 spot instances, gluster EBS volumes) from the control machine.
- hosts: localhost
  connection: local
  gather_facts: false

  vars:
    region: eu-west-1
    VPC: vpc-c732c7a3

  tasks:
    - name: rules for my aws security group
      ec2_group:
        name: myaws
        description: allow incomming traffic only from hetzner.osuv.de
        region: "{{ region }}"
        state: present
        vpc_id: "{{ VPC }}"
        rules:
          # NOTE(review): the 'dig' lookup needs dnspython on the controller.
          - proto: all
            rule_desc: hetzner.osuv.de
            cidr_ip: "{{ lookup('dig', 'hetzner.osuv.de') }}/32"
          - proto: all
            rule_desc: internal traffic
            # RFC 1918 private range. The original 172.0.0.0/8 also covered
            # public address space (172.0-15.x, 172.32+.x).
            cidr_ip: "172.16.0.0/12"
        tags:
          Name: myaws

    - name: add workstation
      ec2:
        region: "{{ region }}"
        keypair: myaws
        group: myaws
        instance_type: t3a.small
        image: ami-0035184034468cd86  # fedora 30
        wait: true
        vpc_subnet_id: subnet-41d30025  # eu-west-1c
        assign_public_ip: true
        count: 1
        instance_tags:
          Name: ws
      register: ws

    - name: create 3 spot instances
      ec2:
        region: "{{ region }}"
        # quoted: the module expects a string; an unquoted float could be
        # reformatted by YAML before the module sees it
        spot_price: "0.0036"
        spot_wait_timeout: 60
        keypair: myaws
        group: myaws
        instance_type: t3a.micro
        image: ami-0035184034468cd86  # fedora 30
        wait: true
        vpc_subnet_id: subnet-41d30025  # eu-west-1c
        assign_public_ip: true
        count: 3
        instance_tags:
          Name: spot
      register: ec2

    - name: Wait for ws SSH to come up
      wait_for:
        host: "{{ item.public_ip }}"
        port: 22
        state: started
      with_items: "{{ ws.instances }}"

    - name: Wait for spot SSH to come up
      wait_for:
        host: "{{ item.public_ip }}"
        port: 22
        state: started
      with_items: "{{ ec2.instances }}"

    - name: add all instances to launched host group
      add_host:
        hostname: "{{ item.public_ip }}"
        groupname: launched
      with_items: "{{ ec2.instances }}"

    - name: add ws instances to launched host group
      add_host:
        hostname: "{{ item.public_ip }}"
        groupname: ws
      with_items: "{{ ws.instances }}"

    - name: add one spot instance to the seed host group
      add_host:
        hostname: "{{ ec2.instances[0].public_ip }}"
        groupname: seed

    # collected here so the later plays can read them via
    # hostvars['localhost']['private_ips']
    - name: copy private ips for 2nd play
      set_fact:
        private_ips: "{{ private_ips | default([]) + [item.private_ip] }}"
      with_items: "{{ ec2.instances }}"

    - name: create glustefs ebs volumes
      ec2_vol:
        region: "{{ region }}"
        instance: "{{ item.id }}"
        volume_size: 5
        device_name: /dev/sdf
      with_items: "{{ ec2.instances }}"
############
#
# configure all
# spot instances
#
############

# Play 2: prepare every launched spot instance — python bootstrap, format
# and mount the EBS volume, install and start glusterfs-server.
- name: Configure all instances
  hosts: launched
  become: true
  remote_user: fedora
  gather_facts: false

  tasks:
    - name: bootstrap ansible usage by checking availabilty of python
      raw: test -e /usr/bin/python || (ln -s /usr/bin/python3 /usr/bin/python)

    # The EBS volume attached as /dev/sdf appears as /dev/nvme1n1 on
    # nitro-based (t3a) instances.
    - name: Create a xfs filesystem on /dev/nvme1n1
      filesystem:
        fstype: xfs
        dev: /dev/nvme1n1

    - name: Mount up xfs device
      mount:
        path: /mnt
        src: /dev/nvme1n1
        fstype: xfs
        opts: noatime
        # was 'present', which only writes the fstab entry and never mounts;
        # 'mounted' also mounts it now so the gluster bricks created below
        # really live on the EBS volume instead of the root filesystem
        state: mounted

    - name: install glusterfs
      dnf:
        name: glusterfs-server
        state: present
        update_cache: true

    - name: start and enable glusterfs
      systemd:
        name: glusterd
        state: started
        enabled: true

    - name: Create directory on xfs mount ebs volume
      file:
        path: /mnt/vol1
        state: directory
#############
#
# configure one
# spot instance as seed
#
#############

# Play 3: from a single seed node, form the gluster trusted storage pool
# and create/start the replicated volume.
- name: setup gluster cluster
  hosts: seed
  become: true
  remote_user: fedora
  gather_facts: false

  tasks:
    - name: Create a trusted storage pool
      gluster_peer:
        state: present
        # private IPs of all spot instances, collected in the first play
        nodes: "{{ hostvars['localhost']['private_ips'] }}"

    - name: create gluster volume
      gluster_volume:
        state: present
        name: test1
        bricks: /mnt/vol1
        replicas: 3
        force: true
        cluster: "{{ hostvars['localhost']['private_ips'] }}"
        # block style instead of the original flow mapping; "128MB" quoted
        # so it stays a string
        options:
          performance.cache-size: "128MB"
          write-behind: "off"
          quick-read: "on"
      run_once: true

    - name: start gluster volume
      gluster_volume:
        state: started
        name: test1
#############
#
# configure ws
#############

# Play 4: set up the workstation instance — base packages, python tooling,
# and a code-server download.
- name: setup ws
  hosts: ws
  become: true
  remote_user: fedora
  gather_facts: false

  tasks:
    - name: bootstrap ansible usage by checking availabilty of python
      raw: test -e /usr/bin/python || (ln -s /usr/bin/python3 /usr/bin/python)

    - name: install packages
      become: true
      yum:
        name: "{{ packages }}"
        # 'installed' is a deprecated alias of 'present'
        state: present
      vars:
        packages:
          - htop
          - nano
          - nmap
          - screen
          - git
          - lsof
          - docker
          - ncdu
          - nfs-utils
          # fixed typo: was 'glustefs-client', which is not a real package
          # and would have failed the whole task
          - glusterfs-client

    - name: install python packages
      become: true
      pip:
        name: "{{ packages }}"
        executable: pip-3.7
      vars:
        packages:
          - ansible
          - boto3
          - boto
          - awslogs
          - mycli
          - docker-py

    - name: create directory for code-server
      file:
        path: /home/fedora/code-server
        state: directory

    - name: Download code server
      unarchive:
        src: https://github.com/cdr/code-server/releases/download/1.939-vsc1.33.1/code-server1.939-vsc1.33.1-linux-x64.tar.gz
        dest: /home/fedora/code-server
        remote_src: true

    # - name: apply code-server start on reboot
    #   cron:
    #     name: "apply code-server start on reboot"
    #     special_time: reboot
    #     job: "cd /home/fedora/code-server/code-server1.939-vsc1.33.1-linux-x64; nohup ./code-server /home/fedora/ --password=password </dev/null >/dev/null 2>&1 &"

# mount -t glusterfs 172.31.4.217:/test1 /oi

# Shrink and increase

# remove brick and change decr replica
# sudo gluster volume remove-brick test1 replica 2 172.31.5.97:/mnt/vol1 force
# detach peer
# sudo gluster peer detach 172.31.5.97
# add new peer
# sudo gluster peer probe 172.31.5.97
# add brick to new paar and incr replica
# sudo gluster volume add-brick test1 replica 3 172.31.5.97:/mnt/vol1 force