
aws.yml

---
- hosts: localhost
  connection: local
  gather_facts: False
  ####################
  #
  # run with
  # ANSIBLE_HOST_KEY_CHECKING=False AWS_PROFILE=myaws ap aws.yml --ssh-common-args="-i /home/m/.ssh/myaws.pem"
  #
  ####################
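  # note: "ap" above is assumed to be a shell alias for ansible-playbook;
  # spelled out, the invocation would be:
  # ANSIBLE_HOST_KEY_CHECKING=False AWS_PROFILE=myaws ansible-playbook aws.yml --ssh-common-args="-i /home/m/.ssh/myaws.pem"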
  vars:
    region: eu-west-1
    VPC: vpc-c732c7a3
  tasks:
    - name: rules for my aws security group
      ec2_group:
        name: myaws
        description: allow incoming traffic only from hetzner.osuv.de
        region: "{{ region }}"
        state: present
        vpc_id: "{{ VPC }}"
        rules:
          - proto: all
            rule_desc: hetzner.osuv.de
            cidr_ip: "{{ lookup('dig', 'hetzner.osuv.de') }}/32"
          - proto: all
            rule_desc: internal traffic
            cidr_ip: "172.0.0.0/8"
        tags:
          Name: myaws
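    # note on the "internal traffic" rule: 172.0.0.0/8 also covers public address
    # space; the RFC 1918 private range is 172.16.0.0/12. If the intent is to allow
    # only VPC-internal traffic, the VPC CIDR (e.g. 172.31.0.0/16 on a default VPC)
    # would be the tighter choice.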
    - name: add workstation
      ec2:
        region: "{{ region }}"
        keypair: myaws
        group: myaws
        instance_type: t3a.small
        image: ami-0035184034468cd86 # fedora 30
        wait: yes
        vpc_subnet_id: subnet-41d30025 # eu-west-1c
        assign_public_ip: yes
        count: 1
        instance_tags:
          Name: ws
      register: ws
    - name: create 3 spot instances
      ec2:
        region: "{{ region }}"
        spot_price: 0.0036
        spot_wait_timeout: 60
        keypair: myaws
        group: myaws
        instance_type: t3a.micro
        image: ami-0035184034468cd86 # fedora 30
        wait: yes
        vpc_subnet_id: subnet-41d30025 # eu-west-1c
        assign_public_ip: yes
        count: 3
        instance_tags:
          Name: spot
      register: ec2
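    # spot_price is the maximum bid in USD per hour; if the spot market price
    # stays above it, the request is not fulfilled and the task gives up after
    # spot_wait_timeout seconds.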
    - name: Wait for ws SSH to come up
      wait_for:
        host: "{{ item.public_ip }}"
        port: 22
        state: started
      with_items: "{{ ws.instances }}"
    - name: Wait for spot SSH to come up
      wait_for:
        host: "{{ item.public_ip }}"
        port: 22
        state: started
      with_items: "{{ ec2.instances }}"
    - name: add all spot instances to the launched host group
      add_host:
        hostname: "{{ item.public_ip }}"
        groupname: launched
      with_items: "{{ ec2.instances }}"
    - name: add the ws instance to the ws host group
      add_host:
        hostname: "{{ item.public_ip }}"
        groupname: ws
      with_items: "{{ ws.instances }}"
    - name: add one spot instance to the seed host group
      add_host:
        hostname: "{{ ec2.instances[0].public_ip }}"
        groupname: seed
    - name: copy private ips for 2nd play
      set_fact:
        private_ips: "{{ private_ips | default([]) + [item.private_ip] }}"
      with_items: "{{ ec2.instances }}"
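    # private_ips is collected on localhost so the "setup gluster cluster" play
    # below can read it via hostvars['localhost']['private_ips'] when building
    # the trusted storage pool.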
    - name: create glusterfs ebs volumes
      ec2_vol:
        region: "{{ region }}"
        instance: "{{ item.id }}"
        volume_size: 5
        device_name: /dev/sdf
      with_items: "{{ ec2.instances }}"
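    # on Nitro-based instance types such as t3a, attached EBS volumes show up
    # as NVMe block devices, which is why the volume requested as /dev/sdf is
    # formatted and mounted as /dev/nvme1n1 in the next play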
############
#
# configure all
# spot instances
#
############
- name: Configure all instances
  hosts: launched
  become: True
  user: fedora
  gather_facts: False
  tasks:
    - name: bootstrap ansible usage by checking availability of python
      raw: test -e /usr/bin/python || (ln -s /usr/bin/python3 /usr/bin/python)
    - name: Create a xfs filesystem on /dev/nvme1n1
      filesystem:
        fstype: xfs
        dev: /dev/nvme1n1
    - name: Mount up xfs device
      mount:
        path: /mnt
        src: /dev/nvme1n1
        fstype: xfs
        opts: noatime
        state: mounted # "present" would only write the fstab entry without mounting
    - name: install glusterfs
      dnf:
        name: glusterfs-server
        state: present
        update_cache: yes
    - name: start and enable glusterfs
      systemd:
        name: glusterd
        state: started
        enabled: yes
    - name: Create brick directory on the xfs-mounted ebs volume
      file:
        path: /mnt/vol1
        state: directory
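    # /mnt/vol1 is the brick directory that the "setup gluster cluster" play
    # below turns into the replicated volume test1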
#############
#
# configure one
# spot instance as seed
#############
- name: setup gluster cluster
  hosts: seed
  become: True
  user: fedora
  gather_facts: False
  tasks:
    - name: Create a trusted storage pool
      gluster_peer:
        state: present
        nodes: "{{ hostvars['localhost']['private_ips'] }}"
    - name: create gluster volume
      gluster_volume:
        state: present
        name: test1
        bricks: /mnt/vol1
        replicas: 3
        force: yes
        cluster: "{{ hostvars['localhost']['private_ips'] }}"
        options:
          performance.cache-size: 128MB
          performance.write-behind: "off"
          performance.quick-read: "on"
      run_once: true
    - name: start gluster volume
      gluster_volume:
        state: started
        name: test1
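    # a quick manual check from any pool member (not part of the play):
    #   sudo gluster peer status
    #   sudo gluster volume info test1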
#############
#
# configure ws
#############
- name: setup ws
  hosts: ws
  become: True
  user: fedora
  gather_facts: False
  tasks:
    - name: bootstrap ansible usage by checking availability of python
      raw: test -e /usr/bin/python || (ln -s /usr/bin/python3 /usr/bin/python)
    - name: install packages
      become: True
      yum:
        name: "{{ packages }}"
        state: installed
      vars:
        packages:
          - htop
          - nano
          - nmap
          - screen
          - git
          - lsof
          - docker
          - ncdu
          - nfs-utils
          - glusterfs-client
    - name: install python packages
      become: True
      pip:
        name: "{{ packages }}"
        executable: pip-3.7
      vars:
        packages:
          - ansible
          - boto3
          - boto
          - awslogs
          - mycli
          - docker-py
    - name: create directory for code-server
      file:
        path: /home/fedora/code-server
        state: directory
    - name: Download code server
      unarchive:
        src: https://github.com/cdr/code-server/releases/download/1.939-vsc1.33.1/code-server1.939-vsc1.33.1-linux-x64.tar.gz
        dest: /home/fedora/code-server
        remote_src: yes
    # - name: apply code-server start on reboot
    #   cron:
    #     name: "apply code-server start on reboot"
    #     special_time: reboot
    #     job: "cd /home/fedora/code-server/code-server1.939-vsc1.33.1-linux-x64; nohup ./code-server /home/fedora/ --password=password </dev/null >/dev/null 2>&1 &"
# mount the gluster volume from a client, e.g.:
# mount -t glusterfs 172.31.4.217:/test1 /oi
#
# Shrink and grow the cluster
# remove a brick and decrease the replica count
# sudo gluster volume remove-brick test1 replica 2 172.31.5.97:/mnt/vol1 force
# detach the peer
# sudo gluster peer detach 172.31.5.97
# add a new peer
# sudo gluster peer probe 172.31.5.97
# add a brick on the new peer and increase the replica count
# sudo gluster volume add-brick test1 replica 3 172.31.5.97:/mnt/vol1 force
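# after growing back to replica 3 the new brick still has to be populated;
# triggering and checking the self-heal (standard gluster commands, run on any
# pool member) would look like:
# sudo gluster volume heal test1 full
# sudo gluster volume heal test1 info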