Clean up old roles.

master · josiah · 2 years ago
commit 116b28a80f (parent fe8c075819)

@ -1,25 +0,0 @@
- name: Deploy Arke
  hosts: dockerhosts
  remote_user: josiah
  tasks:
    - name: Get hostname for troubleshooting
      command: hostname
      register: hostname
    - debug: msg="{{ hostname.stdout }}"
    - name: Pull latest version from github
      shell: cd /home/josiah/arke/ && git pull
    - name: Build from the latest version
      shell: cd ~/arke/ && docker build -t arke -f dockerfile .
    - name: Check if there are any existing arke containers running
      shell: docker ps | grep 'arke' | awk '{print $1}'
      register: result
    - name: and kill it
      # this has to be a shell because command: isn't interactive
      shell: docker ps | grep 'arke' | awk '{print $1}' | xargs docker kill
      when: result.stdout != ""
    - name: Remove any existing containers using the arke name, it's mine and I must have it
      shell: docker container rm arke
      ignore_errors: True
    - name: Run the newly built docker container
      command: docker run -d --name arke --mount source=towervol,target=/shared/ arke:latest
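The check / kill / rm / run dance above could collapse into one idempotent task. A minimal sketch using Ansible's docker_container module (same arke:latest image and towervol volume as above; an untested alternative, not what this playbook actually shipped):

- name: Run arke via docker_container instead of shelling out
  docker_container:
    name: arke
    image: arke:latest
    state: started
    recreate: yes        # removes any existing 'arke' container and recreates it
    volumes:
      - towervol:/shared/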

@ -1,19 +0,0 @@
---
- name: Ensure the apps/gitea directory exists
  file: state=directory path=/home/josiah/apps/gitea/ owner=josiah group=josiah mode=0700
- name: copy over gitea config files
  template:
    src: "{{ item.src }}"
    dest: "{{ item.dest }}"
    mode: 0777
  with_items:
    - {src: 'gitea-compose.yml', dest: '/home/josiah/apps/gitea/docker-compose.yml'}
- name: Deploy gitea stack
  docker_compose:
    state: present
    # name: gitea
    project_src: "/home/josiah/apps/gitea/"

@ -1,39 +0,0 @@
version: "3"
networks:
gitea:
external: false
services:
gitea-server:
image: gitea/gitea:latest
environment:
- USER_UID=1000
- USER_GID=1000
- DB_TYPE=postgres
- DB_HOST=db:5432
- DB_NAME={{ GITEA_DB_NAME }}
- DB_USER={{ GITEA_DB_USER }}
- DB_PASSWD={{ GITEA_DB_PASSWD }}
restart: always
networks:
- gitea
volumes:
- /mnt/volume_sfo2_01/gitea:/data
ports:
- "3000:3000"
- "222:22"
depends_on:
- db
gitea-db:
image: postgres:9.6
restart: always
environment:
- POSTGRES_USER={{ GITEA_POSTGRES_USER }}
- POSTGRES_PASSWORD={{ GITEA_POSTGRES_USER }}
- POSTGRES_DB={{ GITEA_POSTGRES_USER }}
networks:
- gitea
volumes:
- /mnt/volume_sfo2_01/psql:/var/lib/postgresql/data
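The {{ }} placeholders get filled in when the role templates this file out, with values pulled from a vault file. A hypothetical shape for that vault file (variable names taken from the compose file above; values obviously invented):

---
GITEA_DB_NAME: gitea
GITEA_DB_USER: gitea
GITEA_DB_PASSWD: not-a-real-password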

@ -1,42 +0,0 @@
- name: setup mojo repo
  hosts: prod
  tasks:
    - name: print hostname
      command: hostname
      register: hostname
    - debug: msg="{{ hostname.stdout }}"
    - name: try to update directory
      shell: cd /home/josiah/mojojojo-bot && git pull
      register: result
      ignore_errors: True
    - name: if the folder doesn't exist, clone from remote
      shell: cd /home/josiah/ && git clone http://git.awful.club/hosted/mojojojo-bot.git
      when: result is failed

- name: Deploy mojo webclient
  hosts: prod
  vars_files:
    - vault-vars-mojo.yml
  tasks:
    - name: Build from the latest version
      shell: cd /home/josiah/mojojojo-bot/mojo-web && docker build -t mojo-web -f dockerfile .
    - name: Check if there are any existing mojo-web containers running and kill them
      # this has to be a shell because command: isn't interactive
      shell: docker ps | grep 'mojo-web' | awk '{print $1}' | xargs docker kill
      ignore_errors: True
    - name: Run the newly built docker container
      command: docker container run -e "SLACK_BOT_TOKEN={{ VAULT_SLACK_BOT_TOKEN }}" --restart "always" -d --mount type="bind",source="/mnt/volume_sfo2_znc",target=/shared/ mojo-web

- name: Deploy mojo rtmclient
  hosts: prod
  vars_files:
    - vault-vars-mojo.yml
  tasks:
    - name: Build from the latest version
      shell: cd /home/josiah/mojojojo-bot/mojo-rtm && docker build -t mojo-rtm -f dockerfile .
    - name: Check if there are any existing mojo-rtm containers running and kill them
      # this has to be a shell because command: isn't interactive
      shell: docker ps | grep 'mojo-rtm' | awk '{print $1}' | xargs docker kill
      ignore_errors: True
    - name: Run the newly built docker container
      command: docker container run -e "SLACK_BOT_TOKEN={{ VAULT_SLACK_BOT_TOKEN }}" --restart "always" -d --mount type="bind",source="/mnt/volume_sfo2_znc",target=/shared/ mojo-rtm
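The webclient and rtmclient plays are identical except for the image name. A sketch of folding them into one looped play (an untested consolidation, not the original layout):

- name: Deploy mojo clients
  hosts: prod
  vars_files:
    - vault-vars-mojo.yml
  tasks:
    - name: Build, kill, and rerun each client
      shell: |
        cd /home/josiah/mojojojo-bot/{{ item }} && docker build -t {{ item }} -f dockerfile .
        docker ps | grep '{{ item }}' | awk '{print $1}' | xargs -r docker kill
        docker container run -e "SLACK_BOT_TOKEN={{ VAULT_SLACK_BOT_TOKEN }}" --restart "always" -d --mount type="bind",source="/mnt/volume_sfo2_znc",target=/shared/ {{ item }}
      with_items:
        - mojo-web
        - mojo-rtm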

@ -1,18 +0,0 @@
---
- name: Ensure the apps/nextcloud directory exists
  file: state=directory path=/home/josiah/apps/nextcloud/ owner=josiah group=josiah mode=0700
- name: copy over nextcloud config files
  template:
    src: "{{ item.src }}"
    dest: "{{ item.dest }}"
    mode: 0777
  with_items:
    - {src: 'nextcloud-compose.yml', dest: '/home/josiah/apps/nextcloud/docker-compose.yml'}
- name: Deploy the nextcloud stack.
  docker_compose:
    state: present
    project_src: "/home/josiah/apps/nextcloud/"

@ -1,45 +0,0 @@
version: '3'
volumes:
  nextcloud:
  db:
services:
  nextcloud-db:
    image: postgres:9.6
    ports:
      - 5432:5432
    restart: always
    volumes:
      - /mnt/shared_document_store/nextcloud/sql:/var/lib/postgresql
    environment:
      - USER_UID=1000
      - USER_GID=1000
      - POSTGRES_ROOT_PASSWORD={{ NEXTCLOUD_POSTGRES_ROOT_PASSWORD }}
      - POSTGRES_PASSWORD={{ NEXTCLOUD_POSTGRES_PASSWORD }}
      - POSTGRES_DB={{ NEXTCLOUD_POSTGRES_DATABASE }}
      - POSTGRES_USER={{ NEXTCLOUD_POSTGRES_USER }}
      - POSTGRES_HOST={{ NEXTCLOUD_POSTGRES_HOST }}
      - NEXTCLOUD_ADMIN_USER={{ NEXTCLOUD_ADMIN_USER }}
      - NEXTCLOUD_ADMIN_PASSWORD={{ NEXTCLOUD_ADMIN_PASSWORD }}
  nextcloud-app:
    image: nextcloud:18
    ports:
      - 8080:80
    links:
      - nextcloud-db
    volumes:
      - /mnt/shared_document_store/nextcloud/html/:/var/www/html/
    depends_on:
      - nextcloud-db
    environment:
      - USER_UID=1000
      - USER_GID=1000
      - POSTGRES_ROOT_PASSWORD={{ NEXTCLOUD_POSTGRES_ROOT_PASSWORD }}
      - POSTGRES_PASSWORD={{ NEXTCLOUD_POSTGRES_PASSWORD }}
      - POSTGRES_DB={{ NEXTCLOUD_POSTGRES_DATABASE }}
      - POSTGRES_USER={{ NEXTCLOUD_POSTGRES_USER }}
      - POSTGRES_HOST={{ NEXTCLOUD_POSTGRES_HOST }}
      - NEXTCLOUD_ADMIN_USER={{ NEXTCLOUD_ADMIN_USER }}
      - NEXTCLOUD_ADMIN_PASSWORD={{ NEXTCLOUD_ADMIN_PASSWORD }}

@ -1,18 +0,0 @@
- name: Copy splunk modules to corp deploy server
  hosts: corp
  # vars_files:
  #   - mojo-vars-vault.yml
  remote_user: josiah_ledbetter
  tasks:
    - name: Copy item to remote server
      copy:
        # make sure the permissions of the file you are copying are readable by the ansible user
        src: /Users/josiah_ledbetter/Documents/projects/splunk/configs/filething/
        # src must not end in "/" or it will only copy files inside dir instead of entire dir
        dest: /opt/splunk/temp/
        owner: splunk
        group: splunk
      # these below affect the entire task, and should not go under the module's indentation level. This took WAY too long to figure out.
      become: "true"
      become_method: sudo
      become_user: root
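The trailing-slash warning in that comment is worth seeing side by side. copy's src behaves like rsync; a sketch using the same paths as above:

- copy:
    # trailing slash: copies only the files *inside* filething/ into /opt/splunk/temp/
    src: /Users/josiah_ledbetter/Documents/projects/splunk/configs/filething/
    dest: /opt/splunk/temp/

- copy:
    # no trailing slash: copies the directory itself, landing at /opt/splunk/temp/filething/
    src: /Users/josiah_ledbetter/Documents/projects/splunk/configs/filething
    dest: /opt/splunk/temp/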

@ -1,8 +0,0 @@
all:
  children:
    corp:
      hosts:
        AUS01GMSPLUNK01:
    vdc:
      hosts:
        v1-cs-sec-splunk01:

@ -1,17 +0,0 @@
---
- name: copy over test compose file
  template:
    src: "{{ item.src }}"
    dest: "{{ item.dest }}"
    mode: 0777
  with_items:
    - {src: 'test-compose.yml', dest: '/home/josiah/apps/test/docker-compose.yml'}
    - {src: 'traefik.yml', dest: '/home/josiah/apps/test/traefik.yml'}
    # - {src: 'traefik.toml', dest: '/home/josiah/apps/test/traefik.toml'}
- name: Deploy awful stack
  docker_stack:
    state: present
    name: test
    compose:
      - /home/josiah/apps/test/docker-compose.yml

@ -1,26 +0,0 @@
version: "3.3"
services:
traefik:
image: "traefik:v2.2"
container_name: "traefik"
ports:
- "80:80"
- "443:443"
- "8080:8080"
environment:
DO_AUTH_TOKEN: "{{ DO_AUTH_TOKEN }}"
volumes:
- "/home/josiah/apps/test/letsencrypt/:/letsencrypt"
- "/var/run/docker.sock:/var/run/docker.sock:ro"
- "/home/josiah/apps/test/traefik.yml:/etc/traefik/traefik.yml"
whoami:
image: "containous/whoami"
container_name: "simple-service"
labels:
- "traefik.enable=true"
- "traefik.http.routers.whoami.rule=Host(`whoami.awful.club`)"
- "traefik.http.routers.whoami.entrypoints=websecure"
- "traefik.http.routers.whoami.tls.certresolver=awful-letsencrypt"

@ -1,39 +0,0 @@
# defaultEntryPoints must be at the top
# because it should not be in any table below
defaultEntryPoints: ["http", "https"]
global:
checkNewVersion: true
sendAnonymousUsage: true
api:
dashboard: false
debug: true
insecure: false
entryPoints:
ssh:
address: ":22"
web:
address: ":80"
websecure:
address: ":443"
providers:
docker:
watch: true
swarmMode: false
endpoint: "unix:///var/run/docker.sock"
exposedbydefault: false
certificatesResolvers:
awful-letsencrypt:
acme:
email: "me@jowj.net"
storage: "/letsencrypt/acme.json"
# caServer: "https://acme-staging-v02.api.letsencrypt.org/directory"
dnsChallenge:
provider: "digitalocean"
log:
filePath: "/log/traefik.log"
level: DEBUG
accessLog:
filePath: "/log/access.log"
bufferingSize: 100

@ -1 +0,0 @@
sys_packages: [ 'curl', 'vim', 'git', 'emacs', 'build-essential', 'mosh', 'python', 'python3-pip' ]

@ -1,31 +0,0 @@
Role Name
=========

Playbook to bootstrap wg vpn for my personal use

Requirements
------------

this playbook doesn't handle /creating the machines/. they have to be online and accessible either in my home network or in my cloud setup.

Role Variables
--------------

nothing lol

usage
-----

~ansible-playbook -i hosts.yml playbook.yml --tags=deploy_these_things --ask-vault-pass --ask-become-pass~

misc
----

useful for generating mobile device configs: qrencode -t ansiutf8 < /etc/wireguard/mobile.conf

License
-------

BSD

Author Information
------------------

email: me@jowj.net

@ -1,5 +0,0 @@
---
wireguard_server_listen_port: 51820
wireguard_server_lan_interface: enp3s0

@ -1,5 +0,0 @@
---
# handlers file for wg_vpn
- name: restart wireguard
  service: name=wg-quick@wg0.service state=restarted

@ -1,71 +0,0 @@
---
# tasks file for wg_vpn
# playbook to go from a "base configured" server to a wg vpn peer
- name: add unstable to repos
  shell: |
    echo "deb http://deb.debian.org/debian/ unstable main" > /etc/apt/sources.list.d/unstable.list
    printf 'Package: *\nPin: release a=unstable\nPin-Priority: 90\n' > /etc/apt/preferences.d/limit-unstable
- name: Install linux headers
  apt:
    name: "linux-headers-{{ ansible_kernel }}"
- name: install wireguard
  apt:
    name:
      - wireguard
      - build-essential
      - dkms
      - wireguard-dkms
      - wireguard-tools
    update_cache: yes
    state: latest
- name: Enable IP forwarding
  sysctl: name={{ item }} value=1 state=present
  with_items:
    - net.ipv4.ip_forward
    - net.ipv6.conf.all.forwarding
- name: Create wireguard config directory
  file: state=directory path=/etc/wireguard owner=root group=root mode=0700
- name: Install wireguard configuration
  template: src=wg0.conf dest=/etc/wireguard/wg0.conf owner=root group=root mode=0600
  notify: restart wireguard
- name: Start wireguard
  service: name=wg-quick@wg0.service enabled=yes state=started
- name: Create matrix.conf file in /etc/wireguard/.
  template: src=ios.conf dest=/etc/wireguard/ owner=root mode=0600
  tags:
    - clients
    - client_matrix
- name: Create frisket.conf file in /etc/wireguard/.
  template: src=frisket.conf dest=/etc/wireguard/ owner=root mode=0600
  tags:
    - clients
    - client_frisket
- name: Create ling8.conf file in /etc/wireguard/.
  template: src=ling8.conf dest=/etc/wireguard/ owner=root mode=0600
  tags:
    - clients
    - client_ling8
- name: Create pvl.conf file in /etc/wireguard/.
  template: src=pvl.conf dest=/etc/wireguard/ owner=root mode=0600
  tags:
    - clients
    - client_pvl
- name: Create larva file in /etc/wireguard/.
  template: src=wg0-larva.conf dest=/etc/wireguard/ owner=root mode=0600
  tags:
    - clients
    - client_hatchery
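The five per-client template tasks differ only in src and tag. A sketch of one looped task instead (untested, and it trades away the per-client tags, which may be why it wasn't done this way):

- name: Render all client configs into /etc/wireguard/
  template: src={{ item }} dest=/etc/wireguard/ owner=root mode=0600
  with_items:
    - ios.conf
    - frisket.conf
    - ling8.conf
    - pvl.conf
    - wg0-larva.conf
  tags:
    - clients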

@ -1,9 +0,0 @@
[Interface]
PrivateKey = {{ wireguard_clients[4]['privkey'] }}
Address = {{ wireguard_vpn_network | ipsubnet(32, wireguard_clients[4]['offset']) }}
DNS = 1.1.1.1
[Peer]
PublicKey = {{ wireguard_server_pubkey }}
Endpoint = vpn.awful.club:51820
AllowedIPs = 0.0.0.0/0

@ -1,9 +0,0 @@
[Interface]
PrivateKey = {{ wireguard_clients[0]['privkey'] }}
Address = {{ wireguard_vpn_network | ipsubnet(32, wireguard_clients[0]['offset']) }}
DNS = 1.1.1.1
[Peer]
PublicKey = {{ wireguard_server_pubkey }}
Endpoint = vpn.awful.club:51820
AllowedIPs = 0.0.0.0/0

@ -1,9 +0,0 @@
[Interface]
PrivateKey = <ios private key>
Address = 10.200.219.10/32
DNS = 1.1.1.1
[Peer]
PublicKey = e49UyNg/kqPETyT9K6nqIYjtTwlR8hY8Brm/P66xnmo=
Endpoint = vpn.awful.club:51820
AllowedIPs = 0.0.0.0/0

@ -1,9 +0,0 @@
[Interface]
PrivateKey = {{ wireguard_clients[2]['privkey'] }}
Address = {{ wireguard_vpn_network | ipsubnet(32, wireguard_clients[2]['offset']) }}
DNS = 1.1.1.1
[Peer]
PublicKey = {{ wireguard_server_pubkey }}
Endpoint = vpn.awful.club:51820
AllowedIPs = 0.0.0.0/0

@ -1,9 +0,0 @@
[Interface]
PrivateKey = {{ wireguard_clients[1]['privkey'] }}
Address = {{ wireguard_vpn_network | ipsubnet(32, wireguard_clients[1]['offset']) }}
DNS = 1.1.1.1
[Peer]
PublicKey = {{ wireguard_server_pubkey }}
Endpoint = vpn.awful.club:51820
AllowedIPs = 0.0.0.0/0

@ -1,11 +0,0 @@
[Interface]
PrivateKey = {{ wireguard_clients[3]['privkey'] }}
Address = {{ wireguard_vpn_network | ipsubnet(32, wireguard_clients[3]['offset']) }}
PostUp = iptables -A FORWARD -i %i -j ACCEPT; iptables -A FORWARD -o %i -j ACCEPT; iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
PostDown = iptables -D FORWARD -i %i -j ACCEPT; iptables -D FORWARD -o %i -j ACCEPT; iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE
[Peer]
PublicKey = {{ wireguard_server_pubkey }}
Endpoint = vpn.awful.club:51820
AllowedIPs = 10.200.219.0/24

@ -1,38 +0,0 @@
[Interface]
PrivateKey = {{ wireguard_server_privkey }}
{#
We want the Address field here to be an IP address
with the whole network in CIDR notation, like 10.0.0.1/24.
If wireguard_vpn_network is a CIDR network like 10.0.0.0/24,
and wireguard_server_offset is an integer like 1,
this will produce an offset of the start of the network + CIDR prefix,
which in this case will be the desired 10.0.0.1/24.
#}
Address = {{ wireguard_vpn_network | ipaddr(wireguard_server_offset) }}
ListenPort = {{ wireguard_server_listen_port }}
PostUp = iptables -A FORWARD -i %i -j ACCEPT; iptables -A FORWARD -o %i -j ACCEPT; iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
PostDown = iptables -D FORWARD -i %i -j ACCEPT; iptables -D FORWARD -o %i -j ACCEPT; iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE
{% for client in wireguard_clients %}
[Peer]
# {{ client.name }}
PublicKey = {{ client.pubkey }}
{#
We want the Address field here to be an IP address
withOUT the whole network in CIDR notation, like 10.0.0.15/32.
If wireguard_vpn_network is a CIDR network like 10.0.0.0/24,
and client.offset is an integer like 15,
this will produce an offset of the start of the network with a /32 CIDR prefix
which in this case will be the desired 10.0.0.15/32.
#}
{% if client.name == 'larva' %}
AllowedIPs = {{ wireguard_vpn_network | ipsubnet(32, client.offset) }}, 192.168.0.0/16
{% else %}
AllowedIPs = {{ wireguard_vpn_network | ipsubnet(32, client.offset) }}
{% endif %}
{% endfor %}
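A worked example of the offset math in those comments, assuming wireguard_vpn_network is 10.200.219.0/24 (the subnet the test playbook pings and ios.conf hardcodes) and wireguard_server_offset is 1:

{{ '10.200.219.0/24' | ipaddr(1) }}          renders as 10.200.219.1/24   (server Address, network-wide CIDR)
{{ '10.200.219.0/24' | ipsubnet(32, 10) }}   renders as 10.200.219.10/32  (a client's AllowedIPs, host-only CIDR)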

@ -1,25 +0,0 @@
---
- hosts: vpn
  roles:
    - wg_vpn
  tasks:
    - name: Verify remote wg node has wg running
      shell: ping -c1 10.200.219.1
      register: remote_result
    - assert:
        that:
          - "'1 packets transmitted, 1 received' in remote_result.stdout"
    - name: Verify local wg node has wg running
      shell: ping -c1 10.200.219.2
      register: local_result
    - assert:
        that:
          - "'1 packets transmitted, 1 received' in local_result.stdout"
    - name: Verify that local node can talk to remote
      shell: ping -c1 10.200.219.1
      register: tunnel_result
    - assert:
        that:
          - "'1 packets transmitted, 1 received' in tunnel_result.stdout"

@ -1,2 +0,0 @@
---
# vars file for wg_vpn

@ -1,31 +0,0 @@
- name: deploy znc server
  hosts: dockerhosts
  remote_user: josiah
  tasks:
    - name: update or create the directory
      shell: cd /home/josiah/znc-docker/ && git pull
      register: result
      ignore_errors: True
    # if the folder doesn't exist, clone source.
    - shell: cd /home/josiah && git clone https://github.com/jowj/znc-docker.git
      when: result is failed
    - name: build from latest version
      shell: cd ~/znc-docker/full && docker build -t znc -f Dockerfile .
    - name: Check if there are any existing ZNC containers running
      shell: docker ps | grep 'znc' | awk '{print $1}'
      register: result
    - name: if there are running containers, kill them
      shell: docker ps | grep 'znc' | awk '{print $1}' | xargs docker kill
      when: result.stdout != ""
    - name: discover if any containers with ZNC in name
      # this has to be a shell because command: isn't interactive
      shell: docker container ls -a | grep 'znc'
      register: result
      ignore_errors: True
    - name: remove any existing containers with ZNC in name
      shell: docker container rm znc
      when: result is succeeded
    - name: run the container (assumes the volume is already set up)
      command: docker run -d -p 5000:5000 --mount type="bind",source="/mnt/volume_sfo2_znc/",target=/znc-data znc