Compare commits
1 commit
master...features/i

Author | SHA1 | Date
---|---|---
 | a8887edb74 |

.gitignore (vendored)
@@ -1,5 +1,4 @@
.terraform
,vscode
wireguard_profiles/*
*~
inventory/*

@@ -6,26 +6,32 @@
vars_prompt:
- name: ssh_pub_key_file
prompt: Location of your public ssh key
default: "~/.ssh/keys/Amazon_RSA.pub"
default: "~/.ssh/id_rsa.pub"
private: no
- name: aws_region
prompt: AWS Region to use for instance
default: "eu-central-1"
prompt: AWS Region to use for instaance
default: "us-east-1"
private: no
- name: aws_ami
prompt: Disk image to use for instance (default is debian buster arm64)
default: "ami-07c35db4b1fe9aedd"
private: no
- name: aws_type
prompt: Instance type to request
default: "t4g.nano"
private: no
- name: dns_name
prompt: Which hostname shall be registered for the host (Empty = no dns, Zone needs to be route53 managed)?
default: ""
private: no
vars:
dns_zone_name: "{{ dns_name | regex_replace('^[\\w-]+\\.', '') }}"
- dns_zone_name: "{{ dns_name | regex_replace('^[\\w-]+\\.', '') }}"
roles:
- aws_graviton_nano
- aws_graviton_nano_spot

- name: include playbook for pihole#
import_playbook: headscale-server.yml
- name: include playbook for pihole
import_playbook: pihole.yml

#- name: include playbook for pihole#
# import_playbook: pihole.yml
- name: Include playbook to install wireguard
import_playbook: wireguard.yml

#- name: include playbook for wireguard server
# import_playbook: wireguard_pihole_only.yml

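Aside from the prompt changes, the vars block derives dns_zone_name by stripping the leftmost label from the prompted hostname. A minimal local sketch (not part of the change set, sample hostname assumed) showing what the regex_replace filter produces:

    # Hypothetical check of the dns_zone_name expression above; run with ansible-playbook.
    - hosts: localhost
      gather_facts: false
      vars:
        dns_name: "vpn.example.org"                                    # assumed sample value
        dns_zone_name: "{{ dns_name | regex_replace('^[\\w-]+\\.', '') }}"
      tasks:
        - name: show the derived Route 53 zone
          ansible.builtin.debug:
            msg: "{{ dns_zone_name }}"                                 # prints: example.org

With dns_name left empty, the expression simply returns an empty string, matching the "Empty = no dns" behaviour described in the prompt.
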
@@ -1,15 +0,0 @@
---
# Install headscale
- name: Install headscale
hosts: launched
remote_user: admin
become: true
vars_prompt:
- name: install_headscale
prompt: Shall the headscale server software be installed (Defaults to false)?
default: false
private: no
roles:
- role: headscale-server
when: install_headscale

@@ -1,15 +1,50 @@
- name: copy ssh public key
amazon.aws.ec2_key:
region: "{{ aws_region }}"
name: vpn_key
key_material: "{{ lookup('file', '{{ ssh_pub_key_file }}') }}"
region: "{{ aws_region }}"

- name: create a VPC and request an IPv6 CIDR
amazon.aws.ec2_vpc_net:
name: wg-aws-net
cidr_block: 10.10.0.0/16
ipv6_cidr: True
region: "{{ aws_region }}"
register: vpc_net

- name: Create subnet with IPv6 block assigned
amazon.aws.ec2_vpc_subnet:
state: present
assign_instances_ipv6: true
map_public: true
vpc_id: "{{ vpc_net.vpc.id }}"
cidr: 10.10.0.0/24
ipv6_cidr: "{{ vpc_net.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block | replace('/56','/64') }}"
register: vpc_subnet

- name: create an internet gateway for vpc
ec2_vpc_igw:
vpc_id: "{{ vpc_net.vpc.id }}"
state: present
register: igw

- name: Set up public subnet route table
ec2_vpc_route_table:
vpc_id: "{{ vpc_net.vpc.id }}"
region: "{{ aws_region }}"
subnets:
- "{{ vpc_subnet.subnet.id }}"
routes:
- dest: 0.0.0.0/0
gateway_id: "{{ igw.gateway_id }}"
register: route

- name: network security policy that allows all traffic incoming and outgoing
amazon.aws.ec2_group:
region: "{{ aws_region }}"
name: "vpn allow all"
description: allow all traffic/protocol/ports
vpc_id: "{{ vpc_net.vpc.id }}"
rules:
- proto: all
cidr_ip: 0.0.0.0/0
@@ -20,40 +55,23 @@
cidr_ip: 0.0.0.0/0
register: security_group

- name: find arm64 ami for debian
amazon.aws.ec2_ami_info:
region: "{{ aws_region }}"
owners: amazon
filters:
name: "debian-11-arm64-20*"
architecture: "arm64"
register: amis

- name: Extract the most recently created AMI from the list
ansible.builtin.set_fact:
aws_ami: "{{ amis.images[-1].image_id }}"

- name: debug
debug:
var: aws_ami

- name: create graviton instance
amazon.aws.ec2_instance:
- name: create graviton spot instance
community.aws.ec2_instance:
region: "{{ aws_region }}"
key_name: vpn_key
name: "{{ dns_name }}"
security_group: "{{ security_group.group_id }}"
instance_type: "t4g.nano"
instance_type: "{{ aws_type }}"
image_id: "{{ aws_ami }}"
instance_initiated_shutdown_behavior: terminate
vpc_subnet_id: "{{ vpc_subnet.subnet.id }}"
network:
assign_public_ip: true
wait: true
state: running
assign_public_ip: yes
wait: yes
tags:
Environment: Testing
register: graviton

- name: generate route53 dns entry for the instance
amazon.aws.route53:
route53:
command: create
overwrite: yes
zone: "{{ dns_zone_name }}"
@@ -73,11 +91,11 @@

- name: Add new instance to host group
add_host:
hostname: "{{ item.public_ip_address }}"
hostname: "{{ item.public_ip }}"
groupname: launched
loop: "{{ graviton.instances }}"

- name: Print public IP of this server
debug:
msg: Your instance has th public IP address {{ item.public_ip_address }}
msg: Your instance has th public IP address {{ item.public_ip }}
loop: "{{ graviton.instances }}"

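Two of the expressions in this tasks file can be sanity-checked offline: the replace filter that narrows the VPC's /56 to a /64 for the subnet, and the list indexing that treats the last entry of the ec2_ami_info result as the newest AMI. A minimal local sketch (not part of the change set, all values assumed) exercising both; the second task also shows an explicit sort on creation_date instead of relying on list position:

    - hosts: localhost
      gather_facts: false
      vars:
        vpc_ipv6_cidr: "2600:1f16:aa0:1200::/56"      # assumed VPC block of the kind ec2_vpc_net reports
        amis:                                          # assumed, reduced shape of ec2_ami_info output
          images:
            - { image_id: "ami-0000aaaa", creation_date: "2022-01-10T00:00:00.000Z" }
            - { image_id: "ami-0000bbbb", creation_date: "2022-03-05T00:00:00.000Z" }
      tasks:
        - name: show the /64 handed to the subnet
          ansible.builtin.debug:
            msg: "{{ vpc_ipv6_cidr | replace('/56', '/64') }}"   # -> 2600:1f16:aa0:1200::/64

        - name: pick the newest AMI by creation date rather than list position
          ansible.builtin.debug:
            msg: "{{ (amis.images | sort(attribute='creation_date') | last).image_id }}"   # -> ami-0000bbbb
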
@@ -1,14 +0,0 @@
---
- name: Update APT package cache
apt:
update_cache: true
upgrade: dist

- name: Install debian packages
apt:
name: "{{ item }}"
state: present
with_items:
- "unattended-upgrades"
- "joe"
- "fail2ban"

@@ -1,58 +0,0 @@
---
- name: Update APT package cache
apt:
update_cache: true
upgrade: dist

- name: Install debian packages
apt:
name: "{{ item }}"
state: present
with_items:
- "unattended-upgrades"
- "joe"
- "fail2ban"

- name: Download headscale .deb
get_url:
url="https://github.com/juanfont/headscale/releases/download/v0.24.0/headscale_0.24.0_linux_arm64.deb"
dest="/tmp/headscale.deb"

- name: Install my_package
apt: deb="/tmp/headscale.deb"

- name: determine name of host
ansible.builtin.set_fact:
headscale_hostname: "{{ inventory_hostname }}"

- name: determine name of network
ansible.builtin.set_fact:
headscale_base_domain: "{{ headscale_hostname | regex_replace('^[\\w-]+\\.', '') }}"

- name: generate config
template:
src: "config.yaml"
dest: "/etc/headscale/config.yaml"

- name: ensure directories are present
ansible.builtin.file:
path: "{{ item }}"
state: directory
mode: '0755'
with_items:
- /var/lib/headscale
- /var/lib/headscale/cache

- name: Enable systemd service
ansible.builtin.systemd:
name: headscale.service
state: started
enabled: true

# Exit node:
# curl -fsSL https://pkgs.tailscale.com/stable/debian/bullseye.noarmor.gpg | sudo tee /usr/share/keyrings/tailscale-archive-keyring.gpg >/dev/null
# curl -fsSL https://pkgs.tailscale.com/stable/debian/bullseye.tailscale-keyring.list | sudo tee /etc/apt/sources.list.d/tailscale.list
# apt-get update
# apt-get install tailscale
# tailscale up --advertise-exit-node --login-server https://headscale.wolkige.abgruen.de
#

@@ -1,50 +0,0 @@
---
# Headscale configuration
# addresses. ports and paths
server_url: "https://{{ headscale_hostname }}"
listen_addr: 0.0.0.0:443
metrics_listen_addr: 127.0.0.1:9090
grpc_listen_addr: 127.0.0.1:50443
grpc_allow_insecure: false
private_key_path: /var/lib/headscale/private.key
noise:
private_key_path: /var/lib/headscale/noise_private.key
# IP ranges & dns
prefixes:
v6: fd7a:115c:a1e0::/48
v4: 10.13.100.0/24
dns:
#override_local_dns: true
nameservers:
global:
- 1.1.1.1
magic_dns: true
base_domain: {{ headscale_base_domain }}

# DERP
derp:
server:
enabled: true
region_id: 999
region_code: "aws-headscale-maecki"
region_name: "aws-headscale-maecki"
stun_listen_addr: "0.0.0.0:3478"
private_key_path: /var/lib/headscale/derp_server_private.key
paths: []
auto_update_enabled: false
update_frequency: 24h

# DB
database:
type: sqlite3
sqlite:
path: /var/lib/headscale/db.sqlite

# TLS
acme_url: https://acme-v02.api.letsencrypt.org/directory
acme_email: ""
tls_letsencrypt_hostname: "{{ headscale_hostname }}"
tls_letsencrypt_cache_dir: /var/lib/headscale/cache
tls_letsencrypt_challenge_type: HTTP-01
tls_letsencrypt_listen: ":http"

@@ -1,108 +0,0 @@
#!/usr/bin/env bash
### BEGIN INIT INFO
# Provides: pihole-FTL
# Required-Start: $remote_fs $syslog $network
# Required-Stop: $remote_fs $syslog $network
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: pihole-FTL daemon
# Description: Enable service provided by pihole-FTL daemon
### END INIT INFO

FTLUSER=pihole
PIDFILE=/run/pihole-FTL.pid

is_running() {
pgrep -o "pihole-FTL" > /dev/null 2>&1
}


# Start the service
start() {
if is_running; then
echo "pihole-FTL is already running"
else
# Touch files to ensure they exist (create if non-existing, preserve if existing)
touch /var/log/pihole-FTL.log /var/log/pihole.log
touch /run/pihole-FTL.pid /run/pihole-FTL.port
touch /etc/pihole/dhcp.leases
mkdir -p /run/pihole
mkdir -p /var/log/pihole
chown pihole:pihole /run/pihole /var/log/pihole
# Remove possible leftovers from previous pihole-FTL processes
rm -f /dev/shm/FTL-* 2> /dev/null
rm /run/pihole/FTL.sock 2> /dev/null
# Ensure that permissions are set so that pihole-FTL can edit all necessary files
chown pihole:pihole /run/pihole-FTL.pid /run/pihole-FTL.port
chown pihole:pihole /etc/pihole /etc/pihole/dhcp.leases 2> /dev/null
chown pihole:pihole /var/log/pihole-FTL.log /var/log/pihole.log
chmod 0644 /var/log/pihole-FTL.log /run/pihole-FTL.pid /run/pihole-FTL.port /var/log/pihole.log
# Chown database files to the user FTL runs as. We ignore errors as the files may not (yet) exist
chown pihole:pihole /etc/pihole/pihole-FTL.db /etc/pihole/gravity.db 2> /dev/null
if setcap CAP_NET_BIND_SERVICE,CAP_NET_RAW,CAP_NET_ADMIN,CAP_SYS_NICE+eip "$(which pihole-FTL)"; then
su -s /bin/sh -c "/usr/bin/pihole-FTL" "$FTLUSER"
else
echo "Warning: Starting pihole-FTL as root because setting capabilities is not supported on this system"
pihole-FTL
fi
echo
fi
}

# Stop the service
stop() {
if is_running; then
pkill -o pihole-FTL
for i in {1..5}; do
if ! is_running; then
break
fi

echo -n "."
sleep 1
done
echo

if is_running; then
echo "Not stopped; may still be shutting down or shutdown may have failed, killing now"
pkill -o -9 pihole-FTL
exit 1
else
echo "Stopped"
fi
else
echo "Not running"
fi
echo
}

# Indicate the service status
status() {
if is_running; then
echo "[ ok ] pihole-FTL is running"
exit 0
else
echo "[ ] pihole-FTL is not running"
exit 1
fi
}


### main logic ###
case "$1" in
stop)
stop
;;
status)
status
;;
start|restart|reload|condrestart)
stop
start
;;
*)
echo $"Usage: $0 {start|stop|restart|reload|status}"
exit 1
esac

exit 0

@@ -1 +0,0 @@
PRIVACYLEVEL=0

@@ -20,6 +20,9 @@
name: "qrencode"
state: present

- name: Reboot to make shure wireguard kernel module is loadable
reboot:

- name: ensure wireguard services are stopped
command: "systemctl stop wg-quick@wg0"

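The new stop task shells out to systemctl through the command module. A hedged alternative (not part of the change set) is the ansible.builtin.systemd module, which targets the same unit, stays idempotent, and reports changed status correctly:

    - name: ensure wireguard service is stopped     # sketch only, same wg-quick@wg0 unit as above
      ansible.builtin.systemd:
        name: wg-quick@wg0
        state: stopped
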
@@ -65,18 +68,7 @@
owner: root
group: root
mode: 0600
with_sequence: start=1 end={{ vpn_clients }}
when: use_pihole_templates != true

- name: generate client configs for pihole setup
template:
src: "wg0-client-pihole.conf"
dest: "~/wg/client_{{ item }}/wg0-client.conf"
owner: root
group: root
mode: 0600
with_sequence: start=1 end={{ vpn_clients }}
when: use_pihole_templates == true
with_indexed_items: "{{ vpn_client_private_keys.results }}"

- name: generate qr codes for client configs
shell: umask 077; qrencode --type=PNG --output=/root/wg/{{ item }}/wg0-client.png < ~/wg/{{ item }}/wg0-client.conf
@@ -116,4 +108,4 @@
src: "~/wg/{{item}}/wg0-client.png"
dest: "wireguard_profiles/{{ ansible_ssh_host }}/{{item}}/"
flat: yes
with_sequence: start=1 end={{ vpn_clients }}
with_items: "{{ vpn_client_names }}"

@@ -1,11 +0,0 @@
[Interface]
Address = {{ vpn_network }}.{{item|int + 1}}/32
DNS = 10.100.100.1
PrivateKey = {{ private_key_files.results[item|int].stdout }}
MTU = 1500

[Peer]
PublicKey = {{ public_key_files.results[0].stdout }}
AllowedIPs = 0.0.0.0/0
Endpoint = {{ ansible_ssh_host }}:{{ vpn_port }}
PersistentKeepalive = 0

@@ -1,8 +1,7 @@
[Interface]
Address = {{ vpn_network }}.{{item|int + 1}}/32
DNS = 9.9.9.9
PrivateKey = {{ private_key_files.results[item|int].stdout }}
MTU = 1500
Address = {{ vpn_network }}.{{item.0 + 2}}/32
DNS = {{ dns_for_clients }}
PrivateKey = {{ item.1.stdout }}

[Peer]
PublicKey = {{ vpn_server_public_key.stdout }}

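The reworked client template consumes item.0 (the zero-based loop index) for the address and item.1 (the registered key result) for the private key, as supplied by the with_indexed_items loop in the task diff above, so the first client ends up at {{ vpn_network }}.2. A minimal sketch (not part of the change set, stand-in data assumed) of that pairing:

    - hosts: localhost
      gather_facts: false
      vars:
        vpn_network: "10.100.100"                 # assumed, as in the playbook vars
        vpn_client_private_keys:                  # assumed stand-in for the registered command results
          results:
            - { stdout: "PRIVATE_KEY_1" }
            - { stdout: "PRIVATE_KEY_2" }
      tasks:
        - name: show address and key per client, as the template consumes them
          ansible.builtin.debug:
            msg: "Address={{ vpn_network }}.{{ item.0 + 2 }}/32 PrivateKey={{ item.1.stdout }}"
          with_indexed_items: "{{ vpn_client_private_keys.results }}"
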
@@ -5,7 +5,6 @@ ListenPort = {{ vpn_port }}
PrivateKey = {{ vpn_server_private_key.stdout }}
PostUp = iptables -A FORWARD -i wg0 -j ACCEPT; iptables -t nat -A POSTROUTING -o ens5 -j MASQUERADE; ip6tables -A FORWARD -i wg0 -j ACCEPT; ip6tables -t nat -A POSTROUTING -o ens5 -j MASQUERADE
PostDown = iptables -D FORWARD -i wg0 -j ACCEPT; iptables -t nat -D POSTROUTING -o ens5 -j MASQUERADE; ip6tables -D FORWARD -i wg0 -j ACCEPT; ip6tables -t nat -D POSTROUTING -o ens5 -j MASQUERADE
MTU = 1500

{% for i in vpn_client_public_keys.results %}
# {{ i.item }}

@@ -1,11 +0,0 @@
[Interface]
Address = 10.100.100.2/32
DNS = 10.100.100.1
PrivateKey = yMIV+Rpg1KVbmpev3fPBipArnhmKyGA0bX3a0i0/C1s=
MTU = 1500

[Peer]
PublicKey = vBZjf26R0ZMyh8YZ2a257XQq28bfse5YOvTfIPENZFQ=
AllowedIPs = 0.0.0.0/5, 8.0.0.0/7, 10.100.100.0/24, 11.0.0.0/8, 12.0.0.0/6, 16.0.0.0/4, 32.0.0.0/3, 64.0.0.0/2, 128.0.0.0/3, 160.0.0.0/5, 168.0.0.0/6, 172.0.0.0/12, 172.32.0.0/11, 172.64.0.0/10, 172.128.0.0/9, 173.0.0.0/8, 174.0.0.0/7, 176.0.0.0/4, 192.0.0.0/9, 192.128.0.0/11, 192.160.0.0/13, 192.169.0.0/16, 192.170.0.0/15, 192.172.0.0/14, 192.176.0.0/12, 192.192.0.0/10, 193.0.0.0/8, 194.0.0.0/7, 196.0.0.0/6, 200.0.0.0/5, 208.0.0.0/4
Endpoint = 3.83.185.242:58172
PersistentKeepalive = 0

@@ -1,18 +0,0 @@
---
# INstall wireguard to comply with pihole dns proxy
- name: Install wireguard server on launched hosts
hosts: launched
remote_user: admin
become: true
vars_prompt:
- name: vpn_clients
prompt: Number of vpn clients to be generated
default: 10
private: no
vars:
vpn_network: '10.100.100'
vpn_port: '58172'
use_pihole_templates: true
roles:
- wireguard_server