dev-strongswan-gateway #1

Merged
mauritz.uphoff merged 3 commits from dev-strongswan-gateway into main 2025-07-06 17:47:06 +00:00
7 changed files with 285 additions and 188 deletions


@@ -1,13 +1,6 @@
variable "stackit_project_id_machine01" {
  description = "Project ID for machine01"
  default     = "d75e6aab-b616-4b42-ae3b-aaf161ad626d"
  type        = string
}
variable "stackit_project_id_machine02" {
  description = "Project ID for machine02"
  default     = "c30f0bc4-1b8c-430e-adff-9e862d3b2cd0"
  type        = string
}
variable "stackit_organization_id" {
  default = "03a34540-3c1a-4794-b2c6-7111ecf824ef"
  type    = string
}
variable "stackit_region" {


@@ -1,138 +0,0 @@
resource "random_pet" "pet01" {}

resource "stackit_key_pair" "admin_keypair" {
  name       = "${random_pet.pet01.id}-keypair"
  public_key = chomp(file("~/.ssh/id_rsa.pub"))
}

resource "stackit_network" "machine01" {
  project_id       = var.stackit_project_id_machine01
  ipv4_prefix      = "10.1.1.0/24"
  name             = "network-machine01"
  ipv4_nameservers = ["9.9.9.9", "1.1.1.1"]
}

resource "stackit_network" "machine02" {
  project_id       = var.stackit_project_id_machine02
  ipv4_prefix      = "10.2.2.0/24"
  name             = "network-machine02"
  ipv4_nameservers = ["9.9.9.9", "1.1.1.1"]
}

resource "stackit_network_interface" "machines" {
  for_each = {
    machine01 = {
      network_id = stackit_network.machine01.network_id
      ipv4       = "10.1.1.10"
      project_id = var.stackit_project_id_machine01
    }
    machine02 = {
      network_id = stackit_network.machine02.network_id
      ipv4       = "10.2.2.10"
      project_id = var.stackit_project_id_machine02
    }
  }

  project_id = each.value.project_id
  network_id = each.value.network_id
  ipv4       = each.value.ipv4
  security   = false
}

resource "stackit_public_ip" "wan_ips" {
  for_each = {
    machine01 = {
      network_interface_id = stackit_network_interface.machines["machine01"].network_interface_id
      project_id           = var.stackit_project_id_machine01
    }
    machine02 = {
      network_interface_id = stackit_network_interface.machines["machine02"].network_interface_id
      project_id           = var.stackit_project_id_machine02
    }
  }

  project_id           = each.value.project_id
  network_interface_id = each.value.network_interface_id
}

locals {
  machine_ips = {
    machine01 = {
      local_ip     = "10.1.1.10"
      local_subnet = "10.1.1.0/24"
    }
    machine02 = {
      local_ip     = "10.2.2.10"
      local_subnet = "10.2.2.0/24"
    }
  }

  vpn_config = {
    machine01 = {
      local_ip      = local.machine_ips.machine01.local_ip
      remote_ip     = stackit_public_ip.wan_ips["machine02"].ip
      local_subnet  = local.machine_ips.machine01.local_subnet
      remote_subnet = local.machine_ips.machine02.local_subnet
      leftid        = stackit_public_ip.wan_ips["machine01"].ip
      rightid       = stackit_public_ip.wan_ips["machine02"].ip
    }
    machine02 = {
      local_ip      = local.machine_ips.machine02.local_ip
      remote_ip     = stackit_public_ip.wan_ips["machine01"].ip
      local_subnet  = local.machine_ips.machine02.local_subnet
      remote_subnet = local.machine_ips.machine01.local_subnet
      leftid        = stackit_public_ip.wan_ips["machine02"].ip
      rightid       = stackit_public_ip.wan_ips["machine01"].ip
    }
  }

  init_config = {
    machine01 = templatefile("${path.module}/cloud-init.yaml", merge(local.vpn_config["machine01"], {
      psk = var.vpn_psk
    }))
    machine02 = templatefile("${path.module}/cloud-init.yaml", merge(local.vpn_config["machine02"], {
      psk = var.vpn_psk
    }))
  }
}

resource "stackit_server" "machines" {
  for_each = {
    machine01 = {
      project_id        = var.stackit_project_id_machine01
      availability_zone = "eu01-1"
    }
    machine02 = {
      project_id        = var.stackit_project_id_machine02
      availability_zone = "eu01-2"
    }
  }

  project_id        = each.value.project_id
  name              = each.key
  availability_zone = each.value.availability_zone
  machine_type      = "c1.4"
  keypair_name      = stackit_key_pair.admin_keypair.name
  user_data         = local.init_config[each.key]

  boot_volume = {
    size                  = 64
    source_type           = "image"
    source_id             = var.debian_image_id
    performance_class     = "storage_premium_perf6"
    delete_on_termination = true
  }

  network_interfaces = [
    stackit_network_interface.machines[each.key].network_interface_id
  ]
}

output "machine01_public_ip" {
  value = stackit_public_ip.wan_ips["machine01"].ip
}

output "machine02_public_ip" {
  value = stackit_public_ip.wan_ips["machine02"].ip
}

02-projects.tf Normal file

@@ -0,0 +1,30 @@
resource "stackit_network_area" "sna" {
  organization_id = var.stackit_organization_id
  name            = "sna-strongswan-deployment"
  network_ranges = [
    {
      prefix = "10.1.0.0/16"
    }
  ]
  transfer_network = "172.16.9.0/24"
}

resource "stackit_resourcemanager_project" "cloud" {
  parent_container_id = var.stackit_organization_id
  name                = "mu-stackit-strongswan-02-cloud"
  owner_email         = "mauritz.uphoff@stackit.cloud"
  labels = {
    "networkArea" = stackit_network_area.sna.network_area_id
  }
}

resource "stackit_resourcemanager_project" "onprem" {
  parent_container_id = var.stackit_organization_id
  name                = "mu-stackit-strongswan-02-onprem"
  owner_email         = "mauritz.uphoff@stackit.cloud"
  /*labels = {
    "networkArea" = stackit_network_area.sna.network_area_id
  }*/
}

03-sw-appliances.tf Normal file

@@ -0,0 +1,138 @@
resource "random_pet" "pet01" {}

resource "stackit_key_pair" "admin_keypair" {
  name       = "${random_pet.pet01.id}-keypair"
  public_key = chomp(file("~/.ssh/id_rsa.pub"))
}

resource "stackit_network" "cloud_network01" {
  project_id       = stackit_resourcemanager_project.cloud.project_id
  ipv4_prefix      = "10.1.1.0/24"
  name             = "network-01"
  ipv4_nameservers = ["9.9.9.9", "1.1.1.1"]
}

resource "stackit_network" "onprem_network01" {
  project_id       = stackit_resourcemanager_project.onprem.project_id
  ipv4_prefix      = "192.168.1.0/24"
  name             = "network-02"
  ipv4_nameservers = ["9.9.9.9", "1.1.1.1"]
}

resource "stackit_network_interface" "appliances" {
  for_each = {
    appliance01 = {
      network_id = stackit_network.cloud_network01.network_id
      ipv4       = "10.1.1.10"
      project_id = stackit_resourcemanager_project.cloud.project_id
    }
    appliance02 = {
      network_id = stackit_network.onprem_network01.network_id
      ipv4       = "192.168.1.10"
      project_id = stackit_resourcemanager_project.onprem.project_id
    }
  }

  project_id = each.value.project_id
  network_id = each.value.network_id
  ipv4       = each.value.ipv4
  security   = false
}

resource "stackit_public_ip" "wan_ips_appliances" {
  for_each = {
    appliance01 = {
      network_interface_id = stackit_network_interface.appliances["appliance01"].network_interface_id
      project_id           = stackit_resourcemanager_project.cloud.project_id
    }
    appliance02 = {
      network_interface_id = stackit_network_interface.appliances["appliance02"].network_interface_id
      project_id           = stackit_resourcemanager_project.onprem.project_id
    }
  }

  project_id           = each.value.project_id
  network_interface_id = each.value.network_interface_id
}

locals {
  appliance_ips = {
    appliance01 = {
      local_ip     = "10.1.1.10"
      local_subnet = "10.1.1.0/24"
    }
    appliance02 = {
      local_ip     = "192.168.1.10"
      local_subnet = "192.168.1.0/24"
    }
  }

  vpn_config = {
    appliance01 = {
      local_ip      = local.appliance_ips.appliance01.local_ip
      remote_ip     = stackit_public_ip.wan_ips_appliances["appliance02"].ip
      local_subnet  = local.appliance_ips.appliance01.local_subnet
      remote_subnet = local.appliance_ips.appliance02.local_subnet
      leftid        = stackit_public_ip.wan_ips_appliances["appliance01"].ip
      rightid       = stackit_public_ip.wan_ips_appliances["appliance02"].ip
    }
    appliance02 = {
      local_ip      = local.appliance_ips.appliance02.local_ip
      remote_ip     = stackit_public_ip.wan_ips_appliances["appliance01"].ip
      local_subnet  = local.appliance_ips.appliance02.local_subnet
      remote_subnet = local.appliance_ips.appliance01.local_subnet
      leftid        = stackit_public_ip.wan_ips_appliances["appliance02"].ip
      rightid       = stackit_public_ip.wan_ips_appliances["appliance01"].ip
    }
  }

  init_config = {
    appliance01 = templatefile("${path.module}/cloud-init.yaml", merge(local.vpn_config["appliance01"], {
      psk = var.vpn_psk
    }))
    appliance02 = templatefile("${path.module}/cloud-init.yaml", merge(local.vpn_config["appliance02"], {
      psk = var.vpn_psk
    }))
  }
}

resource "stackit_server" "appliances" {
  for_each = {
    appliance01 = {
      project_id        = stackit_resourcemanager_project.cloud.project_id
      availability_zone = "eu01-1"
    }
    appliance02 = {
      project_id        = stackit_resourcemanager_project.onprem.project_id
      availability_zone = "eu01-2"
    }
  }

  project_id        = each.value.project_id
  name              = each.key
  availability_zone = each.value.availability_zone
  machine_type      = "c1.4"
  keypair_name      = stackit_key_pair.admin_keypair.name
  user_data         = local.init_config[each.key]

  boot_volume = {
    size                  = 64
    source_type           = "image"
    source_id             = var.debian_image_id
    performance_class     = "storage_premium_perf6"
    delete_on_termination = true
  }

  network_interfaces = [
    stackit_network_interface.appliances[each.key].network_interface_id
  ]
}

output "appliance01_public_ip" {
  value = stackit_public_ip.wan_ips_appliances["appliance01"].ip
}

output "appliance02_public_ip" {
  value = stackit_public_ip.wan_ips_appliances["appliance02"].ip
}

04-vms.tf Normal file

@@ -0,0 +1,43 @@
resource "stackit_network_area_route" "vpn" {
  organization_id = var.stackit_organization_id
  network_area_id = stackit_network_area.sna.network_area_id
  prefix          = "192.168.1.0/24"
  // network interface strongswan cloud appliance
  next_hop = "10.1.1.10"
}

resource "stackit_network_interface" "machine01_cloud" {
  project_id = stackit_resourcemanager_project.cloud.project_id
  network_id = stackit_network.cloud_network01.network_id
  ipv4       = "10.1.1.11"
  security   = false
}

resource "stackit_server" "machine01_cloud" {
  project_id        = stackit_resourcemanager_project.cloud.project_id
  name              = "machine01"
  availability_zone = "eu01-3"
  machine_type      = "c1.4"
  keypair_name      = stackit_key_pair.admin_keypair.name

  boot_volume = {
    size                  = 64
    source_type           = "image"
    source_id             = var.debian_image_id
    performance_class     = "storage_premium_perf6"
    delete_on_termination = true
  }

  network_interfaces = [
    stackit_network_interface.machine01_cloud.network_interface_id
  ]
}

resource "stackit_public_ip" "wan_ip_machine01" {
  project_id           = stackit_resourcemanager_project.cloud.project_id
  network_interface_id = stackit_network_interface.machine01_cloud.network_interface_id
}

output "machine01_public_ip" {
  value = stackit_public_ip.wan_ip_machine01.ip
}

README.md

@@ -1,97 +1,128 @@
# StrongSwan VPN Verification Guide
StrongSwan VPN Verification Guide
This guide helps verify that an IPsec VPN tunnel using StrongSwan is properly established between the following machines
provisioned via Terraform and configured with cloud-init:
This guide helps you verify that a site-to-site IPsec VPN tunnel using StrongSwan has been successfully established between virtual machines provisioned via Terraform and configured with cloud-init.
- `machine01` → IP: `10.1.1.10`
- `machine02` → IP: `10.2.2.10`
## Hosts Overview
The VPN uses IKEv2 and a Pre-Shared Key (PSK) to create a site-to-site tunnel automatically on boot.
The tunnel uses IKEv2 with a Pre-Shared Key (PSK) and is automatically established at boot.
| Host | IP Address | Role |
|-------------|------------|------------------------|
| appliance01 | 10.1.1.10 | Cloud VPN Appliance |
| machine01 | 10.1.1.11 | Cloud Internal Machine |
| appliance02 | 192.168.1.10 | On-Prem VPN Appliance |
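The tunnel configuration is rendered into cloud-init and brought up at boot, so before digging into strongSwan itself it can help to confirm that cloud-init actually finished on each appliance. A quick sanity check with the standard `cloud-init` CLI (assuming the Debian image ships it, which the use of `user_data` suggests):

```bash
# Block until cloud-init has finished and print the overall result
sudo cloud-init status --wait

# Show more detail if the result is "error" or "degraded"
sudo cloud-init status --long
```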
---
## 1. Check the StrongSwan Service
## 🔧 Architecture
SSH into both machines:
![Architecture Diagram](docs/network-architecture.png)
```sh
---
## 1. Check StrongSwan Service Status
SSH into each machine using its public IP:
```bash
ssh -i ~/.ssh/id_rsa debian@<machine-public-ip>
```
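The public IPs are exposed as Terraform outputs in this change (`appliance01_public_ip`, `appliance02_public_ip`, `machine01_public_ip`), so a small helper, assuming the commands are run from the Terraform working directory, is:

```bash
# Print the public IPs created by this configuration
terraform output -raw appliance01_public_ip
terraform output -raw appliance02_public_ip
terraform output -raw machine01_public_ip

# Or SSH straight to the cloud appliance using the output value
ssh -i ~/.ssh/id_rsa debian@"$(terraform output -raw appliance01_public_ip)"
```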
Once logged in on each peer, run:
Once logged in, verify the StrongSwan service:
```sh
```bash
sudo ipsec statusall
```
You should see output like the following:
Expected output should resemble:
```
Status of IKE charon daemon (strongSwan 5.9.8, Linux ...):
uptime: ...
worker threads: ...
Connections:
net-net: 10.1.1.10...10.2.2.10 IKEv2, dpddelay=30s
net-net: 10.1.1.10...192.168.1.10 IKEv2, dpddelay=30s
net-net: local: [10.1.1.10] uses pre-shared key authentication
net-net: remote: [10.2.2.10] uses pre-shared key authentication
net-net: child: 10.1.1.0/24 === 10.2.2.0/24 TUNNEL
net-net: remote: [192.168.1.10] uses pre-shared key authentication
net-net: child: 10.1.1.0/24 === 192.168.1.0/24 TUNNEL
Security Associations (SAs) (0 up, 0 connecting):
none
```
At this point, the configuration is loaded but the tunnel might not be up yet.
This output confirms the configuration is loaded, but the tunnel may not yet be active.
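If the `net-net` connection does not show up at all, the configuration was probably not rendered or not (re)loaded. A few things worth checking, as a sketch: the paths assume the legacy stroke-based layout that `ipsec statusall` implies, and the systemd unit name differs between Debian releases:

```bash
# Configuration and secrets rendered by cloud-init (legacy strongSwan layout)
sudo cat /etc/ipsec.conf
sudo ls -l /etc/ipsec.secrets   # PSK file should exist and be readable by root only

# Recent daemon logs (unit may be "strongswan-starter" or "strongswan")
sudo journalctl -u strongswan-starter -n 50 --no-pager

# Reload the configuration without tearing down established tunnels
sudo ipsec reload
```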
---
## 2. Bring Up and Verify the VPN Tunnel
## 2. Bring Up the VPN Tunnel
If the VPN does not connect automatically, you can initiate it manually from either peer:
If the tunnel didn't start automatically, initiate it manually from either VPN appliance:
```sh
```bash
sudo ipsec up net-net
```
Then recheck the status:
Then re-check the connection:
```sh
```bash
sudo ipsec statusall
```
You should see something like:
You should now see an established connection:
```
Connections:
net-net[1]: ESTABLISHED 15s ago, 10.1.1.10...10.2.2.10
net-net[1]: ESTABLISHED 15s ago, 10.1.1.10...192.168.1.10
net-net{1}: INSTALLED, TUNNEL, ESP SPIs: ...
net-net{1}: 10.1.1.0/24 === 10.2.2.0/24
net-net{1}: 10.1.1.0/24 === 192.168.1.0/24
```
Look for the following:
- `ESTABLISHED` — the tunnel is active.
- Correct subnets in `===`, e.g., `10.1.1.0/24 === 10.2.2.0/24`.
Key indicators:
- `ESTABLISHED`: Tunnel is active
- Subnet-to-subnet routing: `10.1.1.0/24 === 192.168.1.0/24`
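If `ipsec up net-net` fails instead (for example with `AUTHENTICATION_FAILED` or no response from the peer), the charon log is usually the quickest place to look; a minimal sketch, assuming default Debian logging:

```bash
# IKE daemon log lines (charon normally logs via syslog/journald on Debian)
sudo journalctl -t charon -n 100 --no-pager

# Fall back to syslog if the journal identifier differs
sudo grep charon /var/log/syslog | tail -n 100
```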
---
## 3. Test Connectivity Through the VPN
## 3. Verify VPN-Backed Network Connectivity
Ping from one internal IP to the other (inside each VM):
Ping between hosts to validate that routing is working through the VPN tunnel:
```sh
# On machine01
ping 10.2.2.10
### 💻 From appliance01 (cloud) to appliance02 (on-prem)
# On machine02
```bash
ping 192.168.1.10
# ✅ Successful ping confirms VPN tunnel works
```
### 💻 From appliance02 (on-prem) to appliance01 (cloud)
```bash
ping 10.1.1.10
# ✅ Confirms bidirectional connectivity
```
Expect responses showing that packets are routed through the tunnel.
### 💻 From machine01 (cloud internal) to appliance02 (on-prem)
---
```bash
ping 192.168.1.10
# ✅ Tests routing through VPN appliance (appliance01)
```
## 4. Optional: Check Routing Table
### 💻 From appliance02 (on-prem) to machine01 (cloud internal)
Although not strictly necessary, you can confirm local routing with:
```bash
ping 10.1.1.11
# ✅ Tests project-project routing via SNA transfer network
```
```sh
ip route
```
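A more targeted variant is to ask the kernel which route it would actually use for the remote subnet (plain iproute2, available on the Debian images used here):

```bash
# Show the next hop and interface that would carry traffic to the on-prem subnet
ip route get 192.168.1.10

# On an appliance: confirm the IPsec policies are installed in the kernel
sudo ip xfrm policy
```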
### ❌ From machine01 (cloud) to appliance02 (VPN-disconnected)
If you remove the static route that directs 192.168.1.0/24 through appliance01:
```bash
ping 192.168.1.10
# ❌ Should fail, indicating that VPN appliance is required for routing
```
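One way to remove (and later restore) that static route, assuming it is managed solely by the `stackit_network_area_route.vpn` resource added in `04-vms.tf`, is a targeted Terraform destroy:

```bash
# Remove only the SNA route that points 192.168.1.0/24 at the cloud appliance
terraform destroy -target=stackit_network_area_route.vpn

# Re-run the ping test from machine01, then recreate the route
terraform apply
```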
All success cases confirm correct tunnel and routing setup.
Failures (when expected) validate routing dependency on the VPN stack.
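As a final check that the successful pings really traverse the tunnel rather than leaving the appliance unencrypted, you can watch for ESP packets on either appliance while a ping is running; a minimal sketch, assuming `tcpdump` is installed:

```bash
# ESP-encapsulated packets should appear for every ping crossing the tunnel
sudo tcpdump -ni any esp
```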

Binary image file (252 KiB) added; contents not shown in the diff.