Deploy AdminForth to EC2 with terraform on CI
Here is a more advanced snippet that deploys AdminForth to AWS EC2 using Terraform.
Here Terraform state will be stored in the cloud, so you can run this deployment from any machine including stateless CI/CD.
We will use GitHub Actions as the CI/CD system, but you can use any other CI/CD, for example the free self-hosted Woodpecker CI.
Assume you have your AdminForth project in the myadmin directory.
Step 1 - Dockerfile
Create file Dockerfile
in myadmin
:
# Use the same Node.js major version you used during development
FROM node:20-alpine

WORKDIR /code/

# COPY (not ADD) is the recommended instruction for local files; ADD has
# extra behaviors (URL fetch, archive extraction) that are unwanted here.
# Copying the manifests first lets Docker cache the `npm ci` layer until
# the dependencies actually change.
COPY package.json package-lock.json /code/
RUN npm ci

# Copy the rest of the sources and build the production bundle
COPY . /code/
RUN npx tsx bundleNow.ts

CMD ["npm", "run", "startLive"]
Step 2 - compose.yml
create folder deploy
and create file compose.yml
inside:
services:
  # Traefik reverse-proxies incoming HTTP traffic to the app container.
  traefik:
    image: "traefik:v2.5"
    command:
      - "--api.insecure=true"
      - "--providers.docker=true"
      - "--entrypoints.web.address=:80"
    ports:
      - "80:80"
    volumes:
      # Read-only access to the Docker socket so Traefik can discover containers
      - "/var/run/docker.sock:/var/run/docker.sock:ro"

  myadmin:
    # NOTE(review): compose paths are relative to this file (deploy/compose.yml).
    # The rsync step copies the parent directory, which suggests myadmin is a
    # SIBLING of deploy/ — if so, these two paths should be ../myadmin and
    # ../myadmin/.env. Confirm against your project layout.
    build: ./myadmin
    restart: always
    env_file:
      - ./myadmin/.env
    volumes:
      # Persist the SQLite database across container rebuilds
      - myadmin-db:/code/db
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.myadmin.rule=PathPrefix(`/`)"
      - "traefik.http.services.myadmin.loadbalancer.server.port=3500"
      - "traefik.http.routers.myadmin.priority=2"

volumes:
  myadmin-db:
Step 3 - create an SSH keypair
Make sure you are in the deploy
folder, then run the following command there:
mkdir .keys && ssh-keygen -f .keys/id_rsa -N ""
Now it should create deploy/.keys/id_rsa
and deploy/.keys/id_rsa.pub
files with your SSH keypair. Terraform script will put the public key to the EC2 instance and will use private key to connect to the instance. Also you will be able to use it to connect to the instance manually.
Step 4 - .gitignore file
Create deploy/.gitignore
file with the following content:
# Terraform working directory (provider binaries, module cache)
.terraform/
# SSH keypair generated in Step 3 — never commit private keys
.keys/
# Local state files may contain secrets (state is stored in S3 instead)
*.tfstate
*.tfstate.*
# Variable files often hold credentials
*.tfvars
# Saved plan output
tfplan
Step 5 - Main terraform file main.tf
Create file main.tf
in deploy
folder:
# Shared values referenced throughout this configuration.
locals {
# Prefix for all AWS resource names; also used for the S3 state bucket,
# so it must be globally unique — replace the placeholder
app_name = "<your_app_name>"
aws_region = "eu-central-1"
}
provider "aws" {
region = local.aws_region
# NOTE(review): "myaws" is a local AWS CLI profile. On CI there is typically
# no profile — credentials come from AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY
# environment variables — so this line may need to be removed there; confirm
# against your CI setup.
profile = "myaws"
}
# Latest official Ubuntu 24.04 (Noble) amd64 AMI.
data "aws_ami" "ubuntu_linux" {
  most_recent = true

  # Canonical's AWS account ID. Official Ubuntu AMIs are owned by Canonical,
  # not by the "amazon" alias — with owners = ["amazon"] this name pattern
  # matches no images and the lookup fails.
  owners = ["099720109477"]

  filter {
    name   = "name"
    values = ["ubuntu/images/hvm-ssd-gp3/ubuntu-noble-24.04-amd64-server-*"]
  }
}
# The account's default VPC in the chosen region.
data "aws_vpc" "default" {
default = true
}
# Static public IP so the address survives instance re-creation.
resource "aws_eip" "eip" {
  # `vpc = true` was deprecated and removed in AWS provider v5;
  # `domain = "vpc"` is the current equivalent.
  domain = "vpc"
}
# Attaches the Elastic IP to the EC2 instance.
resource "aws_eip_association" "eip_assoc" {
instance_id = aws_instance.app_instance.id
allocation_id = aws_eip.eip.id
}
# Default subnet of the default VPC in availability zone "a" of the region.
data "aws_subnet" "default_subnet" {
filter {
name = "vpc-id"
values = [data.aws_vpc.default.id]
}
filter {
name = "default-for-az"
values = ["true"]
}
# Pin to a single AZ so the subnet lookup is deterministic
filter {
name = "availability-zone"
values = ["${local.aws_region}a"]
}
}
# Firewall for the instance: HTTP and SSH in from anywhere, all traffic out.
resource "aws_security_group" "instance_sg" {
name = "${local.app_name}-instance-sg"
vpc_id = data.aws_vpc.default.id
# HTTP for Traefik
ingress {
description = "Allow HTTP"
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
# SSH — open to the world; consider restricting to your CI runner's IP range
ingress {
description = "Allow SSH"
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
# protocol "-1" means all protocols
egress {
description = "Allow all outbound traffic"
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
# Registers the public half of the keypair generated in Step 3 with EC2.
resource "aws_key_pair" "app_deployer" {
key_name = "terraform-deploy_${local.app_name}-key"
public_key = file("./.keys/id_rsa.pub") # Path to your public SSH key
}
# The application server. user_data installs Docker on first boot.
resource "aws_instance" "app_instance" {
  ami           = data.aws_ami.ubuntu_linux.id
  instance_type = "t3a.small"

  subnet_id              = data.aws_subnet.default_subnet.id
  vpc_security_group_ids = [aws_security_group.instance_sg.id]
  key_name               = aws_key_pair.app_deployer.key_name

  root_block_device {
    volume_size = 40 // Size in GB for root partition
    volume_type = "gp2"
  }

  # Cloud-init script, run once as root on first boot. Installs Docker from
  # Docker's official apt repository (per Docker's Ubuntu install docs).
  user_data = <<-EOF
    #!/bin/bash
    sudo apt-get update
    # -y is required: user_data runs non-interactively (no TTY), so an
    # interactive confirmation prompt would abort the install
    sudo apt-get install -y ca-certificates curl
    sudo install -m 0755 -d /etc/apt/keyrings
    sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc
    sudo chmod a+r /etc/apt/keyrings/docker.asc
    # Add the repository to Apt sources:
    echo \
      "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \
      $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
      sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
    sudo apt-get update
    sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
    systemctl start docker
    systemctl enable docker
    # Let the default "ubuntu" user run docker without sudo (used by provisioners)
    usermod -a -G docker ubuntu
  EOF

  tags = {
    Name = "${local.app_name}-instance"
  }
}
# Syncs the project to the instance and (re)starts the app on every apply.
# Runs after the instance exists and the Elastic IP is attached.
resource "null_resource" "sync_files_and_run" {
# Use rsync to exclude node_modules, .git, db
provisioner "local-exec" {
# heredoc syntax
# --delete removes remote files that were deleted on the source
command = <<-EOF
# -o StrictHostKeyChecking=no
rsync -t -av -e "ssh -i ./.keys/id_rsa -o StrictHostKeyChecking=no" \
--delete \
--exclude 'node_modules' \
--exclude '.git' \
--exclude '.terraform' \
--exclude 'terraform*' \
--exclude 'tfplan' \
--exclude '.keys' \
--exclude '.vscode' \
--exclude '.env' \
--exclude 'db' \
--exclude 'up-human/debug' \
--exclude 'up-human/storage' \
../ ubuntu@${aws_eip_association.eip_assoc.public_ip}:/home/ubuntu/app/
EOF
}
# Run docker compose after files have been copied.
# remote-exec inline commands run sequentially in one shell session,
# so the `cd` carries over to the compose command.
provisioner "remote-exec" {
inline = [
# Wait until the cloud-init user_data script has finished installing Docker
"bash -c 'while ! command -v docker &> /dev/null; do echo \"Waiting for Docker to be installed...\"; sleep 1; done'",
"bash -c 'while ! docker info &> /dev/null; do echo \"Waiting for Docker to start...\"; sleep 1; done'",
# please note that prune might destroy build cache and make build slower, however it releases disk space
"docker system prune -f",
# "docker buildx prune -f --filter 'type!=exec.cachemount'",
"cd /home/ubuntu/app/deploy",
"docker compose -p app -f compose.yml up --build -d"
]
connection {
type = "ssh"
user = "ubuntu"
private_key = file("./.keys/id_rsa")
host = aws_eip_association.eip_assoc.public_ip
}
}
# timestamp() changes on every run, so this resource is re-triggered
# (and the sync + compose up re-executed) on every `terraform apply`
triggers = {
always_run = timestamp()
}
depends_on = [aws_instance.app_instance, aws_eip_association.eip_assoc]
}
# Printed after apply — use this IP to reach the app or SSH into the instance.
output "instance_public_ip" {
value = aws_eip_association.eip_assoc.public_ip
}
######### This section is for Terraform state storage ##############
# S3 bucket for storing Terraform state
resource "aws_s3_bucket" "terraform_state" {
# S3 bucket names are GLOBAL across all AWS accounts — app_name must be unique
bucket = "${local.app_name}-terraform-state"
}
# Expire superseded state-file versions so the bucket does not grow unbounded.
resource "aws_s3_bucket_lifecycle_configuration" "terraform_state" {
  bucket = aws_s3_bucket.terraform_state.bucket

  rule {
    status = "Enabled"
    id     = "Keep only the latest version of the state file"

    # AWS provider v4+ requires each rule to declare a filter (or prefix);
    # an empty filter applies the rule to every object in the bucket.
    filter {}

    # Delete non-current (old) versions 30 days after they are superseded
    noncurrent_version_expiration {
      noncurrent_days = 30
    }
  }
}
# Versioning keeps prior state revisions so a bad apply can be rolled back.
resource "aws_s3_bucket_versioning" "terraform_state" {
bucket = aws_s3_bucket.terraform_state.bucket
versioning_configuration {
status = "Enabled"
}
}
# Encrypt state at rest — Terraform state can contain secrets in plaintext.
resource "aws_s3_bucket_server_side_encryption_configuration" "terraform_state" {
bucket = aws_s3_bucket.terraform_state.bucket
rule {
apply_server_side_encryption_by_default {
sse_algorithm = "AES256"
}
}
}
# DynamoDB table for state locking
# Lock table used by the S3 backend so concurrent applies cannot corrupt state.
resource "aws_dynamodb_table" "terraform_lock" {
name = "${local.app_name}-terraform-lock-table"
billing_mode = "PAY_PER_REQUEST" # Dynamically scales to meet demand
hash_key = "LockID" # Primary key for the table
attribute {
name = "LockID"
type = "S" # String
}
}