diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..76f2b38 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,5 @@ +.git +.github +ansible +__pycache__ +*.pyc \ No newline at end of file diff --git a/.github/workflows/README.md b/.github/workflows/README.md index 0f2aa43..f2b98bb 100644 --- a/.github/workflows/README.md +++ b/.github/workflows/README.md @@ -1 +1,13 @@ -Here you place the github actions automation +GitHub Actions workflows live here. + +Expected workflow behavior: + +- run `ansible-lint`, `shellcheck`, and playbook syntax checks on pull requests +- auto-deploy to the `staging` GitHub Environment on pushes to `main` +- allow manual deploys to `staging` or `production` through `workflow_dispatch` + +Required GitHub Environment secrets for each environment: + +- `DEPLOY_HOST` +- `DEPLOY_SSH_PRIVATE_KEY` +- `DEPLOY_KNOWN_HOSTS` diff --git a/.github/workflows/ci-cd.yml b/.github/workflows/ci-cd.yml new file mode 100644 index 0000000..155635e --- /dev/null +++ b/.github/workflows/ci-cd.yml @@ -0,0 +1,132 @@ +name: CI and Deploy + +on: + pull_request: + push: + branches: + - main + workflow_dispatch: + inputs: + target_environment: + description: Target environment + required: true + default: staging + type: choice + options: + - staging + - production + +jobs: + lint: + name: Lint and Syntax Check + runs-on: ubuntu-latest + + steps: + - name: Check out repository + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.13" + + - name: Install shellcheck + run: | + sudo apt-get update + sudo apt-get install -y shellcheck + + - name: Install Ansible tooling + run: | + python -m pip install --upgrade pip + pip install "ansible>=11,<12" "ansible-lint>=25,<26" + + - name: Install Ansible collections + working-directory: ansible + run: ansible-galaxy collection install -r requirements.yml + + - name: Write mock staging vault file for CI checks + run: | + mkdir -p ansible/inventory/group_vars/staging + cat > ansible/inventory/group_vars/staging/vault.yml <<'EOF' + --- + vault_postgres_app_password: lint-only + vault_postgres_readonly_password: lint-only + vault_postgres_superuser_password: lint-only + vault_s3_access_key: lint-only + vault_s3_secret_key: lint-only + vault_ci_deploy_ssh_private_key: | + lint-only + EOF + + - name: Run ansible-lint + working-directory: ansible + run: ansible-lint . 
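+      # Note: ansible-lint also honors a repo-level config file (for example
+      # `ansible/.ansible-lint`). A minimal sketch using its standard `profile`
+      # and `exclude_paths` options, should stricter linting be wanted later:
+      #
+      #   profile: production
+      #   exclude_paths:
+      #     - inventory/group_vars/staging/vault.yml
+      #
+      # No such file is added by this change; the step above runs with the
+      # ansible-lint defaults.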
+ + - name: Run shellcheck on tracked shell templates + run: | + shellcheck -s bash ansible/roles/systemd_reload/templates/app-reload.sh.j2 + shellcheck -s bash ansible/roles/backup/templates/app-db-backup.sh.j2 + + - name: Syntax check Ansible playbooks + working-directory: ansible + run: | + ansible-playbook bootstrap.yml --syntax-check + ansible-playbook site.yml --syntax-check + ansible-playbook deploy.yml --syntax-check + + deploy: + name: Deploy + needs: lint + if: github.event_name == 'push' || github.event_name == 'workflow_dispatch' + runs-on: ubuntu-latest + environment: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.target_environment || 'staging' }} + + env: + TARGET_ENVIRONMENT: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.target_environment || 'staging' }} + + steps: + - name: Check out repository + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.13" + + - name: Install Ansible tooling + run: | + python -m pip install --upgrade pip + pip install "ansible>=11,<12" + + - name: Install Ansible collections + working-directory: ansible + run: ansible-galaxy collection install -r requirements.yml + + - name: Configure SSH key + run: | + mkdir -p ~/.ssh + printf '%s\n' "${{ secrets.DEPLOY_SSH_PRIVATE_KEY }}" > ~/.ssh/deploy_key + chmod 600 ~/.ssh/deploy_key + + - name: Configure known_hosts + run: | + mkdir -p ~/.ssh + printf '%s\n' "${{ secrets.DEPLOY_KNOWN_HOSTS }}" > ~/.ssh/known_hosts + chmod 644 ~/.ssh/known_hosts + + - name: Create temporary CI inventory + run: | + cat > ansible/inventory/ci.ini <- + Replace this placeholder with the migration command your application + framework needs. In the current flow it belongs after the app files + are synced and before app-reload.service is started. + + - name: Trigger application reload service + ansible.builtin.systemd_service: + name: "{{ app_reload_service_name }}" + state: reloaded + + - name: Wait for application container healthcheck to pass + ansible.builtin.command: + cmd: "docker inspect --format {{ '{{.State.Health.Status}}' }} {{ app_container_name }}" + register: app_container_health + changed_when: false + retries: 20 + delay: 3 + until: app_container_health.stdout == "healthy" + + - name: Verify application responds on localhost + ansible.builtin.uri: + url: "http://127.0.0.1:{{ app_bind_port }}" + status_code: 200 diff --git a/ansible/inventory/group_vars/all.yml b/ansible/inventory/group_vars/all.yml new file mode 100644 index 0000000..b156daa --- /dev/null +++ b/ansible/inventory/group_vars/all.yml @@ -0,0 +1,46 @@ +--- +app_name: cloudops-test +app_user: deploy +app_group: deploy + +app_root: /opt/app +app_storage_root: /opt/storage +app_repo_url: https://github.com/avidity/cloudops-test.git +app_image_repository: app +app_image_tag: latest +app_bind_host: 127.0.0.1 +app_bind_port: 8000 +app_domains: + - example.com + +docker_version: "29" + +# These versions now drive the Compose-managed service images inside the `app` +# stack. The old host-level `postgres` and `redis` roles are no longer used. +postgres_version: "18" +postgres_database: app +postgres_app_user: app_rw +postgres_readonly_user: app_ro +redis_version: "8" +nginx_version: "1.29" + +ssh_allowed_public_keys_dir: files/ssh_keys +backup_retention_months: 6 + +# Real S3-compatible bucket settings. +# These can stay as placeholders while local-only backup testing is used. 
+# Once a real bucket exists, replace them with the real bucket name and +# endpoint, then comment out the local-only override further below. +backup_s3_bucket: replace-me +backup_s3_endpoint: https://s3.example.com + +# Current local-only learning mode: +# - backups are still created on the VM under `/opt/storage/backups` +# - the upload step is skipped even though the role already supports it +# - this is useful until real S3-compatible credentials and bucket details exist +# +# When switching to real S3 upload later: +# - comment out the line below +# - set real `backup_s3_bucket` and `backup_s3_endpoint` values above +# - keep `vault_s3_access_key` and `vault_s3_secret_key` in the encrypted vault +backup_upload_enabled: false diff --git a/ansible/inventory/group_vars/production.yml b/ansible/inventory/group_vars/production.yml new file mode 100644 index 0000000..7de16f7 --- /dev/null +++ b/ansible/inventory/group_vars/production.yml @@ -0,0 +1,15 @@ +--- +# Current state: +# - this file is only a production scaffold +# - the `[production]` inventory group is still empty in `inventory/hosts` +# +# To switch production on later: +# - add real production hosts under `[production]` +# - replace `app_domains` with the real production FQDN(s) +# - optionally set `app_image_tag` to a specific release tag instead of `latest` +# - add the real encrypted production vault file with the database, backup, and +# deploy credentials +environment_name: production +app_image_tag: latest +app_domains: + - app.example.com diff --git a/ansible/inventory/group_vars/staging/staging.yml b/ansible/inventory/group_vars/staging/staging.yml new file mode 100644 index 0000000..8f2e3a8 --- /dev/null +++ b/ansible/inventory/group_vars/staging/staging.yml @@ -0,0 +1,5 @@ +--- +environment_name: staging +app_image_tag: latest +app_domains: + - staging.example.com diff --git a/ansible/inventory/group_vars/staging/vault.example.yml b/ansible/inventory/group_vars/staging/vault.example.yml new file mode 100644 index 0000000..d17c341 --- /dev/null +++ b/ansible/inventory/group_vars/staging/vault.example.yml @@ -0,0 +1,18 @@ +--- +# Copy this file to: +# inventory/group_vars/staging/vault.yml +# +# Encrypt the copied file with: +# ansible-vault encrypt inventory/group_vars/staging/vault.yml + +vault_postgres_app_password: change-me +vault_postgres_readonly_password: change-me + +# Used by the backup role when real S3-compatible upload is enabled. +# In the current local-only VM flow, uploads are disabled, so these can stay as +# dummy values until a real bucket is wired in. +vault_s3_access_key: change-me +vault_s3_secret_key: change-me +vault_ci_deploy_ssh_private_key: | + REPLACE_WITH_CI_DEPLOY_KEY +vault_postgres_superuser_password: change-me diff --git a/ansible/inventory/host_vars/README.md b/ansible/inventory/host_vars/README.md new file mode 100644 index 0000000..928dc6d --- /dev/null +++ b/ansible/inventory/host_vars/README.md @@ -0,0 +1,2 @@ +Add per-host overrides here when a specific machine needs values that differ +from the environment-wide defaults in `group_vars/`. 
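For the staging vault sketched in `inventory/group_vars/staging/vault.example.yml` above, a typical local flow looks roughly like this (a shell sketch, run from the `ansible/` directory; `--ask-vault-pass` can be swapped for `--vault-password-file` if preferred):

    cp inventory/group_vars/staging/vault.example.yml inventory/group_vars/staging/vault.yml
    ansible-vault encrypt inventory/group_vars/staging/vault.yml
    ansible-vault edit inventory/group_vars/staging/vault.yml   # fill in the real secrets
    ansible-playbook -i inventory/hosts site.yml --ask-vault-pass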
diff --git a/ansible/inventory/host_vars/debian_test.yml b/ansible/inventory/host_vars/debian_test.yml new file mode 100644 index 0000000..98169a9 --- /dev/null +++ b/ansible/inventory/host_vars/debian_test.yml @@ -0,0 +1,4 @@ +--- +sshd_allow_users: + - deploy + - john diff --git a/ansible/inventory/hosts b/ansible/inventory/hosts new file mode 100644 index 0000000..771fd46 --- /dev/null +++ b/ansible/inventory/hosts @@ -0,0 +1,27 @@ +[staging] +# Replace with the real staging host or VM address before use. +debian_test ansible_host=203.0.113.10 + +[staging:vars] +ansible_user=deploy +ansible_python_interpreter=/usr/bin/python3 +# Replace with the real private key path used to access the staging host. +ansible_ssh_private_key_file=/path/to/your/deploy/private_key +ansible_ssh_common_args='-o IdentitiesOnly=yes' + +[production] +# Current state: +# - production is scaffolded only +# - no production host is configured yet +# +# To enable real production later: +# - add one or more real production hosts here, for example: +# app_prod ansible_host=203.0.113.10 +# - point `ansible_ssh_private_key_file` at the real production deploy key +# - add real production variables in `inventory/group_vars/production.yml` +# - add the matching encrypted production vault values before running +# `site.yml` or `deploy.yml` + +[app:children] +staging +production diff --git a/ansible/requirements.yml b/ansible/requirements.yml new file mode 100644 index 0000000..1bd9bfa --- /dev/null +++ b/ansible/requirements.yml @@ -0,0 +1,7 @@ +--- +collections: + - name: ansible.posix + - name: community.crypto + - name: community.docker + - name: community.general + - name: community.postgresql diff --git a/ansible/roles/app/defaults/main.yml b/ansible/roles/app/defaults/main.yml new file mode 100644 index 0000000..b9c32f7 --- /dev/null +++ b/ansible/roles/app/defaults/main.yml @@ -0,0 +1,84 @@ +--- +app_compose_project_name: "{{ app_name }}" +app_checkout_dir: "{{ app_root }}/src" +app_env_file: "{{ app_root }}/.env" +app_compose_file: "{{ app_root }}/docker-compose.yml" +app_container_name: "{{ app_name }}-app" +app_restart_policy: unless-stopped +app_healthcheck_interval: 30s +app_healthcheck_timeout: 5s +app_healthcheck_retries: 5 + +# Current local-only source/image flow: +# - the role copies the local Mac working tree to the VM +# - the VM then builds `{{ app_image_repository }}:{{ app_image_tag }}` locally +# - this keeps the feedback loop simple while developing the repo itself +# +# Future git-on-VM source delivery: +# - keep `app_checkout_dir` as the repository root on the VM +# - uncomment the settings below +# - uncomment the example `ansible.builtin.git` task in `roles/app/tasks/main.yml` +# - comment out the three local-copy tasks in that same file: +# - Copy application source to remote build context +# - Copy Dockerfile to remote build context +# - Copy dockerignore to remote build context +# - keep the compose file, reload service, and deploy health checks +# +# Future tagged-image workflow instead of building on the VM: +# - set `app_image_repository` to the real registry image name +# - set `app_image_tag` to the release tag you want to deploy +# - comment out the `build:` block inside the `app` service in +# `roles/app/tasks/main.yml` +# - uncomment the future `pull_policy` line described in that file +# - in that workflow the VM pulls a prebuilt image instead of building from +# copied files or a git checkout +# +# Git-on-VM settings to uncomment later: +# - `app_repo_version` is the branch, tag, or 
commit to check out +# - `app_repo_update` controls whether the VM pulls new changes on later runs +# app_repo_version: main +# app_repo_update: true + +# Tagged-image workflow settings to uncomment later: +# - use these only if deployments should pull a prebuilt image instead of +# building the image on the VM +# app_image_repository: ghcr.io/your-org/cloudops-test +# app_image_tag: "2026.05.01" +# app_image_pull_policy: always + +# `false` during full provisioning from `site.yml`: +# - the app role both syncs files and builds/starts the container itself +# +# `true` during update deployments from `deploy.yml`: +# - the app role only syncs files to the VM +# - the custom systemd reload service performs the rebuild/restart step +app_deploy_only: false + +app_postgres_service_name: postgres +app_redis_service_name: redis + +app_postgres_container_name: "{{ app_name }}-postgres" +app_redis_container_name: "{{ app_name }}-redis" + +app_postgres_image: "postgres:{{ postgres_version }}" +app_redis_image: "redis:{{ redis_version }}-bookworm" + +app_postgres_data_dir: "{{ app_storage_root }}/postgres" +app_redis_data_dir: "{{ app_storage_root }}/redis" + +app_postgres_init_dir: "{{ app_checkout_dir }}/docker/initdb" + +app_postgres_bootstrap_user: postgres +app_postgres_bootstrap_password: "{{ vault_postgres_superuser_password }}" + +# Live runtime PostgreSQL settings for the Compose-managed database. +# These make the PostgreSQL server configuration explicit in the same runtime +# path that the app actually uses, instead of only documenting defaults. +# +# If you later want to tune PostgreSQL differently per environment, override +# these in group_vars or host_vars and re-run `site.yml`. +app_postgres_listen_addresses: "*" +app_postgres_max_connections: 100 +app_postgres_shared_buffers: 128MB +app_postgres_log_connections: "on" +app_postgres_log_disconnections: "on" diff --git a/ansible/roles/app/handlers/main.yml b/ansible/roles/app/handlers/main.yml new file mode 100644 index 0000000..fe92594 --- /dev/null +++ b/ansible/roles/app/handlers/main.yml @@ -0,0 +1,2 @@ +--- +[] diff --git a/ansible/roles/app/tasks/main.yml b/ansible/roles/app/tasks/main.yml new file mode 100644 index 0000000..12e14b8 --- /dev/null +++ b/ansible/roles/app/tasks/main.yml @@ -0,0 +1,251 @@ +--- +- name: Ensure application source directory exists + ansible.builtin.file: + path: "{{ app_checkout_dir }}" + state: directory + owner: "{{ app_user }}" + group: "{{ app_group }}" + mode: "0755" + +# Current local-VM learning flow: +# - copy the local repository files from the Mac to the VM +# - build the image on the VM from those copied files +# This keeps iteration simple while the repo itself is still being developed. +# +# Future git-on-VM alternative: +# 1. Keep `Ensure application source directory exists`. +# 2. Uncomment the git checkout task example below. +# 3. Comment out these three local-copy tasks because the git checkout would +# already provide the same files inside `app_checkout_dir`: +# - Copy application source to remote build context +# - Copy Dockerfile to remote build context +# - Copy dockerignore to remote build context +# 4. Keep `Write application environment file`. +# 5. Keep `Write application compose file`. +# 6. Keep the final build/start task for `site.yml` and the reload-based flow in +# `deploy.yml`. +# +# Future tagged-image alternative: +# 1. Keep `Ensure application source directory exists` if you still want a place +# for env files, compose files, and helper assets on the VM. +# 2. 
Set a real registry image name and release tag in `roles/app/defaults` or +# inventory vars. +# 3. In the compose content below: +# - comment out the `build:` block under the `app` service +# - uncomment the future `pull_policy` line under the `app` service +# 4. In that workflow the VM pulls the prebuilt app image instead of building it +# from copied files or a git checkout. +# +# In other words, the source-sync mechanism changes, but the rest of the app +# role can stay mostly the same. +# +# Example future task: +# - name: Check out application repository on the VM +# ansible.builtin.git: +# repo: "{{ app_repo_url }}" +# dest: "{{ app_checkout_dir }}" +# version: "{{ app_repo_version | default('main') }}" +# update: "{{ app_repo_update | default(true) }}" +# force: false +# become_user: "{{ app_user }}" + +- name: Copy application source to remote build context + ansible.builtin.copy: + src: "{{ role_path }}/../../../app/" + dest: "{{ app_checkout_dir }}/app/" + owner: "{{ app_user }}" + group: "{{ app_group }}" + mode: "0644" + directory_mode: "0755" + +- name: Copy Dockerfile to remote build context + ansible.builtin.copy: + src: "{{ role_path }}/../../../Dockerfile" + dest: "{{ app_checkout_dir }}/Dockerfile" + owner: "{{ app_user }}" + group: "{{ app_group }}" + mode: "0644" + +- name: Copy dockerignore to remote build context + ansible.builtin.copy: + src: "{{ role_path }}/../../../.dockerignore" + dest: "{{ app_checkout_dir }}/.dockerignore" + owner: "{{ app_user }}" + group: "{{ app_group }}" + mode: "0644" + +- name: Ensure application service data directories exist + ansible.builtin.file: + path: "{{ item }}" + state: directory + owner: root + group: root + mode: "0777" + loop: + - "{{ app_postgres_data_dir }}" + - "{{ app_redis_data_dir }}" + - "{{ app_postgres_init_dir }}" + +- name: Write application environment file + ansible.builtin.copy: + dest: "{{ app_env_file }}" + owner: "{{ app_user }}" + group: "{{ app_group }}" + mode: "0640" + content: | + APP_PORT=8000 + DATABASE_URL=postgresql://{{ postgres_app_user }}:{{ vault_postgres_app_password }}@{{ app_postgres_service_name }}:5432/{{ postgres_database }} + REDIS_URL=redis://{{ app_redis_service_name }}:6379/0 + +- name: Write PostgreSQL init SQL for the compose-managed stack + ansible.builtin.copy: + dest: "{{ app_postgres_init_dir }}/01-app.sql" + owner: "{{ app_user }}" + group: "{{ app_group }}" + mode: "0644" + content: | + DO $$ + BEGIN + IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = '{{ postgres_app_user }}') THEN + CREATE ROLE {{ postgres_app_user }} LOGIN PASSWORD '{{ vault_postgres_app_password }}'; + ELSE + ALTER ROLE {{ postgres_app_user }} LOGIN PASSWORD '{{ vault_postgres_app_password }}'; + END IF; + END $$; + + DO $$ + BEGIN + IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = '{{ postgres_readonly_user }}') THEN + CREATE ROLE {{ postgres_readonly_user }} LOGIN PASSWORD '{{ vault_postgres_readonly_password }}'; + ELSE + ALTER ROLE {{ postgres_readonly_user }} LOGIN PASSWORD '{{ vault_postgres_readonly_password }}'; + END IF; + END $$; + + ALTER DATABASE {{ postgres_database }} OWNER TO {{ postgres_app_user }}; + GRANT CONNECT ON DATABASE {{ postgres_database }} TO {{ postgres_readonly_user }}; + + \connect {{ postgres_database }} + + GRANT ALL ON SCHEMA public TO {{ postgres_app_user }}; + GRANT USAGE ON SCHEMA public TO {{ postgres_readonly_user }}; + GRANT SELECT ON ALL TABLES IN SCHEMA public TO {{ postgres_readonly_user }}; + GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO {{ 
postgres_readonly_user }}; + + ALTER DEFAULT PRIVILEGES FOR ROLE {{ postgres_app_user }} IN SCHEMA public + GRANT SELECT ON TABLES TO {{ postgres_readonly_user }}; + + ALTER DEFAULT PRIVILEGES FOR ROLE {{ postgres_app_user }} IN SCHEMA public + GRANT SELECT ON SEQUENCES TO {{ postgres_readonly_user }}; + +- name: Write application compose file + ansible.builtin.copy: + dest: "{{ app_compose_file }}" + owner: "{{ app_user }}" + group: "{{ app_group }}" + mode: "0644" + content: | + services: + postgres: + image: {{ app_postgres_image }} + container_name: {{ app_postgres_container_name }} + restart: {{ app_restart_policy }} + command: + - postgres + - -c + - "listen_addresses={{ app_postgres_listen_addresses }}" + - -c + - "max_connections={{ app_postgres_max_connections }}" + - -c + - "shared_buffers={{ app_postgres_shared_buffers }}" + - -c + - "log_connections={{ app_postgres_log_connections }}" + - -c + - "log_disconnections={{ app_postgres_log_disconnections }}" + environment: + POSTGRES_DB: {{ postgres_database }} + POSTGRES_USER: {{ app_postgres_bootstrap_user }} + POSTGRES_PASSWORD: {{ app_postgres_bootstrap_password }} + volumes: + - {{ app_postgres_data_dir }}:/var/lib/postgresql + - {{ app_postgres_init_dir }}:/docker-entrypoint-initdb.d:ro + healthcheck: + test: ["CMD-SHELL", "pg_isready -U {{ postgres_app_user }} -d {{ postgres_database }}"] + interval: 10s + timeout: 5s + retries: 10 + + redis: + image: {{ app_redis_image }} + container_name: {{ app_redis_container_name }} + restart: {{ app_restart_policy }} + command: + - redis-server + - --appendonly + - "yes" + - --appendfsync + - everysec + - --save + - "900 1" + - --save + - "300 10" + - --save + - "60 10000" + volumes: + - {{ app_redis_data_dir }}:/data + healthcheck: + test: ["CMD", "redis-cli", "ping"] + interval: 10s + timeout: 5s + retries: 10 + + app: + build: + context: ./src + image: {{ app_image_repository }}:{{ app_image_tag }} + # Future tagged-image workflow: + # - comment out the `build:` block above + # - uncomment `pull_policy` + # - set `app_image_repository` and `app_image_tag` to the real + # registry image and release tag + # pull_policy: {{ app_image_pull_policy | default('always') }} + container_name: {{ app_container_name }} + restart: {{ app_restart_policy }} + env_file: + - .env + depends_on: + postgres: + condition: service_healthy + redis: + condition: service_healthy + ports: + - "{{ app_bind_host }}:{{ app_bind_port }}:8000" + healthcheck: + test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://127.0.0.1:8000')"] + interval: {{ app_healthcheck_interval }} + timeout: {{ app_healthcheck_timeout }} + retries: {{ app_healthcheck_retries }} + +# Full provisioning flow: +# - build and start the compose project directly from the app role +# +# Deploy-only flow: +# - only sync the files here +# - `deploy.yml` will trigger `app-reload.service` after the optional migration +# hook so rebuild/restart happens through systemd +# +# If the role later switches to git-on-VM source delivery: +# - this task can stay as it is +# - the only intended source-delivery change is replacing the three local-copy +# tasks above with a git checkout/update task +# +# If the role later switches to a tagged-image deployment flow: +# - this task can still stay as it is +# - the image build step will be skipped because the compose file will point to +# a pulled image instead of a local `build:` context +- name: Build and start the application compose project + community.docker.docker_compose_v2: + 
project_src: "{{ app_root }}" + state: present + build: always + when: not app_deploy_only | bool diff --git a/ansible/roles/backup/defaults/main.yml b/ansible/roles/backup/defaults/main.yml new file mode 100644 index 0000000..54b1fec --- /dev/null +++ b/ansible/roles/backup/defaults/main.yml @@ -0,0 +1,35 @@ +--- +backup_script_path: /usr/local/bin/app-db-backup.sh +backup_archive_dir: "{{ app_storage_root }}/backups" +backup_env_file: /etc/app-db-backup.env +backup_systemd_service_name: app-db-backup.service +backup_systemd_timer_name: app-db-backup.timer + +backup_packages: + - awscli + +# Compose-managed PostgreSQL backup settings. +backup_docker_binary: /usr/bin/docker +backup_postgres_container_name: "{{ app_name }}-postgres" +backup_postgres_exec_user: postgres +backup_postgres_dump_user: postgres +backup_pg_dump_command: pg_dumpall + +# Real S3-compatible upload settings. +# These are the intended end-state variables for the role. +# For local-only VM learning/testing, keep the inventory override +# `backup_upload_enabled: false` in place so the script only writes compressed +# archives to `backup_archive_dir` and skips the `aws s3 cp` upload step. +backup_s3_access_key: "{{ vault_s3_access_key }}" +backup_s3_secret_key: "{{ vault_s3_secret_key }}" +backup_s3_region: us-east-1 +backup_s3_prefix: "{{ app_name }}/postgresql/{{ inventory_hostname }}" + +# Default behavior for the role is real upload enabled. +# For the current local-only setup, override this to `false` in inventory. +# Later, when a real S3-compatible bucket is ready, comment out the +# local-only inventory override so this default becomes active again. +backup_upload_enabled: true +backup_local_retention_days: "{{ (backup_retention_months | int) * 31 }}" +backup_timer_on_calendar: hourly +backup_timer_randomized_delay_sec: 5m diff --git a/ansible/roles/backup/handlers/main.yml b/ansible/roles/backup/handlers/main.yml new file mode 100644 index 0000000..ed8a18f --- /dev/null +++ b/ansible/roles/backup/handlers/main.yml @@ -0,0 +1,4 @@ +--- +- name: reload systemd + ansible.builtin.systemd_service: + daemon_reload: true diff --git a/ansible/roles/backup/tasks/main.yml b/ansible/roles/backup/tasks/main.yml new file mode 100644 index 0000000..ecc289f --- /dev/null +++ b/ansible/roles/backup/tasks/main.yml @@ -0,0 +1,99 @@ +--- +- name: Install backup dependencies + ansible.builtin.apt: + name: "{{ backup_packages }}" + state: present + +- name: Ensure backup archive directory exists + ansible.builtin.file: + path: "{{ backup_archive_dir }}" + state: directory + owner: root + group: root + mode: "0750" + +- name: Write backup environment file + ansible.builtin.copy: + dest: "{{ backup_env_file }}" + owner: root + group: root + mode: "0600" + content: | + BACKUP_ARCHIVE_DIR={{ backup_archive_dir }} + BACKUP_LOCAL_RETENTION_DAYS={{ backup_local_retention_days }} + BACKUP_UPLOAD_ENABLED={{ 'true' if backup_upload_enabled | bool else 'false' }} + BACKUP_S3_BUCKET={{ backup_s3_bucket }} + BACKUP_S3_ENDPOINT={{ backup_s3_endpoint }} + BACKUP_S3_PREFIX={{ backup_s3_prefix }} + AWS_ACCESS_KEY_ID={{ backup_s3_access_key }} + AWS_SECRET_ACCESS_KEY={{ backup_s3_secret_key }} + AWS_DEFAULT_REGION={{ backup_s3_region }} + AWS_EC2_METADATA_DISABLED=true + +# Current local-only behavior: +# - the same script is used in both modes +# - when `BACKUP_UPLOAD_ENABLED=false`, it creates the compressed archive locally +# and exits before the S3 upload section +# - when real S3 upload is ready, comment out the inventory override +# 
`backup_upload_enabled: false` so the upload block below becomes active +- name: Install backup script + ansible.builtin.template: + src: app-db-backup.sh.j2 + dest: "{{ backup_script_path }}" + owner: root + group: root + mode: "0755" + +- name: Install backup systemd service + ansible.builtin.copy: + dest: "/etc/systemd/system/{{ backup_systemd_service_name }}" + owner: root + group: root + mode: "0644" + content: | + [Unit] + Description=Create and upload a compressed PostgreSQL backup for {{ app_name }} + Requires=docker.service + After=docker.service network-online.target + Wants=network-online.target + ConditionPathExists={{ backup_script_path }} + ConditionPathExists={{ backup_env_file }} + + [Service] + Type=oneshot + User=root + Group=root + EnvironmentFile={{ backup_env_file }} + ExecStart={{ backup_script_path }} + Nice=10 + IOSchedulingClass=best-effort + IOSchedulingPriority=7 + StandardOutput=journal + StandardError=journal + notify: reload systemd + +- name: Install backup systemd timer + ansible.builtin.copy: + dest: "/etc/systemd/system/{{ backup_systemd_timer_name }}" + owner: root + group: root + mode: "0644" + content: | + [Unit] + Description=Run {{ backup_systemd_service_name }} every hour + + [Timer] + OnCalendar={{ backup_timer_on_calendar }} + Persistent=true + RandomizedDelaySec={{ backup_timer_randomized_delay_sec }} + Unit={{ backup_systemd_service_name }} + + [Install] + WantedBy=timers.target + notify: reload systemd + +- name: Ensure backup timer is enabled and started + ansible.builtin.systemd_service: + name: "{{ backup_systemd_timer_name }}" + state: started + enabled: true diff --git a/ansible/roles/backup/templates/app-db-backup.sh.j2 b/ansible/roles/backup/templates/app-db-backup.sh.j2 new file mode 100644 index 0000000..ed073ca --- /dev/null +++ b/ansible/roles/backup/templates/app-db-backup.sh.j2 @@ -0,0 +1,56 @@ +#!/usr/bin/env bash +set -euo pipefail + +backup_env_file="{{ backup_env_file }}" +docker_bin="{{ backup_docker_binary }}" +postgres_container_name="{{ backup_postgres_container_name }}" +postgres_exec_user="{{ backup_postgres_exec_user }}" +pg_dump_command="{{ backup_pg_dump_command }}" +postgres_dump_user="{{ backup_postgres_dump_user }}" + +# shellcheck source=/dev/null +. "$backup_env_file" + +: "${BACKUP_ARCHIVE_DIR:?}" +: "${BACKUP_LOCAL_RETENTION_DAYS:?}" +: "${BACKUP_UPLOAD_ENABLED:?}" +: "${BACKUP_S3_BUCKET:?}" +: "${BACKUP_S3_ENDPOINT:?}" +: "${BACKUP_S3_PREFIX:?}" + +timestamp="$(date -u +%Y%m%dT%H%M%SZ)" +host_name="$(hostname -s)" +archive_name="${host_name}-postgresql-${timestamp}.sql.gz" +archive_path="${BACKUP_ARCHIVE_DIR}/${archive_name}" + +mkdir -p "${BACKUP_ARCHIVE_DIR}" +umask 077 + +"$docker_bin" exec --user "$postgres_exec_user" "$postgres_container_name" \ + "$pg_dump_command" -U "$postgres_dump_user" --clean --if-exists | /bin/gzip -9 > "${archive_path}" + +test -s "${archive_path}" + +find "${BACKUP_ARCHIVE_DIR}" -type f -name '*.sql.gz' -mtime +"${BACKUP_LOCAL_RETENTION_DAYS}" -delete + +# Local-only VM mode: +# keep the archive on disk and skip any remote upload. +# +# Real S3-compatible mode: +# comment out the inventory override `backup_upload_enabled: false` +# and the script will continue into the upload block below. +if [ "${BACKUP_UPLOAD_ENABLED}" != "true" ]; then + echo "Backup upload disabled; archive kept locally at ${archive_path}" + exit 0 +fi + +# Real S3-compatible upload path. 
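+#
+# The archive ends up at s3://<bucket>/<prefix>/<archive-name>, or directly under
+# the bucket root when no prefix is configured.
+#
+# Restore sketch (illustrative only; nothing in this script performs a restore):
+#   gunzip -c "${BACKUP_ARCHIVE_DIR}/<archive>.sql.gz" \
+#     | docker exec -i "$postgres_container_name" psql -U "$postgres_dump_user" postgres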
+if [ -n "${BACKUP_S3_PREFIX}" ]; then + s3_uri="s3://${BACKUP_S3_BUCKET}/${BACKUP_S3_PREFIX%/}/${archive_name}" +else + s3_uri="s3://${BACKUP_S3_BUCKET}/${archive_name}" +fi + +/usr/bin/aws --endpoint-url "${BACKUP_S3_ENDPOINT}" s3 cp "${archive_path}" "${s3_uri}" + +echo "Uploaded ${archive_path} to ${s3_uri}" \ No newline at end of file diff --git a/ansible/roles/common/defaults/main.yml b/ansible/roles/common/defaults/main.yml new file mode 100644 index 0000000..12b5f0e --- /dev/null +++ b/ansible/roles/common/defaults/main.yml @@ -0,0 +1,19 @@ +--- +common_packages: + - ca-certificates + - curl + - git + - gnupg + - jq + - rsync + - unzip + +common_directories: + - path: "{{ app_root }}" + owner: "{{ app_user }}" + group: "{{ app_group }}" + mode: "0755" + - path: "{{ app_storage_root }}" + owner: "{{ app_user }}" + group: "{{ app_group }}" + mode: "0755" diff --git a/ansible/roles/common/handlers/main.yml b/ansible/roles/common/handlers/main.yml new file mode 100644 index 0000000..fe92594 --- /dev/null +++ b/ansible/roles/common/handlers/main.yml @@ -0,0 +1,2 @@ +--- +[] diff --git a/ansible/roles/common/tasks/main.yml b/ansible/roles/common/tasks/main.yml new file mode 100644 index 0000000..17e0ab2 --- /dev/null +++ b/ansible/roles/common/tasks/main.yml @@ -0,0 +1,19 @@ +--- +- name: Update apt cache + ansible.builtin.apt: + update_cache: true + cache_valid_time: 3600 + +- name: Install common packages + ansible.builtin.apt: + name: "{{ common_packages }}" + state: present + +- name: Ensure common directories exist + ansible.builtin.file: + path: "{{ item.path }}" + state: directory + owner: "{{ item.owner }}" + group: "{{ item.group }}" + mode: "{{ item.mode }}" + loop: "{{ common_directories }}" diff --git a/ansible/roles/docker/defaults/main.yml b/ansible/roles/docker/defaults/main.yml new file mode 100644 index 0000000..256b052 --- /dev/null +++ b/ansible/roles/docker/defaults/main.yml @@ -0,0 +1,28 @@ +--- +docker_conflicting_packages: + - docker.io + - docker-compose + - docker-doc + - podman-docker + - containerd + - runc + +# By default this role installs the latest stable Docker packages available from +# Docker's official Debian repository. 
To pin an exact Docker release, replace +# the package names below with versioned package strings discovered with +# `apt-cache madison docker-ce`, for example: +# docker-ce=5:29.4.1-1~debian.13~trixie +# docker-ce-cli=5:29.4.1-1~debian.13~trixie +docker_packages: + - docker-ce + - docker-ce-cli + - containerd.io + - docker-buildx-plugin + - docker-compose-plugin + +docker_apt_keyring_dir: /etc/apt/keyrings +docker_apt_keyring_file: "{{ docker_apt_keyring_dir }}/docker.asc" +docker_apt_sources_file: /etc/apt/sources.list.d/docker.sources + +docker_manage_group_membership: false +docker_users: [] diff --git a/ansible/roles/docker/handlers/main.yml b/ansible/roles/docker/handlers/main.yml new file mode 100644 index 0000000..1a5058d --- /dev/null +++ b/ansible/roles/docker/handlers/main.yml @@ -0,0 +1,5 @@ +--- +- name: restart docker + ansible.builtin.service: + name: docker + state: restarted diff --git a/ansible/roles/docker/tasks/main.yml b/ansible/roles/docker/tasks/main.yml new file mode 100644 index 0000000..8a3fe1f --- /dev/null +++ b/ansible/roles/docker/tasks/main.yml @@ -0,0 +1,68 @@ +--- +- name: Remove conflicting Docker packages + ansible.builtin.apt: + name: "{{ docker_conflicting_packages }}" + state: absent + +- name: Ensure Docker apt keyring directory exists + ansible.builtin.file: + path: "{{ docker_apt_keyring_dir }}" + state: directory + owner: root + group: root + mode: "0755" + +- name: Download Docker apt GPG key + ansible.builtin.get_url: + url: https://download.docker.com/linux/debian/gpg + dest: "{{ docker_apt_keyring_file }}" + owner: root + group: root + mode: "0644" + +- name: Get Debian package architecture + ansible.builtin.command: dpkg --print-architecture + register: docker_dpkg_architecture + changed_when: false + +- name: Remove incorrectly named Docker sources file + ansible.builtin.file: + path: /etc/apt/sources.list.d/docker.sources" + state: absent + +- name: Configure Docker apt repository + ansible.builtin.copy: + dest: "{{ docker_apt_sources_file }}" + owner: root + group: root + mode: "0644" + content: | + Types: deb + URIs: https://download.docker.com/linux/debian + Suites: {{ ansible_facts['distribution_release'] }} + Components: stable + Architectures: {{ docker_dpkg_architecture.stdout }} + Signed-By: {{ docker_apt_keyring_file }} + +- name: Update apt cache after adding Docker repository + ansible.builtin.apt: + update_cache: true + +- name: Install Docker Engine and plugins + ansible.builtin.apt: + name: "{{ docker_packages }}" + state: present + +- name: Ensure Docker service is enabled and started + ansible.builtin.service: + name: docker + state: started + enabled: true + +- name: Optionally add users to docker group + ansible.builtin.user: + name: "{{ item }}" + groups: docker + append: true + loop: "{{ docker_users }}" + when: docker_manage_group_membership | bool diff --git a/ansible/roles/firewall/defaults/main.yml b/ansible/roles/firewall/defaults/main.yml new file mode 100644 index 0000000..5def25b --- /dev/null +++ b/ansible/roles/firewall/defaults/main.yml @@ -0,0 +1,8 @@ +--- +firewall_default_incoming_policy: deny +firewall_default_outgoing_policy: allow +firewall_allowed_tcp_ports: + - 22 + - 80 + - 443 +firewall_logging: "on" diff --git a/ansible/roles/firewall/handlers/main.yml b/ansible/roles/firewall/handlers/main.yml new file mode 100644 index 0000000..fe92594 --- /dev/null +++ b/ansible/roles/firewall/handlers/main.yml @@ -0,0 +1,2 @@ +--- +[] diff --git a/ansible/roles/firewall/tasks/main.yml 
b/ansible/roles/firewall/tasks/main.yml new file mode 100644 index 0000000..293ed9c --- /dev/null +++ b/ansible/roles/firewall/tasks/main.yml @@ -0,0 +1,30 @@ +--- +- name: Ensure ufw is installed + ansible.builtin.package: + name: ufw + state: present + +- name: Set default incoming policy + community.general.ufw: + direction: incoming + policy: "{{ firewall_default_incoming_policy }}" + +- name: Set default outgoing policy + community.general.ufw: + direction: outgoing + policy: "{{ firewall_default_outgoing_policy }}" + +- name: Allow required TCP ports + community.general.ufw: + rule: allow + port: "{{ item }}" + proto: tcp + loop: "{{ firewall_allowed_tcp_ports }}" + +- name: Enable ufw logging + community.general.ufw: + logging: "{{ firewall_logging }}" + +- name: Enable ufw + community.general.ufw: + state: enabled diff --git a/ansible/roles/journald/defaults/main.yml b/ansible/roles/journald/defaults/main.yml new file mode 100644 index 0000000..9807b57 --- /dev/null +++ b/ansible/roles/journald/defaults/main.yml @@ -0,0 +1,8 @@ +--- +journald_storage: persistent +journald_max_retention_sec: 15552000 +journald_max_file_sec: 1day + +journald_config_dir: /etc/systemd/journald.conf.d +journald_dropin_file: "{{ journald_config_dir }}/99-ansible.conf" +journald_persistent_dir: /var/log/journal diff --git a/ansible/roles/journald/handlers/main.yml b/ansible/roles/journald/handlers/main.yml new file mode 100644 index 0000000..3477c9c --- /dev/null +++ b/ansible/roles/journald/handlers/main.yml @@ -0,0 +1,5 @@ +--- +- name: restart journald + ansible.builtin.service: + name: systemd-journald + state: restarted diff --git a/ansible/roles/journald/tasks/main.yml b/ansible/roles/journald/tasks/main.yml new file mode 100644 index 0000000..1a1078d --- /dev/null +++ b/ansible/roles/journald/tasks/main.yml @@ -0,0 +1,29 @@ +--- +- name: Ensure persistent journal directory exists + ansible.builtin.file: + path: "{{ journald_persistent_dir }}" + state: directory + owner: root + group: systemd-journal + mode: "2755" + +- name: Ensure journald drop-in directory exists + ansible.builtin.file: + path: "{{ journald_config_dir }}" + state: directory + owner: root + group: root + mode: "0755" + +- name: Configure journald retention and file rotation + ansible.builtin.copy: + dest: "{{ journald_dropin_file }}" + owner: root + group: root + mode: "0644" + content: | + [Journal] + Storage={{ journald_storage }} + MaxRetentionSec={{ journald_max_retention_sec }} + MaxFileSec={{ journald_max_file_sec }} + notify: restart journald diff --git a/ansible/roles/nginx/defaults/main.yml b/ansible/roles/nginx/defaults/main.yml new file mode 100644 index 0000000..3af3ff2 --- /dev/null +++ b/ansible/roles/nginx/defaults/main.yml @@ -0,0 +1,33 @@ +--- +nginx_package_name: nginx +nginx_service_name: nginx + +nginx_apt_prerequisite_packages: + - ca-certificates + - curl + - gnupg2 + - debian-archive-keyring + - openssl + +nginx_apt_keyring_dir: /usr/share/keyrings +nginx_apt_keyring_file: "{{ nginx_apt_keyring_dir }}/nginx-archive-keyring.gpg" +nginx_apt_signing_key_tmp: /tmp/nginx_signing.key +nginx_apt_repository_file: /etc/apt/sources.list.d/nginx.list +nginx_apt_preference_file: /etc/apt/preferences.d/99nginx + +nginx_site_name: "{{ app_name }}" +nginx_server_name: "{{ app_domains | first }}" +nginx_upstream_host: "{{ app_bind_host }}" +nginx_upstream_port: "{{ app_bind_port }}" +nginx_stub_status_allow: + - 127.0.0.1 + - ::1 + +nginx_ssl_dir: /etc/nginx/ssl +nginx_ssl_private_key: "{{ nginx_ssl_dir }}/{{ 
nginx_site_name }}.key" +nginx_ssl_certificate: "{{ nginx_ssl_dir }}/{{ nginx_site_name }}.crt" + +nginx_conf_d_dir: /etc/nginx/conf.d +nginx_site_config_file: "{{ nginx_conf_d_dir }}/{{ nginx_site_name }}.conf" +nginx_default_conf_file: "{{ nginx_conf_d_dir }}/default.conf" +nginx_default_site_file: /etc/nginx/sites-enabled/default diff --git a/ansible/roles/nginx/handlers/main.yml b/ansible/roles/nginx/handlers/main.yml new file mode 100644 index 0000000..f974f4b --- /dev/null +++ b/ansible/roles/nginx/handlers/main.yml @@ -0,0 +1,5 @@ +--- +- name: reload nginx + ansible.builtin.service: + name: nginx + state: reloaded diff --git a/ansible/roles/nginx/tasks/main.yml b/ansible/roles/nginx/tasks/main.yml new file mode 100644 index 0000000..922d8c2 --- /dev/null +++ b/ansible/roles/nginx/tasks/main.yml @@ -0,0 +1,155 @@ +--- +- name: Install Nginx repository prerequisites + ansible.builtin.apt: + name: "{{ nginx_apt_prerequisite_packages }}" + state: present + +- name: Ensure Nginx keyring directory exists + ansible.builtin.file: + path: "{{ nginx_apt_keyring_dir }}" + state: directory + owner: root + group: root + mode: "0755" + +- name: Download Nginx signing key + ansible.builtin.get_url: + url: https://nginx.org/keys/nginx_signing.key + dest: "{{ nginx_apt_signing_key_tmp }}" + owner: root + group: root + mode: "0644" + register: nginx_signing_key_download + +- name: Check whether Nginx keyring already exists + ansible.builtin.stat: + path: "{{ nginx_apt_keyring_file }}" + register: nginx_keyring_stat + +- name: Convert Nginx signing key to keyring format + ansible.builtin.command: + cmd: "gpg --dearmor --yes --output {{ nginx_apt_keyring_file }} {{ nginx_apt_signing_key_tmp }}" + when: nginx_signing_key_download.changed or not nginx_keyring_stat.stat.exists + +- name: Configure Nginx apt repository + ansible.builtin.copy: + dest: "{{ nginx_apt_repository_file }}" + owner: root + group: root + mode: "0644" + content: | + deb [signed-by={{ nginx_apt_keyring_file }}] https://nginx.org/packages/debian {{ ansible_facts['distribution_release'] }} nginx + +- name: Pin nginx.org packages above distro packages + ansible.builtin.copy: + dest: "{{ nginx_apt_preference_file }}" + owner: root + group: root + mode: "0644" + content: | + Package: * + Pin: origin nginx.org + Pin: release o=nginx + Pin-Priority: 900 + +- name: Update apt cache after adding Nginx repository + ansible.builtin.apt: + update_cache: true + +- name: Install Nginx + ansible.builtin.apt: + name: "{{ nginx_package_name }}" + state: present + +- name: Ensure Nginx SSL directory exists + ansible.builtin.file: + path: "{{ nginx_ssl_dir }}" + state: directory + owner: root + group: root + mode: "0755" + +- name: Generate self-signed certificate for local VM testing + ansible.builtin.command: + cmd: > + openssl req -x509 -nodes -newkey rsa:2048 + -keyout {{ nginx_ssl_private_key }} + -out {{ nginx_ssl_certificate }} + -days 365 + -subj /CN={{ nginx_server_name }} + -addext subjectAltName=DNS:{{ nginx_server_name }} + args: + creates: "{{ nginx_ssl_certificate }}" + notify: reload nginx + +- name: Set permissions on Nginx private key + ansible.builtin.file: + path: "{{ nginx_ssl_private_key }}" + owner: root + group: root + mode: "0600" + +- name: Remove default Nginx conf.d site if present + ansible.builtin.file: + path: "{{ nginx_default_conf_file }}" + state: absent + notify: reload nginx + +- name: Remove default Debian Nginx site if present + ansible.builtin.file: + path: "{{ nginx_default_site_file }}" + state: absent + 
notify: reload nginx + +- name: Install Nginx reverse proxy configuration + ansible.builtin.copy: + dest: "{{ nginx_site_config_file }}" + owner: root + group: root + mode: "0644" + content: | + server { + listen 80; + server_name {{ nginx_server_name }}; + + location = /nginx_status { + stub_status; + access_log off; + allow 127.0.0.1; + allow ::1; + deny all; + } + + location / { + return 301 https://$host$request_uri; + } + } + + server { + listen 443 ssl; + http2 on; + server_name {{ nginx_server_name }}; + + ssl_certificate {{ nginx_ssl_certificate }}; + ssl_certificate_key {{ nginx_ssl_private_key }}; + + location / { + proxy_pass http://{{ nginx_upstream_host }}:{{ nginx_upstream_port }}; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto https; + proxy_http_version 1.1; + } + } + notify: reload nginx + +- name: Validate Nginx configuration + ansible.builtin.command: nginx -t + changed_when: false + +- name: Ensure Nginx service is enabled and started + ansible.builtin.service: + name: "{{ nginx_service_name }}" + state: started + enabled: true diff --git a/ansible/roles/sshd/defaults/main.yml b/ansible/roles/sshd/defaults/main.yml new file mode 100644 index 0000000..1f04a8f --- /dev/null +++ b/ansible/roles/sshd/defaults/main.yml @@ -0,0 +1,7 @@ +--- +sshd_permit_root_login: "no" +sshd_password_authentication: "no" +sshd_kbd_interactive_authentication: "no" +sshd_pubkey_authentication: "yes" +sshd_allow_users: + - "{{ deploy_user }}" diff --git a/ansible/roles/sshd/handlers/main.yml b/ansible/roles/sshd/handlers/main.yml new file mode 100644 index 0000000..dc429e5 --- /dev/null +++ b/ansible/roles/sshd/handlers/main.yml @@ -0,0 +1,5 @@ +--- +- name: restart sshd + ansible.builtin.service: + name: ssh + state: reloaded diff --git a/ansible/roles/sshd/tasks/main.yml b/ansible/roles/sshd/tasks/main.yml new file mode 100644 index 0000000..bced2a7 --- /dev/null +++ b/ansible/roles/sshd/tasks/main.yml @@ -0,0 +1,45 @@ +--- +- name: Ensure OpenSSH server is installed + ansible.builtin.package: + name: openssh-server + state: present + +- name: Disable root SSH login + ansible.builtin.lineinfile: + path: /etc/ssh/sshd_config + regexp: '^#?PermitRootLogin\s+' + line: "PermitRootLogin {{ sshd_permit_root_login }}" + validate: /usr/sbin/sshd -t -f %s + notify: restart sshd + +- name: Disable SSH password authentication + ansible.builtin.lineinfile: + path: /etc/ssh/sshd_config + regexp: '^#?PasswordAuthentication\s+' + line: "PasswordAuthentication {{ sshd_password_authentication }}" + validate: /usr/sbin/sshd -t -f %s + notify: restart sshd + +- name: Disable keyboard-interactive authentication + ansible.builtin.lineinfile: + path: /etc/ssh/sshd_config + regexp: '^#?KbdInteractiveAuthentication\s+' + line: "KbdInteractiveAuthentication {{ sshd_kbd_interactive_authentication }}" + validate: /usr/sbin/sshd -t -f %s + notify: restart sshd + +- name: Ensure public key authentication is enabled + ansible.builtin.lineinfile: + path: /etc/ssh/sshd_config + regexp: '^#?PubkeyAuthentication\s+' + line: "PubkeyAuthentication {{ sshd_pubkey_authentication }}" + validate: /usr/sbin/sshd -t -f %s + notify: restart sshd + +- name: Restrict allowed SSH users + ansible.builtin.lineinfile: + path: /etc/ssh/sshd_config + regexp: '^#?AllowUsers\s+' + line: "AllowUsers {{ sshd_allow_users | join(' ') }}" + validate: /usr/sbin/sshd -t -f %s + notify: restart sshd diff --git 
a/ansible/roles/systemd_reload/defaults/main.yml b/ansible/roles/systemd_reload/defaults/main.yml new file mode 100644 index 0000000..14b8c7c --- /dev/null +++ b/ansible/roles/systemd_reload/defaults/main.yml @@ -0,0 +1,7 @@ +--- +app_reload_service_name: app-reload.service +app_reload_script_path: /usr/local/bin/app-reload.sh +app_reload_working_directory: "{{ app_root }}" +app_reload_compose_file: "{{ app_root }}/docker-compose.yml" +app_reload_compose_service: app +app_reload_timeout_start_sec: 300 diff --git a/ansible/roles/systemd_reload/handlers/main.yml b/ansible/roles/systemd_reload/handlers/main.yml new file mode 100644 index 0000000..ed8a18f --- /dev/null +++ b/ansible/roles/systemd_reload/handlers/main.yml @@ -0,0 +1,4 @@ +--- +- name: reload systemd + ansible.builtin.systemd_service: + daemon_reload: true diff --git a/ansible/roles/systemd_reload/tasks/main.yml b/ansible/roles/systemd_reload/tasks/main.yml new file mode 100644 index 0000000..9bca581 --- /dev/null +++ b/ansible/roles/systemd_reload/tasks/main.yml @@ -0,0 +1,34 @@ +--- +- name: Install application reload helper script + ansible.builtin.template: + src: app-reload.sh.j2 + dest: "{{ app_reload_script_path }}" + owner: root + group: root + mode: "0755" + +- name: Install application reload systemd unit + ansible.builtin.copy: + dest: "/etc/systemd/system/{{ app_reload_service_name }}" + owner: root + group: root + mode: "0644" + content: | + [Unit] + Description=Reload the {{ app_name }} application container + Documentation={{ app_repo_url }} + Requires=docker.service + After=docker.service network-online.target + Wants=network-online.target + ConditionPathExists={{ app_reload_compose_file }} + + [Service] + Type=oneshot + RemainAfterExit=yes + WorkingDirectory={{ app_reload_working_directory }} + ExecStart={{ app_reload_script_path }} + ExecReload={{ app_reload_script_path }} + TimeoutStartSec={{ app_reload_timeout_start_sec }} + StandardOutput=journal + StandardError=journal + notify: reload systemd diff --git a/ansible/roles/systemd_reload/templates/app-reload.sh.j2 b/ansible/roles/systemd_reload/templates/app-reload.sh.j2 new file mode 100644 index 0000000..5e447ca --- /dev/null +++ b/ansible/roles/systemd_reload/templates/app-reload.sh.j2 @@ -0,0 +1,15 @@ +#!/usr/bin/env bash +set -euo pipefail + +working_directory="{{ app_reload_working_directory }}" +compose_file="{{ app_reload_compose_file }}" +compose_service="{{ app_reload_compose_service }}" + +cd "$working_directory" + +# Validate the compose file before trying to roll the app service. + /usr/bin/docker compose -f "$compose_file" config -q + +# Future hook: a migration command could be added here before the app +# container is rebuilt. 
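+#
+# A purely hypothetical shape for that hook (the framework and the
+# `manage.py migrate` command below are placeholders, not part of this app):
+#   /usr/bin/docker compose -f "$compose_file" run --rm "$compose_service" \
+#     python manage.py migrate
+#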
+exec /usr/bin/docker compose -f "$compose_file" up -d --build --no-deps "$compose_service" \ No newline at end of file diff --git a/ansible/roles/users/defaults/main.yml b/ansible/roles/users/defaults/main.yml new file mode 100644 index 0000000..685029d --- /dev/null +++ b/ansible/roles/users/defaults/main.yml @@ -0,0 +1,12 @@ +--- +deploy_user: "{{ app_user }}" +deploy_group: "{{ app_group }}" +deploy_shell: /bin/bash +deploy_home: "/home/{{ deploy_user }}" +deploy_comment: Deployment user +deploy_secondary_groups: + - sudo +deploy_authorized_keys: [] +deploy_password_locked: true +deploy_sudo_nopasswd: true +deploy_sudoers_filename: "{{ deploy_user }}" diff --git a/ansible/roles/users/files/ssh_keys/README.md b/ansible/roles/users/files/ssh_keys/README.md new file mode 100644 index 0000000..240b229 --- /dev/null +++ b/ansible/roles/users/files/ssh_keys/README.md @@ -0,0 +1,3 @@ +Place one or more public keys here as `*.pub` files if you want the role to +load them from disk instead of defining `deploy_authorized_keys` in inventory +variables. diff --git a/ansible/roles/users/files/ssh_keys/deploy.pub b/ansible/roles/users/files/ssh_keys/deploy.pub new file mode 100644 index 0000000..8d9af70 --- /dev/null +++ b/ansible/roles/users/files/ssh_keys/deploy.pub @@ -0,0 +1 @@ +ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIODEx3ZSCqQfyiS7E7Cemkzqd/RAbMgKqhrRx0IqvFzr cloudops-test diff --git a/ansible/roles/users/handlers/main.yml b/ansible/roles/users/handlers/main.yml new file mode 100644 index 0000000..fe92594 --- /dev/null +++ b/ansible/roles/users/handlers/main.yml @@ -0,0 +1,2 @@ +--- +[] diff --git a/ansible/roles/users/tasks/main.yml b/ansible/roles/users/tasks/main.yml new file mode 100644 index 0000000..dcd7230 --- /dev/null +++ b/ansible/roles/users/tasks/main.yml @@ -0,0 +1,91 @@ +--- +- name: Ensure sudo is installed + ansible.builtin.package: + name: sudo + state: present + +- name: Discover deploy SSH public keys from the role files directory + ansible.builtin.set_fact: + deploy_authorized_key_files: >- + {{ + query( + 'ansible.builtin.fileglob', + role_path ~ '/' ~ ssh_allowed_public_keys_dir ~ '/*.pub' + ) + }} + when: deploy_authorized_keys | length == 0 + +- name: Read deploy SSH public keys from files + ansible.builtin.set_fact: + deploy_authorized_keys_from_files: >- + {{ + (deploy_authorized_keys_from_files | default([])) + + [lookup('ansible.builtin.file', item) | trim] + }} + loop: "{{ deploy_authorized_key_files | default([]) }}" + when: deploy_authorized_keys | length == 0 + +- name: Build final deploy SSH public key list + ansible.builtin.set_fact: + deploy_authorized_keys_effective: >- + {{ + ( + deploy_authorized_keys + if deploy_authorized_keys | length > 0 + else deploy_authorized_keys_from_files | default([]) + ) + | map('trim') + | reject('equalto', '') + | list + }} + +- name: Require at least one deploy SSH public key + ansible.builtin.assert: + that: + - deploy_authorized_keys_effective | length > 0 + fail_msg: >- + Provide deploy_authorized_keys in inventory vars or add one or more + .pub files under roles/users/files/ssh_keys/. 
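+
+# Inventory-driven alternative (sketch): the fileglob discovery above is skipped
+# whenever `deploy_authorized_keys` is non-empty, so the same result can be
+# reached by defining the keys directly in group_vars or host_vars, e.g.:
+#
+#   deploy_authorized_keys:
+#     - "ssh-ed25519 AAAA...example-laptop"
+#     - "ssh-ed25519 AAAA...example-ci"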
+ +- name: Ensure the deploy primary group exists + ansible.builtin.group: + name: "{{ deploy_group }}" + state: present + +- name: Ensure the deploy user exists + ansible.builtin.user: + name: "{{ deploy_user }}" + comment: "{{ deploy_comment }}" + group: "{{ deploy_group }}" + groups: "{{ deploy_secondary_groups | join(',') }}" + append: true + shell: "{{ deploy_shell }}" + home: "{{ deploy_home }}" + create_home: true + password_lock: "{{ deploy_password_locked | bool }}" + state: present + +- name: Ensure the deploy .ssh directory exists + ansible.builtin.file: + path: "{{ deploy_home }}/.ssh" + state: directory + owner: "{{ deploy_user }}" + group: "{{ deploy_group }}" + mode: "0700" + +- name: Install deploy SSH authorized keys + ansible.posix.authorized_key: + user: "{{ deploy_user }}" + key: "{{ item }}" + state: present + manage_dir: false + loop: "{{ deploy_authorized_keys_effective }}" + +- name: Configure sudoers access for deploy + ansible.builtin.template: + src: deploy-sudoers.j2 + dest: "/etc/sudoers.d/{{ deploy_sudoers_filename }}" + owner: root + group: root + mode: "0440" + validate: "visudo -cf %s" diff --git a/ansible/roles/users/templates/deploy-sudoers.j2 b/ansible/roles/users/templates/deploy-sudoers.j2 new file mode 100644 index 0000000..6d37c2c --- /dev/null +++ b/ansible/roles/users/templates/deploy-sudoers.j2 @@ -0,0 +1 @@ +{{ deploy_user }} ALL=(ALL) {% if deploy_sudo_nopasswd %}NOPASSWD:{% endif %}ALL diff --git a/ansible/site.yml b/ansible/site.yml new file mode 100644 index 0000000..b6edf6e --- /dev/null +++ b/ansible/site.yml @@ -0,0 +1,17 @@ +--- +- name: Configure application hosts + hosts: app + remote_user: deploy + become: true + + roles: + - common + - users + - sshd + - firewall + - journald + - docker + - app + - systemd_reload + - nginx + - backup diff --git a/app/server.py b/app/server.py index 608c607..4abce8e 100644 --- a/app/server.py +++ b/app/server.py @@ -1,10 +1,45 @@ -import http.server -import socketserver +import json +import os +from datetime import datetime, timezone +from http.server import SimpleHTTPRequestHandler, ThreadingHTTPServer -PORT = 8000 +PORT = int(os.getenv("APP_PORT", "8000")) -Handler = http.server.SimpleHTTPRequestHandler -with socketserver.TCPServer(("", PORT), Handler) as httpd: - print("serving at port", PORT) - httpd.serve_forever() +def emit_log(level, **fields): + payload = { + "ts": datetime.now(timezone.utc).isoformat().replace("+00:00", "Z"), + "level": level, + **fields, + } + print(json.dumps(payload), flush=True) + + +class JsonRequestHandler(SimpleHTTPRequestHandler): + def log_request(self, code="-", size="-"): + emit_log( + "info", + event="http_request", + client_ip=self.client_address[0], + method=getattr(self, "command", None), + path=getattr(self, "path", None), + status=int(code) if str(code).isdigit() else code, + size=int(size) if str(size).isdigit() else size, + user_agent=self.headers.get("User-Agent", ""), + ) + + def log_error(self, fmt, *args): + emit_log( + "error", + event="http_error", + client_ip=self.client_address[0] if self.client_address else None, + method=getattr(self, "command", None), + path=getattr(self, "path", None), + message=fmt % args, + ) + + +if __name__ == "__main__": + emit_log("info", event="startup", port=PORT, message="server starting") + with ThreadingHTTPServer(("0.0.0.0", PORT), JsonRequestHandler) as httpd: + httpd.serve_forever() \ No newline at end of file
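
For reference, a request handled by `JsonRequestHandler` is emitted by `emit_log` as one JSON object per line on stdout, along these lines (values illustrative):

    {"ts": "2026-01-01T12:00:00Z", "level": "info", "event": "http_request", "client_ip": "127.0.0.1", "method": "GET", "path": "/", "status": 200, "size": "-", "user_agent": "curl/8.5.0"}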