diff --git a/.github/workflows/brakeman-analysis.yml b/.github/workflows/brakeman-analysis.yml index d7c67c592..da90e3b1e 100644 --- a/.github/workflows/brakeman-analysis.yml +++ b/.github/workflows/brakeman-analysis.yml @@ -1,44 +1,44 @@ -# This workflow integrates Brakeman with GitHub's Code Scanning feature -# Brakeman is a static analysis security vulnerability scanner for Ruby on Rails applications +# # This workflow integrates Brakeman with GitHub's Code Scanning feature +# # Brakeman is a static analysis security vulnerability scanner for Ruby on Rails applications -name: Brakeman Scan +# name: Brakeman Scan -# This section configures the trigger for the workflow. Feel free to customize depending on your convention -on: - push: - branches: [ "master", "main", "release" ] - pull_request: - branches: [ "master", "main", "release" ] +# # This section configures the trigger for the workflow. Feel free to customize depending on your convention +# on: +# push: +# branches: [ "master", "main", "release" ] +# pull_request: +# branches: [ "master", "main", "release" ] -jobs: - brakeman-scan: - name: Brakeman Scan - runs-on: ubuntu-latest - steps: - # Checkout the repository to the GitHub Actions runner - - name: Checkout - uses: actions/checkout@v3 +# jobs: +# brakeman-scan: +# name: Brakeman Scan +# runs-on: ubuntu-latest +# steps: +# # Checkout the repository to the GitHub Actions runner +# - name: Checkout +# uses: actions/checkout@v3 - # Customize the ruby version depending on your needs - - name: Setup Ruby - uses: ruby/setup-ruby@v1 - with: - ruby-version: 3.1.0 +# # Customize the ruby version depending on your needs +# - name: Setup Ruby +# uses: ruby/setup-ruby@v1 +# with: +# ruby-version: 3.1.0 - - name: Setup Brakeman - env: - BRAKEMAN_VERSION: '5.3.1' # SARIF support is provided in Brakeman version 4.10+ - run: | - gem install brakeman --version $BRAKEMAN_VERSION +# - name: Setup Brakeman +# env: +# BRAKEMAN_VERSION: '5.3.1' # SARIF support is provided in 
Brakeman version 4.10+ +# run: | +# gem install brakeman --version $BRAKEMAN_VERSION - # Execute Brakeman CLI and generate a SARIF output with the security issues identified during the analysis - - name: Scan - continue-on-error: true - run: | - brakeman -f sarif -o output.sarif.json . +# # Execute Brakeman CLI and generate a SARIF output with the security issues identified during the analysis +# - name: Scan +# continue-on-error: true +# run: | +# brakeman -f sarif -o output.sarif.json . - # Upload the SARIF file generated in the previous step - - name: Upload SARIF - uses: github/codeql-action/upload-sarif@v2 - with: - sarif_file: output.sarif.json +# # Upload the SARIF file generated in the previous step +# - name: Upload SARIF +# uses: github/codeql-action/upload-sarif@v2 +# with: +# sarif_file: output.sarif.json diff --git a/.github/workflows/ci-features.yml b/.github/workflows/ci-features.yml index 6faf7071d..e3d93b1d0 100644 --- a/.github/workflows/ci-features.yml +++ b/.github/workflows/ci-features.yml @@ -1,33 +1,33 @@ -name: CI Feature Branches -on: - push: - branches-ignore: - - 'main' - - 'release' - - 'integration' +# name: CI Feature Branches +# on: +# push: +# branches-ignore: +# - 'main' +# - 'release' +# - 'integration' -jobs: - prep-workflow: - name: Prepping Workflow - runs-on: ubuntu-latest - steps: - - name: Check Runner Status - run: echo "Runner is ready to go" +# jobs: +# prep-workflow: +# name: Prepping Workflow +# runs-on: ubuntu-latest +# steps: +# - name: Check Runner Status +# run: echo "Runner is ready to go" - docker-compose-build: - name: Docker Compose Build - needs: prep-workflow - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - name: Build Docker Compose - run: | - docker compose build +# docker-compose-build: +# name: Docker Compose Build +# needs: prep-workflow +# runs-on: ubuntu-latest +# steps: +# - uses: actions/checkout@v4 +# - name: Build Docker Compose +# run: | +# docker compose build - ready-for-pr: - 
name: Ready for PR - needs: docker-compose-build - runs-on: ubuntu-latest - steps: - - name: Ready for Integration - run: echo "Ready for Integration, Open PR to merge feature branch to integration branch" +# ready-for-pr: +# name: Ready for PR +# needs: docker-compose-build +# runs-on: ubuntu-latest +# steps: +# - name: Ready for Integration +# run: echo "Ready for Integration, Open PR to merge feature branch to integration branch" diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index d95481e32..1b82f6d63 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -1,70 +1,70 @@ -# For most projects, this workflow file will not need changing; you simply need -# to commit it to your repository. -# -# You may wish to alter this file to override the set of languages analyzed, -# or to provide custom queries or build logic. -# -# ******** NOTE ******** -# We have attempted to detect the languages in your repository. Please check -# the `language` matrix defined below to confirm you have the correct set of -# supported CodeQL languages. -# -name: "CodeQL" +# # For most projects, this workflow file will not need changing; you simply need +# # to commit it to your repository. +# # +# # You may wish to alter this file to override the set of languages analyzed, +# # or to provide custom queries or build logic. +# # +# # ******** NOTE ******** +# # We have attempted to detect the languages in your repository. Please check +# # the `language` matrix defined below to confirm you have the correct set of +# # supported CodeQL languages. 
+# # +# name: "CodeQL" -on: - push: - branches: [ "master", "main", "release" ] - pull_request: - # The branches below must be a subset of the branches above - branches: [ "master", "release" ] - schedule: - - cron: '28 9 * * 4' +# on: +# push: +# branches: [ "master", "main", "release" ] +# pull_request: +# # The branches below must be a subset of the branches above +# branches: [ "master", "release" ] +# schedule: +# - cron: '28 9 * * 4' -jobs: - analyze: - name: Analyze - runs-on: ubuntu-latest - permissions: - actions: read - contents: read - security-events: write +# jobs: +# analyze: +# name: Analyze +# runs-on: ubuntu-latest +# permissions: +# actions: read +# contents: read +# security-events: write - strategy: - fail-fast: false - matrix: - language: [ 'javascript', 'ruby' ] - # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] - # Learn more about CodeQL language support at https://git.io/codeql-language-support +# strategy: +# fail-fast: false +# matrix: +# language: [ 'javascript', 'ruby' ] +# # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] +# # Learn more about CodeQL language support at https://git.io/codeql-language-support - steps: - - name: Checkout repository - uses: actions/checkout@v2 +# steps: +# - name: Checkout repository +# uses: actions/checkout@v2 - # Initializes the CodeQL tools for scanning. - - name: Initialize CodeQL - uses: github/codeql-action/init@v2 - with: - languages: ${{ matrix.language }} - # If you wish to specify custom queries, you can do so here or in a config file. - # By default, queries listed here will override any specified in a config file. - # Prefix the list here with "+" to use these queries and those in the config file. - # queries: ./path/to/local/query, your-org/your-repo/queries@main +# # Initializes the CodeQL tools for scanning. 
+# - name: Initialize CodeQL +# uses: github/codeql-action/init@v2 +# with: +# languages: ${{ matrix.language }} +# # If you wish to specify custom queries, you can do so here or in a config file. +# # By default, queries listed here will override any specified in a config file. +# # Prefix the list here with "+" to use these queries and those in the config file. +# # queries: ./path/to/local/query, your-org/your-repo/queries@main - # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). - # If this step fails, then you should remove it and run the build manually (see below) - - name: Autobuild - uses: github/codeql-action/autobuild@v2 +# # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). +# # If this step fails, then you should remove it and run the build manually (see below) +# - name: Autobuild +# uses: github/codeql-action/autobuild@v2 - # â„šī¸ Command-line programs to run using the OS shell. - # 📚 https://git.io/JvXDl +# # â„šī¸ Command-line programs to run using the OS shell. 
+# # 📚 https://git.io/JvXDl - # âœī¸ If the Autobuild fails above, remove it and uncomment the following three lines - # and modify them (or add more) to build your code if your project - # uses a compiled language +# # âœī¸ If the Autobuild fails above, remove it and uncomment the following three lines +# # and modify them (or add more) to build your code if your project +# # uses a compiled language - #- run: | - # make bootstrap - # make release +# #- run: | +# # make bootstrap +# # make release - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v2 +# - name: Perform CodeQL Analysis +# uses: github/codeql-action/analyze@v2 diff --git a/.github/workflows/e2e-tests.yml b/.github/workflows/e2e-tests.yml index 664213803..8363d5460 100644 --- a/.github/workflows/e2e-tests.yml +++ b/.github/workflows/e2e-tests.yml @@ -1,57 +1,57 @@ -name: e2e-tests -on: [push] -jobs: - cypress-run: - name: "Cypress run" - runs-on: ubuntu-latest - strategy: - # when one test fails, DO NOT cancel the other - # containers, because this will kill Cypress processes - # leaving the Dashboard hanging ... - # https://github.com/cypress-io/github-action/issues/48 - fail-fast: false - matrix: - # run 4 copies of the current job in parallel - containers: [1, 2, 3] - node-version: [14.18.x] - services: - db: - env: - MYSQL_ALLOW_EMPTY_PASSWORD: yes - image: mysql:8.0 - ports: - - 3306 +# name: e2e-tests +# on: [push] +# jobs: +# cypress-run: +# name: "Cypress run" +# runs-on: ubuntu-latest +# strategy: +# # when one test fails, DO NOT cancel the other +# # containers, because this will kill Cypress processes +# # leaving the Dashboard hanging ... 
+# # https://github.com/cypress-io/github-action/issues/48 +# fail-fast: false +# matrix: +# # run 4 copies of the current job in parallel +# containers: [1, 2, 3] +# node-version: [14.18.x] +# services: +# db: +# env: +# MYSQL_ALLOW_EMPTY_PASSWORD: yes +# image: mysql:8.0 +# ports: +# - 3306 - steps: - - name: Checkout - uses: actions/checkout@v3 +# steps: +# - name: Checkout +# uses: actions/checkout@v3 - - name: Set up Ruby - uses: ruby/setup-ruby@359bebbc29cbe6c87da6bc9ea3bc930432750108 - with: - ruby-version: 3.1.0 - bundler-cache: true +# - name: Set up Ruby +# uses: ruby/setup-ruby@359bebbc29cbe6c87da6bc9ea3bc930432750108 +# with: +# ruby-version: 3.1.0 +# bundler-cache: true - - name: Use Node.js ${{ matrix.node-version }} - uses: actions/setup-node@v1 - with: - node-version: ${{ matrix.node-version }} +# - name: Use Node.js ${{ matrix.node-version }} +# uses: actions/setup-node@v1 +# with: +# node-version: ${{ matrix.node-version }} - - name: Start rails server and run e2e test - env: - RAILS_ENV: test - MPATH_DATABASE_USERNAME: root - MPATH_DATABASE_PASSWORD: root - MPATH_DATABASE_HOST: 127.0.0.1 - CYPRESS_RECORD_KEY: ${{ secrets.CYPRESS_RECORD_KEY }} - run: | - sudo service mysql start - gem install bundler - bundle install --jobs 4 --retry 3 - bundle exec rails db:create - bundle exec rails db:migrate RAILS_ENV=test - bundle exec rails db:seed RAILS_ENV=test - bundle exec rails assets:precompile RAILS_ENV=test - bundle exec rails server -e test -p 3000 -d - yarn cypress run --record --parallel --project ./spec --group "Parallel machines" \ No newline at end of file +# - name: Start rails server and run e2e test +# env: +# RAILS_ENV: test +# MPATH_DATABASE_USERNAME: root +# MPATH_DATABASE_PASSWORD: root +# MPATH_DATABASE_HOST: 127.0.0.1 +# CYPRESS_RECORD_KEY: ${{ secrets.CYPRESS_RECORD_KEY }} +# run: | +# sudo service mysql start +# gem install bundler +# bundle install --jobs 4 --retry 3 +# bundle exec rails db:create +# bundle exec rails db:migrate 
RAILS_ENV=test +# bundle exec rails db:seed RAILS_ENV=test +# bundle exec rails assets:precompile RAILS_ENV=test +# bundle exec rails server -e test -p 3000 -d +# yarn cypress run --record --parallel --project ./spec --group "Parallel machines" \ No newline at end of file diff --git a/.github/workflows/ecr-publish.yml b/.github/workflows/ecr-publish.yml index 5073c76d0..833240cad 100644 --- a/.github/workflows/ecr-publish.yml +++ b/.github/workflows/ecr-publish.yml @@ -26,10 +26,11 @@ jobs: - name: Build and push Docker image run: | - # Build image from docker/app/Dockerfile + # Build image docker build \ -f docker/app/Dockerfile \ -t ${{ secrets.AWS_ACCOUNT_ID }}.dkr.ecr.${{ secrets.AWS_REGION }}.amazonaws.com/microhealthllc/bo:latest \ + --no-cache \ docker/app # Push image @@ -37,8 +38,10 @@ jobs: - name: Force ECS Service Update run: | + set -x aws ecs update-service \ - --cluster mpath-Production \ - --service mpath-service \ + --cluster mpath-bo \ + --service mpath-app-bo \ --force-new-deployment \ - --region ${{ secrets.AWS_REGION }} + --region ${{ secrets.AWS_REGION }} \ + --output table diff --git a/.github/workflows/node.js.yml b/.github/workflows/node.js.yml index 7b037418e..395527637 100644 --- a/.github/workflows/node.js.yml +++ b/.github/workflows/node.js.yml @@ -1,27 +1,27 @@ -# This workflow will do a clean install of node dependencies, build the source code and run tests across different versions of node -# For more information see: https://help.github.com/actions/language-and-framework-guides/using-nodejs-with-github-actions +# # This workflow will do a clean install of node dependencies, build the source code and run tests across different versions of node +# # For more information see: https://help.github.com/actions/language-and-framework-guides/using-nodejs-with-github-actions -name: Node.js CI +# name: Node.js CI -on: [push] +# on: [push] -jobs: - build: +# jobs: +# build: - runs-on: ubuntu-latest +# runs-on: ubuntu-latest - strategy: - 
matrix: - node-version: [14.18.x] +# strategy: +# matrix: +# node-version: [14.18.x] - steps: - - uses: actions/checkout@v2 - - name: Use Node.js ${{ matrix.node-version }} - uses: actions/setup-node@v1 - with: - node-version: ${{ matrix.node-version }} - - name: Install dependencies - run: yarn - # - run: npm ci - # - run: npm run build --if-present - # - run: npm test +# steps: +# - uses: actions/checkout@v2 +# - name: Use Node.js ${{ matrix.node-version }} +# uses: actions/setup-node@v1 +# with: +# node-version: ${{ matrix.node-version }} +# - name: Install dependencies +# run: yarn +# # - run: npm ci +# # - run: npm run build --if-present +# # - run: npm test diff --git a/.github/workflows/ossar-analysis.yml b/.github/workflows/ossar-analysis.yml index 5d83b8ac1..3be8ed9fa 100644 --- a/.github/workflows/ossar-analysis.yml +++ b/.github/workflows/ossar-analysis.yml @@ -1,49 +1,49 @@ -# This workflow integrates a collection of open source static analysis tools -# with GitHub code scanning. For documentation, or to provide feedback, visit -# https://github.com/github/ossar-action -name: OSSAR +# # This workflow integrates a collection of open source static analysis tools +# # with GitHub code scanning. For documentation, or to provide feedback, visit +# # https://github.com/github/ossar-action +# name: OSSAR -on: - push: - pull_request: +# on: +# push: +# pull_request: -jobs: - OSSAR-Scan: - # OSSAR runs on windows-latest. - # ubuntu-latest and macos-latest support coming soon - runs-on: windows-latest +# jobs: +# OSSAR-Scan: +# # OSSAR runs on windows-latest. +# # ubuntu-latest and macos-latest support coming soon +# runs-on: windows-latest - steps: - # Checkout your code repository to scan - - name: Checkout repository - uses: actions/checkout@v2 - with: - # We must fetch at least the immediate parents so that if this is - # a pull request then we can checkout the head. 
- fetch-depth: 2 +# steps: +# # Checkout your code repository to scan +# - name: Checkout repository +# uses: actions/checkout@v2 +# with: +# # We must fetch at least the immediate parents so that if this is +# # a pull request then we can checkout the head. +# fetch-depth: 2 - # If this run was triggered by a pull request event, then checkout - # the head of the pull request instead of the merge commit. - - run: git checkout HEAD^2 - if: ${{ github.event_name == 'pull_request' }} +# # If this run was triggered by a pull request event, then checkout +# # the head of the pull request instead of the merge commit. +# - run: git checkout HEAD^2 +# if: ${{ github.event_name == 'pull_request' }} - # Ensure a compatible version of dotnet is installed. - # The [Microsoft Security Code Analysis CLI](https://aka.ms/mscadocs) is built with dotnet v3.1.201. - # A version greater than or equal to v3.1.201 of dotnet must be installed on the agent in order to run this action. - # Remote agents already have a compatible version of dotnet installed and this step may be skipped. - # For local agents, ensure dotnet version 3.1.201 or later is installed by including this action: - # - name: Install .NET - # uses: actions/setup-dotnet@v1 - # with: - # dotnet-version: '3.1.x' +# # Ensure a compatible version of dotnet is installed. +# # The [Microsoft Security Code Analysis CLI](https://aka.ms/mscadocs) is built with dotnet v3.1.201. +# # A version greater than or equal to v3.1.201 of dotnet must be installed on the agent in order to run this action. +# # Remote agents already have a compatible version of dotnet installed and this step may be skipped. 
+# # For local agents, ensure dotnet version 3.1.201 or later is installed by including this action: +# # - name: Install .NET +# # uses: actions/setup-dotnet@v1 +# # with: +# # dotnet-version: '3.1.x' - # Run open source static analysis tools - - name: Run OSSAR - uses: github/ossar-action@v1 - id: ossar +# # Run open source static analysis tools +# - name: Run OSSAR +# uses: github/ossar-action@v1 +# id: ossar - # Upload results to the Security tab - - name: Upload OSSAR results - uses: github/codeql-action/upload-sarif@v1 - with: - sarif_file: ${{ steps.ossar.outputs.sarifFile }} +# # Upload results to the Security tab +# - name: Upload OSSAR results +# uses: github/codeql-action/upload-sarif@v1 +# with: +# sarif_file: ${{ steps.ossar.outputs.sarifFile }} diff --git a/.github/workflows/rubocop-analysis.yml b/.github/workflows/rubocop-analysis.yml index 8ba20a50f..701b63a68 100644 --- a/.github/workflows/rubocop-analysis.yml +++ b/.github/workflows/rubocop-analysis.yml @@ -1,38 +1,38 @@ -# .github/workflows/rubocop-analysis.yml -name: "RuboCop" - -on: [push] - -jobs: - rubocop: - runs-on: ubuntu-latest - strategy: - fail-fast: false - - steps: - - name: Checkout repository - uses: actions/checkout@v3 - - - name: Set up Ruby - uses: ruby/setup-ruby@v1 - with: - ruby-version: 3.1.0 - - # This step is not necessary if you add the gem to your Gemfile - - name: Install Code Scanning integration - run: bundle add code-scanning-rubocop --skip-install - - - name: Install dependencies - run: bundle install - - - name: RuboCop run - run: | - bash -c " - bundle exec rubocop --require code_scanning --format CodeScanning::SarifFormatter -o rubocop.sarif - [[ $? 
-ne 2 ]] - " - - - name: Upload Sarif output - uses: github/codeql-action/upload-sarif@v2 - with: - sarif_file: rubocop.sarif +# # .github/workflows/rubocop-analysis.yml +# name: "RuboCop" + +# on: [push] + +# jobs: +# rubocop: +# runs-on: ubuntu-latest +# strategy: +# fail-fast: false + +# steps: +# - name: Checkout repository +# uses: actions/checkout@v3 + +# - name: Set up Ruby +# uses: ruby/setup-ruby@v1 +# with: +# ruby-version: 3.1.0 + +# # This step is not necessary if you add the gem to your Gemfile +# - name: Install Code Scanning integration +# run: bundle add code-scanning-rubocop --skip-install + +# - name: Install dependencies +# run: bundle install + +# - name: RuboCop run +# run: | +# bash -c " +# bundle exec rubocop --require code_scanning --format CodeScanning::SarifFormatter -o rubocop.sarif +# [[ $? -ne 2 ]] +# " + +# - name: Upload Sarif output +# uses: github/codeql-action/upload-sarif@v2 +# with: +# sarif_file: rubocop.sarif diff --git a/.github/workflows/ruby.yml b/.github/workflows/ruby.yml index f89a657e9..a7bac31f7 100644 --- a/.github/workflows/ruby.yml +++ b/.github/workflows/ruby.yml @@ -1,36 +1,36 @@ -# This workflow uses actions that are not certified by GitHub. -# They are provided by a third-party and are governed by -# separate terms of service, privacy policy, and support -# documentation. -# This workflow will download a prebuilt Ruby version, install dependencies and run tests with Rake -# For more information see: https://github.com/marketplace/actions/setup-ruby-jruby-and-truffleruby +# # This workflow uses actions that are not certified by GitHub. +# # They are provided by a third-party and are governed by +# # separate terms of service, privacy policy, and support +# # documentation. 
+# # This workflow will download a prebuilt Ruby version, install dependencies and run tests with Rake +# # For more information see: https://github.com/marketplace/actions/setup-ruby-jruby-and-truffleruby -name: Ruby +# name: Ruby -on: - push: - branches: [ master ] - pull_request: - branches: [ master ] +# on: +# push: +# branches: [ master ] +# pull_request: +# branches: [ master ] -jobs: - test: +# jobs: +# test: - runs-on: ubuntu-latest +# runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: Set up Ruby - # To automatically get bug fixes and new Ruby versions for ruby/setup-ruby, - # change this to (see https://github.com/ruby/setup-ruby#versioning): - # uses: ruby/setup-ruby@v1 - uses: ruby/setup-ruby@ec106b438a1ff6ff109590de34ddc62c540232e0 - with: - ruby-version: 3.1.0 - bundler-cache: true - env: - ACTIONS_ALLOW_UNSECURE_COMMANDS: true - - name: Install dependencies - run: bundle install - - name: Run tests - run: bundle exec rake --version +# steps: +# - uses: actions/checkout@v2 +# - name: Set up Ruby +# # To automatically get bug fixes and new Ruby versions for ruby/setup-ruby, +# # change this to (see https://github.com/ruby/setup-ruby#versioning): +# # uses: ruby/setup-ruby@v1 +# uses: ruby/setup-ruby@ec106b438a1ff6ff109590de34ddc62c540232e0 +# with: +# ruby-version: 3.1.0 +# bundler-cache: true +# env: +# ACTIONS_ALLOW_UNSECURE_COMMANDS: true +# - name: Install dependencies +# run: bundle install +# - name: Run tests +# run: bundle exec rake --version diff --git a/.github/workflows/shiftleft-analysis.yml b/.github/workflows/shiftleft-analysis.yml index 963856165..2ea2aca56 100644 --- a/.github/workflows/shiftleft-analysis.yml +++ b/.github/workflows/shiftleft-analysis.yml @@ -1,36 +1,36 @@ -# This workflow integrates Scan with GitHub's code scanning feature -# Scan is a free open-source security tool for modern DevOps teams from ShiftLeft -# Visit https://slscan.io/en/latest/integrations/code-scan for help -name: SL Scan +# # This 
workflow integrates Scan with GitHub's code scanning feature +# # Scan is a free open-source security tool for modern DevOps teams from ShiftLeft +# # Visit https://slscan.io/en/latest/integrations/code-scan for help +# name: SL Scan -# This section configures the trigger for the workflow. Feel free to customize depending on your convention -on: push +# # This section configures the trigger for the workflow. Feel free to customize depending on your convention +# on: push -jobs: - Scan-Build: - # Scan runs on ubuntu, mac and windows - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v1 - # Instructions - # 1. Setup JDK, Node.js, Python etc depending on your project type - # 2. Compile or build the project before invoking scan - # Example: mvn compile, or npm install or pip install goes here - # 3. Invoke Scan with the github token. Leave the workspace empty to use relative url +# jobs: +# Scan-Build: +# # Scan runs on ubuntu, mac and windows +# runs-on: ubuntu-latest +# steps: +# - uses: actions/checkout@v1 +# # Instructions +# # 1. Setup JDK, Node.js, Python etc depending on your project type +# # 2. Compile or build the project before invoking scan +# # Example: mvn compile, or npm install or pip install goes here +# # 3. Invoke Scan with the github token. Leave the workspace empty to use relative url - - name: Perform Scan - uses: ShiftLeftSecurity/scan-action@master - env: - WORKSPACE: "" - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - SCAN_AUTO_BUILD: true - with: - output: reports - # Scan auto-detects the languages in your project. To override uncomment the below variable and set the type - # type: credscan,java - # type: python +# - name: Perform Scan +# uses: ShiftLeftSecurity/scan-action@master +# env: +# WORKSPACE: "" +# GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} +# SCAN_AUTO_BUILD: true +# with: +# output: reports +# # Scan auto-detects the languages in your project. 
To override uncomment the below variable and set the type +# # type: credscan,java +# # type: python - - name: Upload report - uses: github/codeql-action/upload-sarif@v2 - with: - sarif_file: reports +# - name: Upload report +# uses: github/codeql-action/upload-sarif@v2 +# with: +# sarif_file: reports diff --git a/deployment/ecs/envs/bo/app_env_local_file.tf b/deployment/ecs/envs/bo/app_env_local_file.tf deleted file mode 100644 index 9a1fbab85..000000000 --- a/deployment/ecs/envs/bo/app_env_local_file.tf +++ /dev/null @@ -1,53 +0,0 @@ -data "aws_secretsmanager_secret_version" "db" { - secret_id = aws_secretsmanager_secret.db.id -} - -locals { - db_secret = jsondecode(data.aws_secretsmanager_secret_version.db.secret_string) - db_host_from_rds = aws_db_instance.this.address - db_port_from_rds = aws_db_instance.this.port - - effective_db_host = local.db_host_from_rds != "" ? local.db_host_from_rds : try(local.db_secret.host, "") - effective_db_port = local.db_port_from_rds != 0 ? local.db_port_from_rds : try(local.db_secret.port, 3306) -} - -resource "local_file" "app_env_json" { - filename = "${path.module}/app_env.json" - - content = templatefile("${path.module}/templates/app_env.json.tmpl", { - rails_env = var.rails_env - rails_log_to_stdout = var.rails_log_to_stdout - rails_serve_static = var.rails_serve_static - puma_port = var.puma_port - web_concurrency = var.web_concurrency - rails_max_threads = var.rails_max_threads - rails_min_threads = var.rails_min_threads - - # handle null secret_key_base - secret_key_base = try(coalesce(var.secret_key_base, ""), "") - - # Database fields (read-only from Secrets Manager + RDS) - db_name = try(local.db_secret.database, var.db_name) - db_host = local.effective_db_host - db_port = local.effective_db_port - db_user = local.db_secret.username - db_password = local.db_secret.password - - # Office365 / SSO - office365_client_id = var.office365_client_id - office365_client_secret = var.office365_client_secret - 
office365_redirect_uri = var.office365_redirect_uri - office365_provider_url = var.office365_provider_url - - # Keycloak - keycloak_client_id = var.keycloak_client_id - keycloak_client_secret = var.keycloak_client_secret - keycloak_realm = var.keycloak_realm - keycloak_server_url = var.keycloak_server_url - - # SSL flag - use_ssl = var.use_ssl - }) - - file_permission = "0600" -} diff --git a/deployment/ecs/envs/bo/main.tf b/deployment/ecs/envs/bo/main.tf index 572aa5ad8..b8ef6ab57 100644 --- a/deployment/ecs/envs/bo/main.tf +++ b/deployment/ecs/envs/bo/main.tf @@ -60,10 +60,10 @@ locals { module "ecs_service" { source = "../../modules/ecs" - cluster_name = "${local.app_name}-${local.env}" - service_name = "${local.app_name}-app-${local.env}" - vpc_id = local.vpc_id - subnet_ids = local.private_subnet_ids + cluster_name = "${local.app_name}-${local.env}" + service_name = "${local.app_name}-app-${local.env}" + vpc_id = local.vpc_id + subnet_ids = local.private_subnet_ids db_secret_arn = aws_secretsmanager_secret.db.arn app_secret_arn = aws_secretsmanager_secret.app.arn # Container / service @@ -73,6 +73,13 @@ module "ecs_service" { cpu = var.cpu memory = var.memory health_check_path = var.health_check_path + mpath_exec = var.mpath_exec + readonly_root_filesystem = true + environment_variables = { + RAILS_ENV = "production" + RAILS_SERVE_STATIC_FILES = "true" + NODE_ENV = "production" + } # ALB/TG/listeners inside the module (per-env ALB) create_alb = true @@ -91,8 +98,8 @@ module "ecs_service" { container_insights_enabled = true log_retention_days = var.log_retention_days assign_public_ip = false - - tags = local.tags + microsoft_secret_path = var.microsoft_secret_path + tags = local.tags } @@ -119,9 +126,33 @@ resource "aws_secretsmanager_secret" "app" { } resource "aws_secretsmanager_secret_version" "app" { - secret_id = aws_secretsmanager_secret.app.id + secret_id = aws_secretsmanager_secret.app.id secret_string = jsonencode({ SECRET_KEY_BASE = 
random_password.secret_key_base.result # ...other keys... }) } + + +module "twingate_connector" { + source = "../../modules/twingate-connector" + + # Network/cluster wiring + vpc_id = local.vpc_id + subnet_ids = local.private_subnet_ids + cluster_id = module.ecs_service.cluster_id + twingate_exec = var.twingate_exec + # Place the connector in private subnets, no public IP + assign_public_ip = false + desired_count = 1 + cpu = 1024 + memory = 2048 + + twingate_secret_path = var.twingate_secret_path + + readonly_root_filesystem = true + + + tags = merge(local.tags, { Service = "TwingateConnector" }) +} + diff --git a/deployment/ecs/envs/bo/rds.tf b/deployment/ecs/envs/bo/rds.tf index a8fb4bd7e..854f3c3b1 100644 --- a/deployment/ecs/envs/bo/rds.tf +++ b/deployment/ecs/envs/bo/rds.tf @@ -27,7 +27,7 @@ resource "aws_security_group" "rds_mysql" { from_port = 3306 to_port = 3306 protocol = "tcp" - security_groups = [module.ecs_service.service_sg_id] + security_groups = [module.ecs_service.service_sg_id,module.twingate_connector.service_sg_id] } egress { diff --git a/deployment/ecs/envs/bo/terraform.tfvars b/deployment/ecs/envs/bo/terraform.tfvars index 6da5d5038..df1361ffc 100644 --- a/deployment/ecs/envs/bo/terraform.tfvars +++ b/deployment/ecs/envs/bo/terraform.tfvars @@ -1,5 +1,7 @@ -aws_region = "us-east-1" -environment = "bo" +aws_region = "us-east-1" +environment = "bo" +microsoft_secret_path = "mpath/bo/microsoft" +twingate_secret_path = "mpath/bo/twingate" # --- RDS (cheap single-AZ) --- db_identifier = "mpath-bo-mysql" @@ -14,10 +16,10 @@ kms_key_id = null # or "arn:aws:kms:us-east-1:ACCOUNT:key/...." 
# ecs_tasks_sg_name = "your-ecs-tasks-sg-name" # --- Container / service (unchanged) --- -container_image = "295669632222.dkr.ecr.us-east-1.amazonaws.com/microhealthllc/mpath-bo:latest-working" -desired_count = 2 -cpu = 1024 -memory = 2048 +container_image = "295669632222.dkr.ecr.us-east-1.amazonaws.com/microhealthllc/mpath-bo:latest" +desired_count = 1 +cpu = 2048 +memory = 4096 # --- Healthcheck --- health_check_path = "/users/sign_in" @@ -45,3 +47,5 @@ tags = { } db_secret_arn = aws_secretsmanager_secret.db.arn +twingate_exec = false +mpath_exec = false \ No newline at end of file diff --git a/deployment/ecs/envs/bo/variables.tf b/deployment/ecs/envs/bo/variables.tf index fc8276378..5f0ec3873 100644 --- a/deployment/ecs/envs/bo/variables.tf +++ b/deployment/ecs/envs/bo/variables.tf @@ -7,6 +7,11 @@ variable "aws_region" { default = "us-east-1" } +variable "microsoft_secret_path" { + type = string + description = "Path to the Microsoft OAuth JSON secret in AWS Secrets Manager (e.g., mpath/bo/microsoft)" +} + # ===================================================================== # Container / Service configuration # ===================================================================== @@ -287,3 +292,22 @@ variable "use_ssl" { default = false description = "Enable SSL (HTTPS) for the application" } + + +variable "twingate_secret_path" { + description = "Optional path to the Twingate secret in AWS Secrets Manager (e.g., mpath/bo/twingate)." 
+ type = string + default = "" +} + +variable "twingate_exec" { + type = bool + default = false +} + +variable "mpath_exec" { + type = bool + default = false +} + + diff --git a/deployment/ecs/main.tf b/deployment/ecs/main.tf index e5bafa6b9..157996e4b 100644 --- a/deployment/ecs/main.tf +++ b/deployment/ecs/main.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 5.95" # stable; avoids the 5.100.x ARM crash + version = "~> 5.95" } } } @@ -105,3 +105,58 @@ resource "aws_route_table_association" "private" { route_table_id = aws_route_table.private.id } +# Interface Endpoints required for ECS Exec / SSM +locals { + interface_endpoint_services = [ + "ssm", + "ssmmessages", + "ec2messages", + "logs" + ] +} + +resource "aws_security_group" "vpc_endpoints" { + name_prefix = "${var.vpc_name}-endpoints-" + vpc_id = aws_vpc.main.id + description = "Security group for VPC interface endpoints" + + # Allow HTTPS from within the VPC (e.g., ECS tasks) + ingress { + from_port = 443 + to_port = 443 + protocol = "tcp" + cidr_blocks = [var.vpc_cidr_block] + description = "Allow VPC internal HTTPS" + } + + # Required egress + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + description = "Allow outbound traffic" + } + + tags = merge(local.common_tags, { + Name = "${var.vpc_name}-vpc-endpoints-sg" + }) +} + +resource "aws_vpc_endpoint" "ssm_interface_endpoints" { + for_each = toset(local.interface_endpoint_services) + + vpc_id = aws_vpc.main.id + service_name = "com.amazonaws.${var.aws_region}.${each.key}" + vpc_endpoint_type = "Interface" + subnet_ids = aws_subnet.private[*].id + security_group_ids = [ + aws_security_group.vpc_endpoints.id + ] + + private_dns_enabled = true + + tags = merge(local.common_tags, { + Name = "${var.vpc_name}-vpce-${each.key}" + }) +} diff --git a/deployment/ecs/modules/ecs/main.tf b/deployment/ecs/modules/ecs/main.tf index 14c2a53fc..b8201920e 100644 --- 
a/deployment/ecs/modules/ecs/main.tf +++ b/deployment/ecs/modules/ecs/main.tf @@ -2,6 +2,11 @@ data "aws_region" "current" {} data "aws_caller_identity" "current" {} +# Microsoft OAuth secret lookup by path +data "aws_secretsmanager_secret" "microsoft" { + count = var.microsoft_secret_path != null && var.microsoft_secret_path != "" ? 1 : 0 + name = var.microsoft_secret_path +} # Toggle ALB creation inside this module locals { @@ -11,22 +16,32 @@ locals { alb_name_effective = coalesce(var.alb_name, "${var.service_name}-alb") } +locals { + microsoft_secret_arn = length(data.aws_secretsmanager_secret.microsoft) > 0 ? data.aws_secretsmanager_secret.microsoft[0].arn : null +} + # Secrets to inject into the container (built from the ARNs passed in) locals { container_secrets = concat( var.db_secret_arn != null ? [ { name = "DB_USERNAME", valueFrom = "${var.db_secret_arn}:username::" }, { name = "DB_PASSWORD", valueFrom = "${var.db_secret_arn}:password::" }, - { name = "DB_HOST", valueFrom = "${var.db_secret_arn}:host::" }, - { name = "DB_PORT", valueFrom = "${var.db_secret_arn}:port::" }, - { name = "DB_NAME", valueFrom = "${var.db_secret_arn}:database::" }, - { name = "DB_ADAPTER", valueFrom = "${var.db_secret_arn}:adapter::" }, - { name = "DATABASE_URL", valueFrom = "${var.db_secret_arn}:url::" } + { name = "DB_HOST", valueFrom = "${var.db_secret_arn}:host::" }, + { name = "DB_PORT", valueFrom = "${var.db_secret_arn}:port::" }, + { name = "DB_NAME", valueFrom = "${var.db_secret_arn}:database::" }, + { name = "DB_ADAPTER", valueFrom = "${var.db_secret_arn}:adapter::" }, + { name = "DATABASE_URL", valueFrom = "${var.db_secret_arn}:url::" } ] : [], var.app_secret_arn != null ? 
[ # Rails core - { name = "SECRET_KEY_BASE", valueFrom = "${var.app_secret_arn}:SECRET_KEY_BASE::" }, - #{ name = "OFFICE365_CLIENT_ID", valueFrom = "${var.app_secret_arn}:OFFICE365_CLIENT_ID::" } + { name = "SECRET_KEY_BASE", valueFrom = "${var.app_secret_arn}:SECRET_KEY_BASE::" } + # { name = "OFFICE365_CLIENT_ID", valueFrom = "${var.app_secret_arn}:OFFICE365_CLIENT_ID::" } # (kept commented from original) + ] : [], + local.microsoft_secret_arn != null ? [ + { name = "OFFICE365_CLIENT_ID", valueFrom = "${local.microsoft_secret_arn}:OFFICE365_CLIENT_ID::" }, + { name = "OFFICE365_CLIENT_SECRET", valueFrom = "${local.microsoft_secret_arn}:OFFICE365_CLIENT_SECRET::" }, + { name = "OFFICE365_REDIRECT_URI", valueFrom = "${local.microsoft_secret_arn}:OFFICE365_REDIRECT_URI::" }, + { name = "OFFICE365_PROVIDER_URL", valueFrom = "${local.microsoft_secret_arn}:OFFICE365_PROVIDER_URL::" } ] : [] ) } @@ -54,8 +69,8 @@ resource "aws_iam_role" "ecs_execution_role" { assume_role_policy = jsonencode({ Version = "2012-10-17", Statement = [{ - Action = "sts:AssumeRole", - Effect = "Allow", + Action = "sts:AssumeRole", + Effect = "Allow", Principal = { Service = "ecs-tasks.amazonaws.com" } }] }) @@ -69,8 +84,8 @@ resource "aws_iam_role" "ecs_task_role" { assume_role_policy = jsonencode({ Version = "2012-10-17", Statement = [{ - Action = "sts:AssumeRole", - Effect = "Allow", + Action = "sts:AssumeRole", + Effect = "Allow", Principal = { Service = "ecs-tasks.amazonaws.com" } }] }) @@ -83,6 +98,23 @@ resource "aws_iam_role_policy_attachment" "ecs_execution_role_policy" { role = aws_iam_role.ecs_execution_role.name policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy" } +# Attach SSM Managed Instance Core to allow ECS Exec sessions +resource "aws_iam_role_policy_attachment" "ecs_execution_role_ssm" { + role = aws_iam_role.ecs_execution_role.name + policy_arn = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" +} + +# Attach CloudWatch Agent Server 
policy (optional but useful for ECS Exec sessions)
+resource "aws_iam_role_policy_attachment" "ecs_execution_role_cwagent" {
+  role       = aws_iam_role.ecs_execution_role.name
+  policy_arn = "arn:aws:iam::aws:policy/CloudWatchAgentServerPolicy"
+}
+
+# Attach the same to the ECS task role (so the container itself can establish session)
+resource "aws_iam_role_policy_attachment" "ecs_task_role_ssm" {
+  role       = aws_iam_role.ecs_task_role.name
+  policy_arn = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
+}
 
 # Extra ECR actions (kept from your original)
 resource "aws_iam_role_policy" "ecs_execution_role_ecr_policy" {
@@ -134,8 +166,12 @@ resource "aws_ecs_task_definition" "app" {
         }
       ]
 
+      # readonlyRootFilesystem is a TOP-LEVEL container-definition parameter;
+      # nesting it under linuxParameters is rejected by RegisterTaskDefinition.
+      readonlyRootFilesystem = var.readonly_root_filesystem
       # Secrets injected by ECS at container start
       secrets = local.container_secrets
+      # Run Rails seeds after short delay, then start the app
       command = [
         "bash",
         "-lc",
@@ -149,8 +185,7 @@
           awslogs-region        = data.aws_region.current.name,
           awslogs-stream-prefix = "ecs"
         }
-      }
-
+      },
       healthCheck = var.health_check_enabled ?
{ command = ["CMD-SHELL", "curl -f http://localhost:${var.container_port}${var.health_check_path} || exit 1"] @@ -165,7 +200,6 @@ resource "aws_ecs_task_definition" "app" { tags = var.tags } - # ---- Networking SGs ---- resource "aws_security_group" "ecs_service" { name_prefix = "${var.service_name}-ecs-" @@ -320,9 +354,10 @@ resource "aws_ecs_service" "app" { desired_count = var.desired_count launch_type = "FARGATE" platform_version = var.platform_version + enable_execute_command = var.mpath_exec network_configuration { - subnets = var.subnet_ids # private subnets + subnets = var.subnet_ids # private subnets security_groups = [aws_security_group.ecs_service.id] assign_public_ip = var.assign_public_ip } @@ -353,7 +388,7 @@ resource "aws_ecs_service" "app" { # ---- Allow the ECS *execution role* to read Secrets Manager (and KMS if needed) ---- locals { - secret_arns = compact([var.db_secret_arn, var.app_secret_arn]) + secret_arns = compact([var.db_secret_arn, var.app_secret_arn, local.microsoft_secret_arn]) kms_key_arns_ = var.kms_key_arns # optional list; pass only if you used CMKs on the secrets } @@ -367,9 +402,9 @@ resource "aws_iam_role_policy" "ecs_exec_secrets" { Statement = concat( [ { - Sid = "ReadSecretsFromSecretsManager" - Effect = "Allow" - Action = [ + Sid = "ReadSecretsFromSecretsManager" + Effect = "Allow" + Action = [ "secretsmanager:GetSecretValue", "secretsmanager:DescribeSecret" ] diff --git a/deployment/ecs/modules/ecs/variables.tf b/deployment/ecs/modules/ecs/variables.tf index 98ebbcbaf..44637af08 100644 --- a/deployment/ecs/modules/ecs/variables.tf +++ b/deployment/ecs/modules/ecs/variables.tf @@ -26,15 +26,15 @@ variable "desired_count" { } variable "cpu" { - description = "CPU units for the task (256, 512, 1024, etc.)" + description = "CPU units for the task" type = number - default = 256 + default = 2048 } variable "memory" { description = "Memory for the task in MB" type = number - default = 512 + default = 4096 } variable "vpc_id" { @@ 
-124,10 +124,10 @@ variable "create_alb" { type = bool default = false -validation { - condition = !var.create_alb || (length(var.public_subnet_ids) > 0 && length(trimspace(var.acm_certificate_arn)) > 0) - error_message = "When create_alb = true, you must provide public_subnet_ids and acm_certificate_arn." -} + validation { + condition = !var.create_alb || (length(var.public_subnet_ids) > 0 && length(trimspace(var.acm_certificate_arn)) > 0) + error_message = "When create_alb = true, you must provide public_subnet_ids and acm_certificate_arn." + } } @@ -147,7 +147,7 @@ variable "ssl_policy" { description = "TLS policy for HTTPS listener" type = string # Consider: "ELBSecurityPolicy-TLS13-1-2-2021-06" for modern clients - default = "ELBSecurityPolicy-TLS-1-2-2017-01" + default = "ELBSecurityPolicy-TLS-1-2-2017-01" } variable "alb_name" { @@ -190,3 +190,21 @@ variable "kms_key_arns" { default = [] description = "Optional list of KMS key ARNs used to encrypt the secrets; grants kms:Decrypt to the execution role." } + +variable "microsoft_secret_path" { + type = string + description = "Path to the Microsoft OAuth JSON secret in AWS Secrets Manager" + default = null +} + +variable "readonly_root_filesystem" { + type = bool + default = true + description = "Whether to make the container's root filesystem read-only." +} + +variable "mpath_exec" { + description = "Enable exec" + type = bool + default = false +} \ No newline at end of file diff --git a/deployment/ecs/modules/twingate-connector/README.md b/deployment/ecs/modules/twingate-connector/README.md new file mode 100644 index 000000000..953079224 --- /dev/null +++ b/deployment/ecs/modules/twingate-connector/README.md @@ -0,0 +1,111 @@ +# Twingate ECS Connector Module + +This module deploys a Twingate connector as an ECS Fargate service in your AWS environment. The Twingate connector provides secure remote access to your private resources without requiring a VPN. 
+
+## Features
+
+- Deploys Twingate connector as an ECS Fargate service
+- Integrates with existing ECS cluster
+- Runs in private subnets for enhanced security
+- CloudWatch logging integration
+- IAM roles with minimal required permissions
+- Configurable resource allocation
+
+## Usage
+
+```hcl
+module "twingate_connector" {
+  source = "./modules/twingate-connector"
+
+  cluster_id       = module.ecs.cluster_id
+  vpc_id           = aws_vpc.main.id
+  subnet_ids       = aws_subnet.private[*].id
+  assign_public_ip = false
+
+  # Twingate configuration: path to the JSON secret in AWS
+  # Secrets Manager that holds the connector tokens
+  twingate_secret_path = "mpath/bo/twingate"
+  twingate_exec        = false
+
+  # Resource Configuration
+  cpu           = 1024
+  memory        = 2048
+  desired_count = 1
+
+  tags = {
+    Environment = "Production"
+    Service     = "TwingateConnector"
+  }
+}
+```
+
+## Configuration
+
+### Required Variables
+
+- `cluster_id`: ECS cluster ID where the connector will be deployed
+- `vpc_id`: VPC ID for security group creation
+- `subnet_ids`: List of subnet IDs (recommend private subnets)
+- `twingate_secret_path`: Path to the Twingate JSON secret in AWS Secrets Manager
+  (keys: TWINGATE_NETWORK, TWINGATE_ACCESS_TOKEN, TWINGATE_REFRESH_TOKEN,
+  TWINGATE_LABEL_DEPLOYED_BY)
+
+### Optional Variables
+
+- `service_name`: Name of the service (default: "twingate-ecs-connector-2")
+- `container_image`: Docker image for Twingate connector (default: "twingate/connector:1")
+- `cpu`: CPU units for the task (default: 1024)
+- `memory`: Memory in MB for the task (default: 2048)
+- `desired_count`: Number of tasks to run (default: 1)
+- `assign_public_ip`: Whether to assign public IP (default: false)
+
+## Security Considerations
+
+1. **Private Subnets**: The connector is deployed in private subnets by default
+2. **IAM Permissions**: Uses minimal IAM permissions for ECS execution
+3. **Secrets Manager**: Twingate tokens are injected at runtime from Secrets Manager
+4.
**Security Groups**: Only allows outbound traffic for Twingate connectivity + +## Deployment + +1. Enable Twingate in your main configuration: + ```hcl + twingate_enabled = true + ``` + +2. Set the required Twingate variables in your `terraform.tfvars`: + ```hcl + twingate_network = "your-network-name" + twingate_access_token = "your-access-token" + twingate_refresh_token = "your-refresh-token" + ``` + +3. Deploy with Terraform: + ```bash + terraform plan + terraform apply + ``` + +## Outputs + +- `task_definition_arn`: ARN of the Twingate connector task definition +- `service_arn`: ARN of the Twingate connector service +- `service_name`: Name of the Twingate connector service +- `security_group_id`: ID of the security group + +## Troubleshooting + +### Connector Not Starting +- Check CloudWatch logs: `/ecs/twingate-ecs-connector-2` +- Verify access and refresh tokens are valid +- Ensure network connectivity from private subnets + +### Connection Issues +- Verify security group allows outbound traffic +- Check NAT Gateway configuration for private subnets +- Confirm Twingate network configuration + +### Resource Issues +- Monitor CPU and memory utilization in CloudWatch +- Adjust `cpu` and `memory` variables if needed +- Consider scaling `desired_count` for high availability \ No newline at end of file diff --git a/deployment/ecs/modules/twingate-connector/main.tf b/deployment/ecs/modules/twingate-connector/main.tf new file mode 100644 index 000000000..f3c5fca0c --- /dev/null +++ b/deployment/ecs/modules/twingate-connector/main.tf @@ -0,0 +1,228 @@ +# CloudWatch Log Group for Twingate Connector +resource "aws_cloudwatch_log_group" "twingate_logs" { + name = "/ecs/${var.service_name}" + retention_in_days = var.log_retention_days + kms_key_id = var.enable_log_encryption ? 
var.log_kms_key_id : null
+
+  tags = merge(var.tags, {
+    Name    = "/ecs/${var.service_name}"
+    Service = "TwingateConnector"
+  })
+}
+
+# Twingate secret lookup by path (required)
+data "aws_secretsmanager_secret" "twingate" {
+  name = var.twingate_secret_path
+}
+
+# ECS Task Definition for Twingate Connector
+resource "aws_ecs_task_definition" "twingate" {
+  family                   = var.service_name
+  requires_compatibilities = ["FARGATE"]
+  network_mode             = "awsvpc"
+  cpu                      = var.cpu
+  memory                   = var.memory
+  execution_role_arn       = aws_iam_role.ecs_execution_role.arn
+  task_role_arn            = aws_iam_role.ecs_task_role.arn
+
+  container_definitions = jsonencode([
+    {
+      name      = var.service_name
+      image     = var.container_image
+      essential = true
+      memory    = var.memory
+      cpu       = var.cpu
+
+      # readonlyRootFilesystem is a TOP-LEVEL container-definition parameter;
+      # it is NOT valid inside linuxParameters (ECS rejects the task def there).
+      readonlyRootFilesystem = var.readonly_root_filesystem,
+
+      secrets = [
+        { name = "TWINGATE_NETWORK", valueFrom = "${data.aws_secretsmanager_secret.twingate.arn}:TWINGATE_NETWORK::" },
+        { name = "TWINGATE_ACCESS_TOKEN", valueFrom = "${data.aws_secretsmanager_secret.twingate.arn}:TWINGATE_ACCESS_TOKEN::" },
+        { name = "TWINGATE_REFRESH_TOKEN", valueFrom = "${data.aws_secretsmanager_secret.twingate.arn}:TWINGATE_REFRESH_TOKEN::" },
+        { name = "TWINGATE_LABEL_DEPLOYED_BY", valueFrom = "${data.aws_secretsmanager_secret.twingate.arn}:TWINGATE_LABEL_DEPLOYED_BY::" }
+      ],
+
+      logConfiguration = {
+        logDriver = "awslogs"
+        options = {
+          awslogs-group         = aws_cloudwatch_log_group.twingate_logs.name
+          awslogs-region        = data.aws_region.current.name
+          awslogs-stream-prefix = "twingate-connector"
+        }
+      }
+      # Twingate connector doesn't expose any ports
+    }
+  ])
+
+  tags = var.tags
+}
+
+# Security Group for Twingate Connector
+resource "aws_security_group" "twingate_connector" {
+  name_prefix = "${var.service_name}-"
+  vpc_id      = var.vpc_id
+  description = "Security group for Twingate connector"
+
+  # Twingate connector needs outbound internet access
+
egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + description = "All outbound traffic for Twingate connectivity" + } + + tags = merge(var.tags, { + Name = "${var.service_name}-sg" + Service = "TwingateConnector" + }) + + lifecycle { + create_before_destroy = true + } +} + +# ECS Service for Twingate Connector +resource "aws_ecs_service" "twingate" { + name = var.service_name + cluster = var.cluster_id + task_definition = aws_ecs_task_definition.twingate.arn + desired_count = var.desired_count + launch_type = "FARGATE" + platform_version = var.platform_version + enable_execute_command = var.twingate_exec + + network_configuration { + subnets = var.subnet_ids + security_groups = [aws_security_group.twingate_connector.id] + assign_public_ip = var.assign_public_ip + } + + deployment_maximum_percent = var.deployment_maximum_percent + deployment_minimum_healthy_percent = var.deployment_minimum_healthy_percent + + deployment_circuit_breaker { + enable = var.deployment_circuit_breaker_enabled + rollback = var.deployment_circuit_breaker_rollback + } + + tags = var.tags + + depends_on = [ + aws_iam_role_policy_attachment.ecs_execution_role_policy, + aws_iam_role_policy_attachment.ecs_exec_attach_sm + ] +} + +# IAM Role for ECS Execution +resource "aws_iam_role" "ecs_execution_role" { + name = "${var.service_name}-ecs-execution-role" + + assume_role_policy = jsonencode({ + Version = "2012-10-17", + Statement = [ + { + Action = "sts:AssumeRole", + Effect = "Allow", + Principal = { Service = "ecs-tasks.amazonaws.com" } + } + ] + }) + + tags = var.tags +} + +# IAM Role for ECS Task +resource "aws_iam_role" "ecs_task_role" { + name = "${var.service_name}-ecs-task-role" + + assume_role_policy = jsonencode({ + Version = "2012-10-17", + Statement = [ + { + Action = "sts:AssumeRole", + Effect = "Allow", + Principal = { Service = "ecs-tasks.amazonaws.com" } + } + ] + }) + + tags = var.tags +} + +# Attach AWS managed policy for ECS execution 
+resource "aws_iam_role_policy_attachment" "ecs_execution_role_policy" { + role = aws_iam_role.ecs_execution_role.name + policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy" +} + +# Attach SSM Managed Instance Core to allow ECS Exec sessions +resource "aws_iam_role_policy_attachment" "ecs_execution_role_ssm" { + role = aws_iam_role.ecs_execution_role.name + policy_arn = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" +} + +# Attach CloudWatch Agent Server policy (optional but useful for ECS Exec sessions) +resource "aws_iam_role_policy_attachment" "ecs_execution_role_cwagent" { + role = aws_iam_role.ecs_execution_role.name + policy_arn = "arn:aws:iam::aws:policy/CloudWatchAgentServerPolicy" +} + +# Attach the same to the ECS task role (so the container itself can establish session) +resource "aws_iam_role_policy_attachment" "ecs_task_role_ssm" { + role = aws_iam_role.ecs_task_role.name + policy_arn = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" +} + +# Additional policy for ECS execution role to write to CloudWatch Logs +resource "aws_iam_role_policy" "ecs_execution_role_logs_policy" { + name = "${var.service_name}-ecs-execution-logs-policy" + role = aws_iam_role.ecs_execution_role.id + + policy = jsonencode({ + Version = "2012-10-17", + Statement = [ + { + Effect = "Allow", + Action = [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents", + "logs:DescribeLogStreams" + ], + Resource = [ + aws_cloudwatch_log_group.twingate_logs.arn, + "${aws_cloudwatch_log_group.twingate_logs.arn}:*" + ] + } + ] + }) +} + +# Allow the execution role to read the Twingate secret +data "aws_iam_policy_document" "ecs_exec_sm" { + statement { + actions = ["secretsmanager:GetSecretValue", "secretsmanager:DescribeSecret"] + resources = [ + data.aws_secretsmanager_secret.twingate.arn, + "${data.aws_secretsmanager_secret.twingate.arn}*" + ] + } +} + +resource "aws_iam_policy" "ecs_exec_sm" { + name = 
"${var.service_name}-ecs-exec-secretsmanager" + policy = data.aws_iam_policy_document.ecs_exec_sm.json +} + +resource "aws_iam_role_policy_attachment" "ecs_exec_attach_sm" { + role = aws_iam_role.ecs_execution_role.name + policy_arn = aws_iam_policy.ecs_exec_sm.arn +} + +# Data sources +data "aws_region" "current" {} +data "aws_caller_identity" "current" {} diff --git a/deployment/ecs/modules/twingate-connector/outputs.tf b/deployment/ecs/modules/twingate-connector/outputs.tf new file mode 100644 index 000000000..bc7e11a84 --- /dev/null +++ b/deployment/ecs/modules/twingate-connector/outputs.tf @@ -0,0 +1,34 @@ +output "task_definition_arn" { + description = "ARN of the Twingate connector task definition" + value = aws_ecs_task_definition.twingate.arn +} + +output "service_arn" { + description = "ARN of the Twingate connector service" + value = aws_ecs_service.twingate.id +} + +output "service_name" { + description = "Name of the Twingate connector service" + value = aws_ecs_service.twingate.name +} + +output "security_group_id" { + description = "ID of the Twingate connector security group" + value = aws_security_group.twingate_connector.id +} + +output "execution_role_arn" { + description = "ARN of the Twingate connector execution role" + value = aws_iam_role.ecs_execution_role.arn +} + +output "task_role_arn" { + description = "ARN of the Twingate connector task role" + value = aws_iam_role.ecs_task_role.arn +} + +output "service_sg_id" { + description = "Security Group ID attached to the ECS service/tasks" + value = aws_security_group.twingate_connector.id +} \ No newline at end of file diff --git a/deployment/ecs/modules/twingate-connector/variables.tf b/deployment/ecs/modules/twingate-connector/variables.tf new file mode 100644 index 000000000..675bd3b00 --- /dev/null +++ b/deployment/ecs/modules/twingate-connector/variables.tf @@ -0,0 +1,127 @@ +variable "service_name" { + description = "Name of the Twingate connector service" + type = string + default = 
"twingate-ecs-connector-2" +} + +variable "container_image" { + description = "Docker image for the Twingate connector" + type = string + default = "twingate/connector:1" +} + +variable "cpu" { + description = "CPU units for the Twingate connector task" + type = number + default = 1024 +} + +variable "memory" { + description = "Memory (MB) for the Twingate connector task" + type = number + default = 2048 +} + +variable "desired_count" { + description = "Number of Twingate connector tasks to run" + type = number + default = 1 +} + +variable "cluster_id" { + description = "ECS cluster ID where the Twingate connector will be deployed" + type = string +} + +variable "vpc_id" { + description = "VPC ID where the Twingate connector will be deployed" + type = string +} + +variable "subnet_ids" { + description = "List of subnet IDs for the Twingate connector" + type = list(string) +} + +variable "assign_public_ip" { + description = "Assign public IP to the Twingate connector task" + type = bool + default = false +} + +variable "platform_version" { + description = "ECS platform version" + type = string + default = "LATEST" +} + +variable "deployment_maximum_percent" { + description = "Maximum deployment percentage" + type = number + default = 200 +} + +variable "deployment_minimum_healthy_percent" { + description = "Minimum healthy deployment percentage" + type = number + default = 100 +} + +variable "deployment_circuit_breaker_enabled" { + description = "Enable deployment circuit breaker" + type = bool + default = true +} + +variable "deployment_circuit_breaker_rollback" { + description = "Enable deployment circuit breaker rollback" + type = bool + default = true +} + +variable "log_retention_days" { + description = "CloudWatch log retention period in days" + type = number + default = 30 +} + +variable "enable_log_encryption" { + description = "Enable CloudWatch log encryption" + type = bool + default = false +} + +variable "log_kms_key_id" { + description = "KMS key ID for 
CloudWatch log encryption" + type = string + default = null +} + +variable "readonly_root_filesystem" { + description = "Enable read-only root filesystem for container security" + type = bool + default = true +} + +variable "twingate_label_deployed_by" { + description = "Label indicating how the Twingate connector was deployed" + type = string + default = "ecs" +} + +variable "tags" { + description = "Tags to apply to all resources" + type = map(string) + default = {} +} + +variable "twingate_secret_path" { + description = "Path to the Twingate JSON secret in AWS Secrets Manager (e.g., mpath/bo/twingate)." + type = string +} + +variable "twingate_exec" { + description = "Enable exec" + type = bool + default = false +} \ No newline at end of file diff --git a/deployment/ecs/variables.tf b/deployment/ecs/variables.tf index f444218fc..44c8052d3 100644 --- a/deployment/ecs/variables.tf +++ b/deployment/ecs/variables.tf @@ -76,3 +76,15 @@ variable "tags" { type = map(string) default = {} } + +variable "waf_login_paths" { + description = "List of login or authentication-related paths that should use CAPTCHA" + type = list(string) + default = ["/users/sign_in", "/users/sign_up"] +} + +variable "waf_rate_limit" { + description = "Rate limit per IP (requests per 5 minutes)" + type = number + default = 2000 +} \ No newline at end of file diff --git a/deployment/ecs/waf.tf b/deployment/ecs/waf.tf index 5db18ecf4..a230a07b2 100644 --- a/deployment/ecs/waf.tf +++ b/deployment/ecs/waf.tf @@ -1,4 +1,3 @@ -# Minimal, low-blocking WebACL resource "aws_wafv2_web_acl" "mpath_web_acl" { name = "${var.environment}-${local.app_name}-web-acl" description = "${var.environment} ${local.app_name} WebACL" @@ -8,25 +7,22 @@ resource "aws_wafv2_web_acl" "mpath_web_acl" { allow {} } - # 1) Allow-only list of countries; everything else is blocked + # 0) Geo allowlist rule { name = "block-non-allowed-countries" priority = 0 - statement { not_statement { statement { geo_match_statement { - 
country_codes = var.waf_allowed_countries # e.g., ["US"] + country_codes = var.waf_allowed_countries } } } } - action { block {} } - visibility_config { sampled_requests_enabled = true cloudwatch_metrics_enabled = true @@ -34,23 +30,19 @@ resource "aws_wafv2_web_acl" "mpath_web_acl" { } } - # 2) AWS managed IP reputation (enforced) + # 1) AWSManagedRulesAmazonIpReputationList rule { name = "AWS-AWSManagedRulesAmazonIpReputationList" priority = 1 - statement { managed_rule_group_statement { vendor_name = "AWS" name = "AWSManagedRulesAmazonIpReputationList" } } - - # 'none' = respect the rule group's native action (block) override_action { none {} } - visibility_config { sampled_requests_enabled = true cloudwatch_metrics_enabled = true @@ -58,31 +50,222 @@ resource "aws_wafv2_web_acl" "mpath_web_acl" { } } - # 3) Common rules in COUNT mode (observe first) + # 2) AWSManagedRulesAnonymousIpList rule { - name = "AWS-AWSManagedRulesCommonRuleSet" + name = "AWS-AWSManagedRulesAnonymousIpList" priority = 2 + statement { + managed_rule_group_statement { + vendor_name = "AWS" + name = "AWSManagedRulesAnonymousIpList" + } + } + override_action { + none {} + } + visibility_config { + sampled_requests_enabled = true + cloudwatch_metrics_enabled = true + metric_name = "AWS-AWSManagedRulesAnonymousIpList" + } + } + # 3) AWSManagedRulesCommonRuleSet + rule { + name = "AWS-AWSManagedRulesCommonRuleSet" + priority = 3 statement { managed_rule_group_statement { vendor_name = "AWS" name = "AWSManagedRulesCommonRuleSet" } } + override_action { + count {} + } + visibility_config { + sampled_requests_enabled = true + cloudwatch_metrics_enabled = true + metric_name = "AWS-AWSManagedRulesCommonRuleSet" + } + } - # 'count' = do not block yet; just record matches + # 4) AWSManagedRulesAdminProtectionRuleSet + rule { + name = "AWS-AWSManagedRulesAdminProtectionRuleSet" + priority = 4 + statement { + managed_rule_group_statement { + vendor_name = "AWS" + name = 
"AWSManagedRulesAdminProtectionRuleSet" + } + } override_action { count {} } + visibility_config { + sampled_requests_enabled = true + cloudwatch_metrics_enabled = true + metric_name = "AWS-AWSManagedRulesAdminProtectionRuleSet" + } + } + # 5) AWSManagedRulesKnownBadInputsRuleSet + rule { + name = "AWS-AWSManagedRulesKnownBadInputsRuleSet" + priority = 5 + statement { + managed_rule_group_statement { + vendor_name = "AWS" + name = "AWSManagedRulesKnownBadInputsRuleSet" + } + } + override_action { + count {} + } visibility_config { sampled_requests_enabled = true cloudwatch_metrics_enabled = true - metric_name = "AWS-AWSManagedRulesCommonRuleSet" + metric_name = "AWS-AWSManagedRulesKnownBadInputsRuleSet" + } + } + + # 6) Rate-limit POST /users/sign_in + rule { + name = "rate-limit-signin-post" + priority = 6 + statement { + rate_based_statement { + limit = 2000 + aggregate_key_type = "IP" + scope_down_statement { + and_statement { + statement { + byte_match_statement { + search_string = "/users/sign_in" + positional_constraint = "EXACTLY" + field_to_match { + uri_path {} + } + text_transformation { + priority = 0 + type = "NONE" + } + } + } + statement { + byte_match_statement { + search_string = "POST" + positional_constraint = "EXACTLY" + field_to_match { + method {} + } + text_transformation { + priority = 0 + type = "NONE" + } + } + } + } + } + } + } + action { + block {} + } + visibility_config { + sampled_requests_enabled = true + cloudwatch_metrics_enabled = true + metric_name = "rate-limit-signin-post" + } + } + + # 7) Block /users/password/new entirely + rule { + name = "block-password-reset" + priority = 7 + statement { + byte_match_statement { + search_string = "/users/password/new" + positional_constraint = "EXACTLY" + field_to_match { + uri_path {} + } + text_transformation { + priority = 0 + type = "NONE" + } + } + } + action { + block {} + } + visibility_config { + sampled_requests_enabled = true + cloudwatch_metrics_enabled = true + metric_name = 
"block-password-reset" + } + } + # Block all OAuth sign-in / error callbacks + rule { + name = "block-oauth-callbacks" + priority = 8 + + statement { + byte_match_statement { + search_string = "/signin/oauth" + positional_constraint = "STARTS_WITH" + field_to_match { + uri_path {} + } + text_transformation { + priority = 0 + type = "NONE" + } + } + } + + action { + block {} + } + + visibility_config { + sampled_requests_enabled = true + cloudwatch_metrics_enabled = true + metric_name = "block-oauth-callbacks" + } + } + + + # 9) Allow ELB health checks + rule { + name = "allow-elb-health-checks" + priority = 9 + statement { + byte_match_statement { + search_string = "ELB-HealthChecker" + positional_constraint = "CONTAINS" + field_to_match { + single_header { + name = "user-agent" + } + } + text_transformation { + priority = 0 + type = "NONE" + } + } + } + action { + allow {} + } + visibility_config { + sampled_requests_enabled = true + cloudwatch_metrics_enabled = true + metric_name = "allow-elb-health-checks" } } - # Web ACL level visibility visibility_config { sampled_requests_enabled = true cloudwatch_metrics_enabled = true @@ -91,3 +274,31 @@ resource "aws_wafv2_web_acl" "mpath_web_acl" { tags = local.common_tags } + +# CloudWatch Log Group +resource "aws_cloudwatch_log_group" "mpath_waf_logs" { + name = "aws-waf-logs-${var.environment}-${local.app_name}" + retention_in_days = 30 + tags = local.common_tags +} + +# Logging Configuration +resource "aws_wafv2_web_acl_logging_configuration" "mpath_waf_logging" { + resource_arn = aws_wafv2_web_acl.mpath_web_acl.arn + + log_destination_configs = [ + aws_cloudwatch_log_group.mpath_waf_logs.arn + ] + + redacted_fields { + single_header { + name = "authorization" + } + } + + redacted_fields { + single_header { + name = "cookie" + } + } +} diff --git a/docker/app/Dockerfile b/docker/app/Dockerfile index 438b1d07a..032442b12 100644 --- a/docker/app/Dockerfile +++ b/docker/app/Dockerfile @@ -1,9 +1,10 @@ -# Builder stage 
for installing dependencies +# ================================ +# Builder stage (gems + JS deps) +# ================================ FROM ruby:3.1.0 AS builder - WORKDIR /var/www/mPATH -# Install system dependencies (build-essential for native gems like mysql2, Node.js for Webpacker) +# System deps for native gems and Node build RUN apt-get update -qq && apt-get install -y \ build-essential \ curl dirmngr gnupg apt-transport-https ca-certificates \ @@ -14,66 +15,61 @@ RUN apt-get update -qq && apt-get install -y \ && npm install -g yarn \ && rm -rf /var/lib/apt/lists/* -# Install Bundler +# Bundler RUN gem install bundler:2.3.26 -# Copy lockfiles for caching +# Copy lockfiles first for better caching COPY Gemfile Gemfile.lock package.json yarn.lock ./ -# Bundle config and install (production only) +# Install Ruby deps (prod only) +ENV RAILS_ENV=production RUN bundle config set --local without 'development test' \ && bundle install -# Set production env for JS installs +# Install JS deps (prod only) — avoids Cypress/dev deps entirely ENV NODE_ENV=production - -# Install JS dependencies (production only) RUN yarn install --production --frozen-lockfile -# Install all Node dependencies -ENV NODE_ENV=production - -RUN npm install - -# Copy full app code +# Copy the rest of the app COPY . . 
-# Asset compilation stage (mimic runtime environment) +# ================================= +# Asset compilation stage +# ================================= FROM builder AS asset-compiler - WORKDIR /var/www/mPATH -# Set production environment to match runtime ENV RAILS_ENV=production NODE_ENV=production -# Create puma user and group (needed for permissions in asset compilation) +# puma user (for ownership during precompile) RUN groupadd -r puma && useradd -r -g puma -d /var/www/mPATH puma -# Set permissions as root +# dirs & perms USER root RUN mkdir -p /var/www/mPATH/tmp/pids /var/www/mPATH/tmp/cache /var/www/mPATH/log \ && chown -R puma:puma /var/www/mPATH /usr/local/bundle /tmp \ && chmod -R 755 /tmp -# Switch to puma user for asset compilation (mimics runtime) USER puma -# Install Webpacker/Shakapacker (no-op if already installed) +# Tolerate webpacker or shakapacker projects RUN bundle exec rails webpacker:install || true \ && bundle exec rails shakapacker:install || true -# Precompile assets (use dummy secret; ensure config.assets.initialize_on_precompile = false in config/application.rb) +# Precompile assets (ensure initialize_on_precompile is safe) RUN SECRET_KEY_BASE=dummy bundle exec rails assets:clobber assets:precompile -# Verify manifest.json exists (debugging step) -RUN test -f /var/www/mPATH/public/packs/manifest.json && echo "manifest.json created successfully" || { echo "Error: manifest.json not found"; exit 1; } +# Verify manifest +RUN test -f /var/www/mPATH/public/packs/manifest.json \ + && echo "manifest.json created successfully" || { echo "Error: manifest.json not found"; exit 1; } -# Final runtime stage +# ===================== +# Final runtime image +# ===================== FROM ruby:3.1.0 - WORKDIR /var/www/mPATH -# Install minimal runtime dependencies (libmariadb3 for mysql2 runtime) +# Minimal runtime libs + Node.js for ExecJS/Uglifier at boot RUN apt-get update -qq && apt-get install -y \ curl dirmngr gnupg apt-transport-https 
ca-certificates \ software-properties-common \ @@ -86,25 +82,22 @@ RUN apt-get update -qq && apt-get install -y \ && gosu nobody true \ && rm -rf /var/lib/apt/lists/* -# Create puma user and group +# puma user RUN groupadd -r puma && useradd -r -g puma -d /var/www/mPATH puma -# Copy bundled gems, app code, and precompiled assets from asset-compiler stage +# Bring in gems, app, and precompiled assets COPY --from=asset-compiler --chown=puma:puma /usr/local/bundle /usr/local/bundle COPY --from=asset-compiler --chown=puma:puma /var/www/mPATH /var/www/mPATH -# Set permissions as root +# Entrypoint + perms USER root RUN chmod +x /var/www/mPATH/docker/app/entrypoint.sh \ && mkdir -p /var/www/mPATH/tmp/pids /var/www/mPATH/tmp/cache /var/www/mPATH/log \ && chown -R puma:puma /var/www/mPATH /usr/local/bundle /tmp \ && chmod -R 755 /tmp -# Switch to puma user +# Run as puma USER puma -# Expose port for Puma EXPOSE 8443 - -# Entrypoint -ENTRYPOINT ["/bin/bash", "/var/www/mPATH/docker/app/entrypoint.sh"] \ No newline at end of file +ENTRYPOINT ["/bin/bash", "/var/www/mPATH/docker/app/entrypoint.sh"]