diff --git a/.github/actions/enforce-pr-title-format/action.yml b/.github/actions/enforce-pr-title-format/action.yml new file mode 100644 index 0000000..001cf83 --- /dev/null +++ b/.github/actions/enforce-pr-title-format/action.yml @@ -0,0 +1,114 @@ +name: Enforce PR title format +description: Check PR title matches required format and post guidance when invalid +inputs: + github-token: + description: GitHub token used to call the REST API + required: true + title-pattern: + description: Regex pattern the PR title must match + required: false + default: "^(SCM|BSS|BSS2)-[0-9]+ .{5,}" + title-pattern-description: + description: Human-readable description of the expected format + required: false + default: "PR title must start with SCM, BSS, or BSS2 ticket number followed by a space and description (minimum 5 characters, e.g., SCM-1234 description or BSS-5678 description or BSS2-9012 description)" + +runs: + using: composite + steps: + - name: Check PR title format and comment on invalid format + uses: actions/github-script@v6 + env: + TITLE_PATTERN: ${{ inputs.title-pattern }} + TITLE_PATTERN_DESCRIPTION: ${{ inputs.title-pattern-description }} + with: + github-token: ${{ inputs.github-token }} + script: | + const pr = context.payload.pull_request; + if (!pr) throw new Error('This action must be run in the context of a pull_request event.'); + + // Fetch current PR state to get up-to-date title (important for workflow reruns) + const { data: currentPr } = await github.rest.pulls.get({ + owner: context.repo.owner, + repo: context.repo.repo, + pull_number: pr.number, + }); + + const title = currentPr.title || ''; + const pattern = process.env.TITLE_PATTERN || '^(SCM|BSS|BSS2)-[0-9]+ .{5,}'; + const patternDescription = process.env.TITLE_PATTERN_DESCRIPTION || 'PR title must start with SCM, BSS, or BSS2 ticket number followed by a space and description (minimum 5 characters, e.g., SCM-1234 description or BSS-5678 description or BSS2-9012 description)'; + const regex = new RegExp(pattern); + + 
core.info(`PR title: "${title}"`); + core.info(`Expected pattern: ${pattern}`); + core.info(`Pattern description: ${patternDescription}`); + + if (!regex.test(title)) { + const body = [ + '❌ **Automated check: PR title format validation failed**', + '', + `**Current title:** \`${title}\``, + '', + `**Required format:** ${patternDescription}`, + '', + '**Examples of valid titles:**', + '- `SCM-1234 Add new user authentication feature`', + '- `BSS-5678 Fix API timeout issue`', + '- `SCM-9012 Update README with setup instructions`', + '- `BSS-3456 Optimize database query performance`', + '', + 'Please update your PR title to match the required format before merging.', + ].join('\n'); + + const { data: comments } = await github.rest.issues.listComments({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: pr.number, + per_page: 100, + }); + + const marker = 'Automated check: PR title format validation failed'; + const existing = (comments || []).find(c => c.user && c.user.type === 'Bot' && c.body && c.body.includes(marker)); + + if (existing) { + // Update existing comment + await github.rest.issues.updateComment({ + owner: context.repo.owner, + repo: context.repo.repo, + comment_id: existing.id, + body, + }); + } else { + // Create new comment + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: pr.number, + body, + }); + } + + throw new Error(`PR title does not match required format: ${patternDescription}`); + } else { + core.info('✅ PR title matches required format — check passed.'); + + // Optionally clean up old warning comments if title is now valid + const { data: comments } = await github.rest.issues.listComments({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: pr.number, + per_page: 100, + }); + + const marker = 'Automated check: PR title format validation failed'; + const existing = (comments || []).find(c => c.user && c.user.type === 'Bot' && c.body && 
c.body.includes(marker)); + + if (existing) { + core.info('Deleting old validation failure comment since title is now valid.'); + await github.rest.issues.deleteComment({ + owner: context.repo.owner, + repo: context.repo.repo, + comment_id: existing.id, + }); + } + } diff --git a/.github/actions/extract-branch-name/action.yaml b/.github/actions/extract-branch-name/action.yaml new file mode 100644 index 0000000..863e2bf --- /dev/null +++ b/.github/actions/extract-branch-name/action.yaml @@ -0,0 +1,51 @@ +name: "Extract Branch Name" +description: "Extracts the branch name (strips feature/ prefix) and exposes it as an output" + +inputs: + branch: + description: "Branch name to extract (default: PR head or GITHUB_REF)" + required: false + default: ${{ github.head_ref || github.ref_name }} + short_code: + description: appended to tags when used instead of branches + required: false + default: "" + +outputs: + branch_name: + description: "Branch name without feature/ prefix" + value: ${{ steps.extract.outputs.branch_name }} + +runs: + using: "composite" + steps: + - name: Extract branch name + id: extract + shell: bash + env: + SHORT_CODE: ${{ inputs.short_code }} + BRANCH: ${{ inputs.branch }} + run: | + SHORT_CODE="${SHORT_CODE}" + BRANCH="${BRANCH}" + BRANCH_NAME="${BRANCH##*/}" + + # Check if this is a version tag + if [[ "$BRANCH_NAME" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then + # If short_code is provided, prefix the tag + if [[ -n "$SHORT_CODE" ]]; then + BRANCH_NAME="${SHORT_CODE}-${BRANCH_NAME}" + fi + else + BRANCH_NAME=$(echo "$BRANCH_NAME" | tr -cd '[:alnum:]-' | cut -c 1-30) + fi + + # removing any trailing dash + BRANCH_NAME="${BRANCH_NAME%-}" + + echo "branch_name=$BRANCH_NAME" >> "$GITHUB_OUTPUT" + + - name: Echo branch name + shell: bash + run: | + echo "branch_name = '${{ steps.extract.outputs.branch_name }}'" diff --git a/.github/actions/set-metadata/action.yaml b/.github/actions/set-metadata/action.yaml new file mode 100644 index 0000000..98e9b39 --- 
/dev/null +++ b/.github/actions/set-metadata/action.yaml @@ -0,0 +1,194 @@ +name: "Set CI/CD metadata" + +description: "Sets metadata for CI/CD workflows" + +inputs: + calling_workflow: + description: "The workflow that is calling this action" + required: true + tag: + description: "The tag that is going to be used when CICD deploy calls this action" + required: false + action: + description: "Action to be performed (e.g., apply, destroy)" + required: false + environment: + description: "Environment for the action (e.g., cicd)" + required: false + image_tag: + description: "Docker image tag" + required: false + nation: + description: "Nation tag for the image" + required: false + sql_file: + description: "SQL file to run against the database (optional)" + required: false + default: "coreData.sql" + environment_name: + description: "Suffix to append to the job name for uniqueness" + required: false + triggered_by_lambda: + description: "This workflow is being called from lambda" + required: false + short_code: + description: "Short code to identify the user" + required: false + default: "" + build_type: + description: "Clean or Patch build" + db_snapshot_identifier: + description: "RDS snapshot identifier to use for the database" + required: false + release_tag: + description: "Release tag" + required: false + ttl_days: + description: "Number of days to keep the ephemeral environment" + required: false + +outputs: + # Default Variables + build_datetime: + description: "Build date and time" + value: ${{ steps.default_variables.outputs.build_datetime }} + build_timestamp: + description: "Build timestamp" + value: ${{ steps.default_variables.outputs.build_timestamp }} + build_epoch: + description: "Build epoch" + value: ${{ steps.default_variables.outputs.build_epoch }} + nodejs_version: + description: "Node.js version" + value: ${{ steps.default_variables.outputs.nodejs_version }} + python_version: + description: "Python version" + value: ${{ 
steps.default_variables.outputs.python_version }} + terraform_version: + description: "Terraform version" + value: ${{ steps.default_variables.outputs.terraform_version }} + version: + description: "Version" + value: ${{ steps.default_variables.outputs.version }} + # Tag + tag: + description: "Tag" + value: ${{ steps.tag.outputs.tag }} + # Workflow Variables + branch_name: + description: "Branch name" + value: ${{ steps.workflow_variables.outputs.branch_name }} + action: + description: "Action to be performed (e.g., apply, destroy)" + value: ${{ steps.workflow_variables.outputs.action }} + environment: + description: "Environment for the action (e.g., cicd)" + value: ${{ steps.workflow_variables.outputs.environment }} + image_tag: + description: "Docker image tag" + value: ${{ steps.set_image_tag.outputs.image_tag }} + nation: + description: "Nation tag for the image" + value: ${{ steps.workflow_variables.outputs.nation }} + environment_name: + description: "Suffix to append to the job name for uniqueness" + value: ${{ steps.workflow_variables.outputs.environment_name }} + triggered_by_lambda: + description: "This workflow is being called from lambda" + value: ${{ steps.workflow_variables.outputs.triggered_by_lambda }} + short_code: + description: "Short code to identify the user" + value: ${{ steps.workflow_variables.outputs.short_code }} + build_type: + description: "Clean or Patch build" + value: ${{ steps.workflow_variables.outputs.build_type }} + sql_file: + description: "SQL file to run against the database (optional)" + value: ${{ steps.workflow_variables.outputs.sql_file }} + db_snapshot_identifier: + description: "RDS snapshot identifier to use for the database" + value: ${{ steps.workflow_variables.outputs.db_snapshot_identifier }} + release_tag: + description: "Release tag" + value: ${{ steps.workflow_variables.outputs.release_tag }} + ttl_days: + description: "Number of days to keep the ephemeral environment" + value: ${{ 
steps.workflow_variables.outputs.ttl_days }} + +runs: + using: "composite" + steps: + - name: "Checkout code" + uses: actions/checkout@v6 + + - name: "Set default variables" + shell: bash + id: default_variables + run: | + datetime=$(date -u +'%Y-%m-%dT%H:%M:%S%z') + echo "build_datetime=$datetime" >> $GITHUB_OUTPUT + echo "build_timestamp=$(date --date=$datetime -u +'%Y%m%d%H%M%S')" >> $GITHUB_OUTPUT + echo "build_epoch=$(date --date=$datetime -u +'%s')" >> $GITHUB_OUTPUT + echo "nodejs_version=$(yq '.infrastructure.nodejs // "unknown"' .tool-versions.yaml)" >> $GITHUB_OUTPUT + echo "python_version=$(yq '.infrastructure.python // "unknown"' .tool-versions.yaml)" >> $GITHUB_OUTPUT + echo "terraform_version=$(yq '.infrastructure.terraform // "unknown"' .tool-versions.yaml)" >> $GITHUB_OUTPUT + echo "version=$(head -n 1 .version 2> /dev/null || echo unknown)" >> $GITHUB_OUTPUT + + - name: "Set Deploy Variables" + shell: bash + if: ${{ inputs.calling_workflow == 'CI/CD deploy' }} + id: tag + env: + INPUT_TAG: ${{ inputs.tag }} + run: | + echo "tag=$INPUT_TAG" >> $GITHUB_OUTPUT + + - name: Extract Branch Name + id: extract_branch + uses: ./.github/actions/extract-branch-name + with: + short_code: ${{ inputs.short_code }} + + - name: Set image name + shell: bash + id: set_image_tag + run: | + RAW_REF="${{ steps.extract_branch.outputs.branch_name }}" + # Look for a semantic version like v1.0.1 anywhere in the string + if [[ "$RAW_REF" =~ (v[0-9]+\.[0-9]+\.[0-9]+) ]]; then + IMAGE_TAG="${BASH_REMATCH[1]}" # Extract just the semver + else + IMAGE_TAG="${RAW_REF}-latest" # Fallback + fi + echo "image_tag=$IMAGE_TAG" >> "$GITHUB_OUTPUT" + + - name: "Set ${{ inputs.calling_workflow }} Variables" + shell: bash + id: workflow_variables + env: + INPUT_CALLING_WORKFLOW: ${{ inputs.calling_workflow }} + INPUT_ACTION: ${{ inputs.action }} + INPUT_BUILD_TYPE: ${{ inputs.build_type }} + INPUT_DB_SNAPSHOT_IDENTIFIER: ${{ inputs.db_snapshot_identifier }} + INPUT_ENVIRONMENT: ${{ 
inputs.environment }} + INPUT_ENVIRONMENT_NAME: ${{ inputs.environment_name }} + INPUT_NATION: ${{ inputs.nation }} + INPUT_RELEASE_TAG: ${{ inputs.release_tag }} + INPUT_SHORT_CODE: ${{ inputs.short_code }} + INPUT_SQL_FILE: ${{ inputs.sql_file }} + INPUT_TTL_DAYS: ${{ inputs.ttl_days }} + INPUT_TRIGGERED_BY_LAMBDA: ${{ inputs.triggered_by_lambda }} + run: | + echo "calling_workflow=${INPUT_CALLING_WORKFLOW}" >> $GITHUB_OUTPUT + echo "branch_name=${{ steps.extract_branch.outputs.branch_name }}" >> $GITHUB_OUTPUT + echo "action=${INPUT_ACTION}" >> $GITHUB_OUTPUT + echo "environment=${INPUT_ENVIRONMENT}" >> $GITHUB_OUTPUT + echo "nation=${INPUT_NATION}" >> $GITHUB_OUTPUT + echo "sql_file=${INPUT_SQL_FILE}" >> $GITHUB_OUTPUT + echo "environment_name=${INPUT_ENVIRONMENT_NAME}" >> $GITHUB_OUTPUT + echo "triggered_by_lambda=${INPUT_TRIGGERED_BY_LAMBDA}" >> $GITHUB_OUTPUT + echo "short_code=${INPUT_SHORT_CODE}" >> $GITHUB_OUTPUT + echo "build_type=${INPUT_BUILD_TYPE}" >> $GITHUB_OUTPUT + echo "db_snapshot_identifier=${INPUT_DB_SNAPSHOT_IDENTIFIER}" >> $GITHUB_OUTPUT + echo "release_tag=${INPUT_RELEASE_TAG}" >> $GITHUB_OUTPUT + echo "ttl_days=${INPUT_TTL_DAYS}" >> $GITHUB_OUTPUT diff --git a/.github/workflows/cicd-1-pull-request.yaml b/.github/workflows/cicd-1-pull-request.yaml index 634c453..430d858 100644 --- a/.github/workflows/cicd-1-pull-request.yaml +++ b/.github/workflows/cicd-1-pull-request.yaml @@ -1,116 +1,60 @@ -name: "CI/CD pull request" +name: CI/CD 2 - On Pull Request # The total recommended execution time for the "CI/CD Pull Request" workflow is around 20 minutes. 
+# Test comment on: - push: - branches: - - "**" pull_request: - types: [opened, reopened] + types: [opened, reopened, ready_for_review, synchronize] + +concurrency: + group: pull-${{ github.ref }} + cancel-in-progress: false + +permissions: + id-token: write + contents: read jobs: metadata: - name: "Set CI/CD metadata" + if: github.event.pull_request.draft == false && github.actor != 'dependabot[bot]' + name: "Set Metadata" runs-on: ubuntu-latest timeout-minutes: 1 outputs: - build_datetime_london: ${{ steps.variables.outputs.build_datetime_london }} - build_datetime: ${{ steps.variables.outputs.build_datetime }} - build_timestamp: ${{ steps.variables.outputs.build_timestamp }} - build_epoch: ${{ steps.variables.outputs.build_epoch }} - nodejs_version: ${{ steps.variables.outputs.nodejs_version }} - python_version: ${{ steps.variables.outputs.python_version }} - terraform_version: ${{ steps.variables.outputs.terraform_version }} - version: ${{ steps.variables.outputs.version }} - does_pull_request_exist: ${{ steps.pr_exists.outputs.does_pull_request_exist }} + build_datetime_london: ${{ steps.set-metadata.outputs.build_datetime_london }} + build_datetime: ${{ steps.set-metadata.outputs.build_datetime }} + build_timestamp: ${{ steps.set-metadata.outputs.build_timestamp }} + build_epoch: ${{ steps.set-metadata.outputs.build_epoch }} + nodejs_version: ${{ steps.set-metadata.outputs.nodejs_version }} + python_version: ${{ steps.set-metadata.outputs.python_version }} + terraform_version: ${{ steps.set-metadata.outputs.terraform_version }} + version: ${{ steps.set-metadata.outputs.version }} + action: ${{ steps.set-metadata.outputs.action }} + environment: ${{ steps.set-metadata.outputs.environment }} + nation: ${{ steps.set-metadata.outputs.nation }} + branch_name: ${{ steps.set-metadata.outputs.branch_name }} + sql_file: ${{ steps.set-metadata.outputs.sql_file }} + image_tag: ${{ steps.set-metadata.outputs.image_tag }} steps: - name: "Checkout code" - uses: 
actions/checkout@v4 - - name: "Set CI/CD variables" - id: variables - run: | - datetime=$(date -u +'%Y-%m-%dT%H:%M:%S%z') - BUILD_DATETIME=$datetime make version-create-effective-file - echo "build_datetime_london=$(TZ=Europe/London date --date=$datetime +'%Y-%m-%dT%H:%M:%S%z')" >> $GITHUB_OUTPUT - echo "build_datetime=$datetime" >> $GITHUB_OUTPUT - echo "build_timestamp=$(date --date=$datetime -u +'%Y%m%d%H%M%S')" >> $GITHUB_OUTPUT - echo "build_epoch=$(date --date=$datetime -u +'%s')" >> $GITHUB_OUTPUT - echo "nodejs_version=$(grep "^nodejs" .tool-versions | cut -f2 -d' ')" >> $GITHUB_OUTPUT - echo "python_version=$(grep "^nodejs" .tool-versions | cut -f2 -d' ')" >> $GITHUB_OUTPUT - echo "terraform_version=$(grep "^terraform" .tool-versions | cut -f2 -d' ')" >> $GITHUB_OUTPUT - echo "version=$(head -n 1 .version 2> /dev/null || echo unknown)" >> $GITHUB_OUTPUT - - name: "Check if pull request exists for this branch" - id: pr_exists - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - branch_name=${GITHUB_HEAD_REF:-$(echo $GITHUB_REF | sed 's#refs/heads/##')} - echo "Current branch is '$branch_name'" - if gh pr list --head $branch_name | grep -q .; then - echo "Pull request exists" - echo "does_pull_request_exist=true" >> $GITHUB_OUTPUT - else - echo "Pull request doesn't exist" - echo "does_pull_request_exist=false" >> $GITHUB_OUTPUT - fi - - name: "List variables" - run: | - export BUILD_DATETIME_LONDON="${{ steps.variables.outputs.build_datetime_london }}" - export BUILD_DATETIME="${{ steps.variables.outputs.build_datetime }}" - export BUILD_TIMESTAMP="${{ steps.variables.outputs.build_timestamp }}" - export BUILD_EPOCH="${{ steps.variables.outputs.build_epoch }}" - export NODEJS_VERSION="${{ steps.variables.outputs.nodejs_version }}" - export PYTHON_VERSION="${{ steps.variables.outputs.python_version }}" - export TERRAFORM_VERSION="${{ steps.variables.outputs.terraform_version }}" - export VERSION="${{ steps.variables.outputs.version }}" - export 
DOES_PULL_REQUEST_EXIST="${{ steps.pr_exists.outputs.does_pull_request_exist }}" - make list-variables - commit-stage: # Recommended maximum execution time is 2 minutes - name: "Commit stage" - needs: [metadata] - uses: ./.github/workflows/stage-1-commit.yaml - with: - build_datetime: "${{ needs.metadata.outputs.build_datetime }}" - build_timestamp: "${{ needs.metadata.outputs.build_timestamp }}" - build_epoch: "${{ needs.metadata.outputs.build_epoch }}" - nodejs_version: "${{ needs.metadata.outputs.nodejs_version }}" - python_version: "${{ needs.metadata.outputs.python_version }}" - terraform_version: "${{ needs.metadata.outputs.terraform_version }}" - version: "${{ needs.metadata.outputs.version }}" - secrets: inherit - test-stage: # Recommended maximum execution time is 5 minutes - name: "Test stage" - needs: [metadata] - uses: ./.github/workflows/stage-2-test.yaml - with: - build_datetime: "${{ needs.metadata.outputs.build_datetime }}" - build_timestamp: "${{ needs.metadata.outputs.build_timestamp }}" - build_epoch: "${{ needs.metadata.outputs.build_epoch }}" - nodejs_version: "${{ needs.metadata.outputs.nodejs_version }}" - python_version: "${{ needs.metadata.outputs.python_version }}" - terraform_version: "${{ needs.metadata.outputs.terraform_version }}" - version: "${{ needs.metadata.outputs.version }}" - secrets: inherit - build-stage: # Recommended maximum execution time is 3 minutes - name: "Build stage" + uses: actions/checkout@v6 + + - name: "Set CI/CD metadata" + id: set-metadata + uses: ./.github/actions/set-metadata + with: + calling_workflow: ${{ github.workflow }} + nation: ${{ inputs.nation || 'en' }} + environment: ${{ inputs.environment || 'autotest' }} + action: ${{ inputs.action || 'apply' }} + sql_file: ${{ inputs.sql_file || 'coreData.sql' }} + + coding-standards: # Recommended maximum execution time is 5 minutes + if: github.actor != 'dependabot[bot]' + name: "Enforce Coding Standards" needs: [metadata] - uses: 
./.github/workflows/stage-3-build.yaml - if: needs.metadata.outputs.does_pull_request_exist == 'true' || (github.event_name == 'pull_request' && (github.event.action == 'opened' || github.event.action == 'reopened')) - with: - build_datetime: "${{ needs.metadata.outputs.build_datetime }}" - build_timestamp: "${{ needs.metadata.outputs.build_timestamp }}" - build_epoch: "${{ needs.metadata.outputs.build_epoch }}" - nodejs_version: "${{ needs.metadata.outputs.nodejs_version }}" - python_version: "${{ needs.metadata.outputs.python_version }}" - terraform_version: "${{ needs.metadata.outputs.terraform_version }}" - version: "${{ needs.metadata.outputs.version }}" - secrets: inherit - acceptance-stage: # Recommended maximum execution time is 10 minutes - name: "Acceptance stage" - needs: [metadata, build-stage] - uses: ./.github/workflows/stage-4-acceptance.yaml - if: needs.metadata.outputs.does_pull_request_exist == 'true' || (github.event_name == 'pull_request' && (github.event.action == 'opened' || github.event.action == 'reopened')) + uses: ./.github/workflows/stage-1-coding-standards.yaml with: build_datetime: "${{ needs.metadata.outputs.build_datetime }}" build_timestamp: "${{ needs.metadata.outputs.build_timestamp }}" @@ -120,3 +64,26 @@ jobs: terraform_version: "${{ needs.metadata.outputs.terraform_version }}" version: "${{ needs.metadata.outputs.version }}" secrets: inherit + + # enforce-pr-title-format: # + # name: "Enforce PR title format" + # runs-on: ubuntu-latest + # if: github.event.pull_request.draft == false && github.actor != 'dependabot[bot]' + # permissions: + # issues: write + # contents: read + # steps: + # - name: Checkout code + # uses: actions/checkout@v6 + + # - name: Generate a token + # id: generate-token + # uses: actions/create-github-app-token@v3 + # with: + # app-id: ${{ vars.GH_APP_ID }} + # private-key: ${{ secrets.GH_APP_PRIVATE_KEY }} + + # - name: Check PR title format + # uses: ./.github/actions/enforce-pr-title-format + # with: + # 
github-token: ${{ steps.generate-token.outputs.token }} diff --git a/.github/workflows/stage-1-coding-standards.yaml b/.github/workflows/stage-1-coding-standards.yaml new file mode 100644 index 0000000..1ac377c --- /dev/null +++ b/.github/workflows/stage-1-coding-standards.yaml @@ -0,0 +1,133 @@ +name: Stage 1 - Run Coding Standards + +on: + workflow_call: + inputs: + build_datetime: + description: "Build datetime, set by the CI/CD pipeline workflow" + required: true + type: string + build_timestamp: + description: "Build timestamp, set by the CI/CD pipeline workflow" + required: true + type: string + build_epoch: + description: "Build epoch, set by the CI/CD pipeline workflow" + required: true + type: string + nodejs_version: + description: "Node.js version, set by the CI/CD pipeline workflow" + required: true + type: string + python_version: + description: "Python version, set by the CI/CD pipeline workflow" + required: true + type: string + terraform_version: + description: "Terraform version, set by the CI/CD pipeline workflow" + required: true + type: string + version: + description: "Version of the software, set by the CI/CD pipeline workflow" + required: true + type: string + +jobs: + scan-secrets: + name: "Scan secrets" + runs-on: ubuntu-latest + timeout-minutes: 2 + steps: + - name: "Checkout code" + uses: actions/checkout@v6 + with: + fetch-depth: 0 # Full history is needed to scan all commits + - name: "Scan secrets" + uses: ./.github/actions/scan-secrets + + check-file-format: + name: "Check file format" + runs-on: ubuntu-latest + timeout-minutes: 2 + steps: + - name: "Checkout code" + uses: actions/checkout@v6 + with: + fetch-depth: 0 # Full history is needed to compare branches + - name: "Check file format" + uses: ./.github/actions/check-file-format + + check-markdown-format: + name: "Check Markdown format" + runs-on: ubuntu-latest + timeout-minutes: 2 + steps: + - name: "Checkout code" + uses: actions/checkout@v6 + with: + fetch-depth: 0 # Full 
history is needed to compare branches + - name: "Check Markdown format" + uses: ./.github/actions/check-markdown-format + + check-english-usage: + name: "Check English usage" + runs-on: ubuntu-latest + timeout-minutes: 2 + steps: + - name: "Checkout code" + uses: actions/checkout@v6 + with: + fetch-depth: 0 # Full history is needed to compare branches + - name: "Check English usage" + uses: ./.github/actions/check-english-usage + + lint-terraform: + name: "Lint Terraform" + runs-on: ubuntu-latest + timeout-minutes: 2 + steps: + - name: "Checkout code" + uses: actions/checkout@v6 + - name: "Lint Terraform" + uses: ./.github/actions/lint-terraform + + count-lines-of-code: + name: "Count lines of code" + runs-on: ubuntu-latest + permissions: + id-token: write + contents: read + timeout-minutes: 2 + steps: + - name: "Checkout code" + uses: actions/checkout@v6 + - name: "Count lines of code" + uses: ./.github/actions/create-lines-of-code-report + with: + build_datetime: "${{ inputs.build_datetime }}" + build_timestamp: "${{ inputs.build_timestamp }}" + idp_aws_report_upload_account_id: "${{ secrets.IDP_AWS_REPORT_UPLOAD_ACCOUNT_ID }}" + idp_aws_report_upload_region: "${{ secrets.IDP_AWS_REPORT_UPLOAD_REGION }}" + idp_aws_report_upload_role_name: "${{ secrets.IDP_AWS_REPORT_UPLOAD_ROLE_NAME }}" + idp_aws_report_upload_bucket_endpoint: "${{ secrets.IDP_AWS_REPORT_UPLOAD_BUCKET_ENDPOINT }}" + + scan-dependencies: + name: "Scan dependencies" + runs-on: ubuntu-latest + permissions: + id-token: write + contents: read + timeout-minutes: 2 + steps: + - name: "Checkout code" + uses: actions/checkout@v6 + - name: "Scan dependencies" + uses: ./.github/actions/scan-dependencies + with: + build_datetime: "${{ inputs.build_datetime }}" + build_timestamp: "${{ inputs.build_timestamp }}" + idp_aws_report_upload_account_id: "${{ secrets.IDP_AWS_REPORT_UPLOAD_ACCOUNT_ID }}" + idp_aws_report_upload_region: "${{ secrets.IDP_AWS_REPORT_UPLOAD_REGION }}" + idp_aws_report_upload_role_name: 
"${{ secrets.IDP_AWS_REPORT_UPLOAD_ROLE_NAME }}" + idp_aws_report_upload_bucket_endpoint: "${{ secrets.IDP_AWS_REPORT_UPLOAD_BUCKET_ENDPOINT }}" + verbose: false diff --git a/.tool-versions b/.tool-versions index 0c02acd..0d035de 100644 --- a/.tool-versions +++ b/.tool-versions @@ -1,14 +1,16 @@ # This file is for you! Please, updated to the versions agreed by your team. -terraform 1.7.0 +terraform 1.13.2 +terraform-docs 0.19.0 pre-commit 3.6.0 +python 3.12 vale 3.6.0 # ============================================================================== # The section below is reserved for Docker image versions. # TODO: Move this section - consider using a different file for the repository template dependencies. -# docker/ghcr.io/anchore/grype v0.69.1@sha256:d41fcb371d0af59f311e72123dff46900ebd6d0482391b5a830853ee4f9d1a76 # SEE: https://github.com/anchore/grype/pkgs/container/grype +# docker/ghcr.io/anchore/grype v0.100.0@sha256:651e558f9ba84f2a790b3449c8a57cbbf4f34e004f7d3f14ae8f8cbeede4cd33 # SEE: https://github.com/anchore/grype/pkgs/container/grype # docker/ghcr.io/anchore/syft v0.92.0@sha256:63c60f0a21efb13e80aa1359ab243e49213b6cc2d7e0f8179da38e6913b997e0 # SEE: https://github.com/anchore/syft/pkgs/container/syft # docker/ghcr.io/gitleaks/gitleaks v8.18.0@sha256:fd2b5cab12b563d2cc538b14631764a1c25577780e3b7dba71657d58da45d9d9 # SEE: https://github.com/gitleaks/gitleaks/pkgs/container/gitleaks # docker/ghcr.io/igorshubovych/markdownlint-cli v0.37.0@sha256:fb3e79946fce78e1cde84d6798c6c2a55f2de11fc16606a40d49411e281d950d # SEE: https://github.com/igorshubovych/markdownlint-cli/pkgs/container/markdownlint-cli diff --git a/.tool-versions.yaml b/.tool-versions.yaml new file mode 100644 index 0000000..72ca576 --- /dev/null +++ b/.tool-versions.yaml @@ -0,0 +1,7 @@ +infrastructure: + terraform: 1.13.2 + terraform-docs: 0.19.0 + pre-commit: 3.6.0 + vale: 3.6.0 + python: 3.12.0 + nodejs: 24.8.0 diff --git a/infrastructure/modules/api-gateway/main.tf 
b/infrastructure/modules/api-gateway/main.tf new file mode 100644 index 0000000..9417850 --- /dev/null +++ b/infrastructure/modules/api-gateway/main.tf @@ -0,0 +1,254 @@ +########### +# Secrets # +########### +resource "random_password" "api_auth_token" { + length = 20 + special = false +} + +resource "aws_secretsmanager_secret" "api_token" { + name = "${var.name_prefix}-${var.api_gateway_name}-api" + description = "Auth token for api gateway" + + dynamic "replica" { + for_each = var.secret_replication_regions + content { + region = replica.value + } + } +} + +resource "aws_secretsmanager_secret_version" "api_token" { + secret_id = aws_secretsmanager_secret.api_token.id + secret_string = random_password.api_auth_token.result +} + + + +################ +# API Gateway # +################ + +# API Gateway REST API +resource "aws_api_gateway_rest_api" "api" { + name = "${var.name_prefix}-${var.api_gateway_name}" + description = var.api_gateway_description + + endpoint_configuration { + types = ["REGIONAL"] + } + + lifecycle { + create_before_destroy = true + } +} + + + +# API Resource +resource "aws_api_gateway_resource" "api_resource" { + rest_api_id = aws_api_gateway_rest_api.api.id + parent_id = aws_api_gateway_rest_api.api.root_resource_id + path_part = var.api_path_part +} + +resource "aws_api_gateway_method" "post_method" { + rest_api_id = aws_api_gateway_rest_api.api.id + resource_id = aws_api_gateway_resource.api_resource.id + http_method = var.http_method + authorization = "NONE" + api_key_required = true +} + +# Integration with Lambda +resource "aws_api_gateway_integration" "lambda_integration" { + rest_api_id = aws_api_gateway_rest_api.api.id + resource_id = aws_api_gateway_resource.api_resource.id + http_method = aws_api_gateway_method.post_method.http_method + integration_http_method = "POST" + type = "AWS_PROXY" + uri = var.aws_lambda_arn +} + +# Lambda Permission for API Gateway +resource "aws_lambda_permission" "api_gateway" { + statement_id = 
"AllowAPIGatewayInvoke" + action = "lambda:InvokeFunction" + function_name = var.aws_lambda_name + principal = "apigateway.amazonaws.com" + source_arn = "${aws_api_gateway_rest_api.api.execution_arn}/*/*" +} + +# Deploy API Gateway +resource "aws_api_gateway_deployment" "deployment" { + depends_on = [aws_api_gateway_integration.lambda_integration] + + rest_api_id = aws_api_gateway_rest_api.api.id + + triggers = { + redeployment_trigger = sha1(jsonencode(aws_api_gateway_integration.lambda_integration)) + } + + lifecycle { + create_before_destroy = true + } +} + +resource "aws_api_gateway_stage" "stage" { + deployment_id = aws_api_gateway_deployment.deployment.id + rest_api_id = aws_api_gateway_rest_api.api.id + stage_name = var.stage_name + xray_tracing_enabled = true + + access_log_settings { + destination_arn = aws_cloudwatch_log_group.log_group.arn + format = "{\"requestId\":\"$context.requestId\",\"ip\":\"$context.identity.sourceIp\",\"user\":\"$context.identity.user\",\"requestTime\":\"$context.requestTime\",\"httpMethod\":\"$context.httpMethod\",\"resourcePath\":\"$context.resourcePath\",\"status\":\"$context.status\",\"protocol\":\"$context.protocol\",\"responseLength\":\"$context.responseLength\"}" + } +} + +########################## +# API Key and Usage Plan # +########################## +resource "aws_api_gateway_api_key" "my_api_key" { + name = "${var.name_prefix}-${var.api_gateway_name}-api-key-${var.api_gateway_name}" + enabled = true + value = aws_secretsmanager_secret_version.api_token.secret_string +} + +resource "aws_api_gateway_usage_plan" "usage_plan" { + depends_on = [aws_api_gateway_stage.stage] + name = "${var.name_prefix}-${var.api_gateway_name}-usage-plan" + description = "The usage plan used for the ${var.name_prefix}-${var.api_gateway_name} endpoint" + + api_stages { + api_id = aws_api_gateway_rest_api.api.id + stage = aws_api_gateway_stage.stage.stage_name + } + + quota_settings { + limit = 1000 + period = "WEEK" + } + + throttle_settings 
{ + burst_limit = 25 + rate_limit = 50 + } +} + +resource "aws_api_gateway_usage_plan_key" "usage_plan_key" { + key_id = aws_api_gateway_api_key.my_api_key.id + key_type = "API_KEY" + usage_plan_id = aws_api_gateway_usage_plan.usage_plan.id +} + + +#################### +# cloudwatch # +#################### + +resource "aws_cloudwatch_log_group" "log_group" { + name = "${var.name_prefix}-api-gateway-Execution-Logs_${aws_api_gateway_rest_api.api.id}/${var.name_prefix}-${var.api_gateway_name}" + retention_in_days = 365 +} + + +############################################### +# IAM roles API Gateway logs account settings # +############################################### +resource "aws_iam_role" "apigateway_cloudwatch" { + name = "${var.name_prefix}-apigateway-cloudwatch-logs" + + assume_role_policy = jsonencode({ + Version = "2012-10-17", + Statement = [{ + Effect = "Allow", + Principal = { + Service = "apigateway.amazonaws.com" + }, + Action = "sts:AssumeRole" + }] + }) +} + +resource "aws_iam_role_policy_attachment" "apigateway_logs" { + role = aws_iam_role.apigateway_cloudwatch.name + policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonAPIGatewayPushToCloudWatchLogs" +} + +resource "aws_api_gateway_account" "account" { + cloudwatch_role_arn = aws_iam_role.apigateway_cloudwatch.arn +} + +###################### +# route 53 and Cert # +###################### + +resource "aws_acm_certificate" "cert" { + count = var.certificate_arn == null ? 1 : 0 + domain_name = var.hosted_zone_name + subject_alternative_names = ["${var.domain_name_prefix}.${var.hosted_zone_name}"] + validation_method = "DNS" +} + +resource "aws_route53_record" "cert_validation" { + for_each = var.certificate_arn == null ? 
{ + for dvo in aws_acm_certificate.cert[0].domain_validation_options : + dvo.domain_name => { + name = dvo.resource_record_name + type = dvo.resource_record_type + record = dvo.resource_record_value + } + } : {} + + zone_id = var.route53_hosted_zone_id + name = each.value.name + type = each.value.type + ttl = 300 + records = [each.value.record] +} + +resource "aws_acm_certificate_validation" "cert_validation" { + count = var.certificate_arn == null ? 1 : 0 + certificate_arn = aws_acm_certificate.cert[0].arn + + validation_record_fqdns = [ + for record in aws_route53_record.cert_validation : record.fqdn + ] +} + +# Use existing wildcard certificate from DNS stack or newly created certificate +resource "aws_api_gateway_domain_name" "gateway_domain_name" { + domain_name = "${var.domain_name_prefix}.${var.hosted_zone_name}" + regional_certificate_arn = var.certificate_arn != null ? var.certificate_arn : aws_acm_certificate_validation.cert_validation[0].certificate_arn + security_policy = "TLS_1_2" + + endpoint_configuration { + types = ["REGIONAL"] + } +} + + +resource "aws_route53_record" "route53_record" { + name = "${var.domain_name_prefix}.${var.hosted_zone_name}" + type = "A" + zone_id = var.route53_hosted_zone_id + + alias { + evaluate_target_health = true + name = aws_api_gateway_domain_name.gateway_domain_name.regional_domain_name + zone_id = aws_api_gateway_domain_name.gateway_domain_name.regional_zone_id + } +} + +# Map requests to the custom domain so they don't hit the default API Gateway domain +resource "aws_api_gateway_base_path_mapping" "custom_domain_mapping" { + api_id = aws_api_gateway_rest_api.api.id + stage_name = aws_api_gateway_stage.stage.stage_name + domain_name = aws_api_gateway_domain_name.gateway_domain_name.domain_name + base_path = var.stage_name + + depends_on = [aws_api_gateway_stage.stage] +} + + diff --git a/infrastructure/modules/api-gateway/outputs.tf b/infrastructure/modules/api-gateway/outputs.tf new file mode 100644 index 0000000..51dc20b --- /dev/null +++ 
b/infrastructure/modules/api-gateway/outputs.tf @@ -0,0 +1,25 @@ +output "api_gateway_id" { + description = "The ID of the API Gateway" + value = aws_api_gateway_rest_api.api.id +} + +output "api_gateway_url" { + description = "The URL of the API Gateway custom domain" + value = "https://${aws_api_gateway_domain_name.gateway_domain_name.domain_name}/${var.api_path_part}" +} + +output "api_gateway_invoke_url" { + description = "The invoke URL of the API Gateway stage" + value = aws_api_gateway_stage.stage.invoke_url +} + +output "api_key_id" { + description = "The ID of the API key" + value = aws_api_gateway_api_key.my_api_key.id +} + +output "api_key_secret_arn" { + description = "The ARN of the API key secret in Secrets Manager" + value = aws_secretsmanager_secret.api_token.arn +} + diff --git a/infrastructure/modules/api-gateway/readme.md b/infrastructure/modules/api-gateway/readme.md new file mode 100644 index 0000000..e928a0c --- /dev/null +++ b/infrastructure/modules/api-gateway/readme.md @@ -0,0 +1,78 @@ +# API Gateway + + + +## Requirements + +No requirements. + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider_aws) | n/a | +| [random](#provider_random) | n/a | + +## Modules + +No modules. 
+ +## Resources + +| Name | Type | +|------|------| +| [aws_acm_certificate.cert](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/acm_certificate) | resource | +| [aws_acm_certificate_validation.cert_validation](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/acm_certificate_validation) | resource | +| [aws_api_gateway_account.account](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/api_gateway_account) | resource | +| [aws_api_gateway_api_key.my_api_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/api_gateway_api_key) | resource | +| [aws_api_gateway_base_path_mapping.custom_domain_mapping](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/api_gateway_base_path_mapping) | resource | +| [aws_api_gateway_deployment.deployment](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/api_gateway_deployment) | resource | +| [aws_api_gateway_domain_name.gateway_domain_name](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/api_gateway_domain_name) | resource | +| [aws_api_gateway_integration.lambda_integration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/api_gateway_integration) | resource | +| [aws_api_gateway_method.post_method](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/api_gateway_method) | resource | +| [aws_api_gateway_resource.api_resource](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/api_gateway_resource) | resource | +| [aws_api_gateway_rest_api.api](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/api_gateway_rest_api) | resource | +| [aws_api_gateway_stage.stage](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/api_gateway_stage) | resource | +| 
[aws_api_gateway_usage_plan.usage_plan](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/api_gateway_usage_plan) | resource | +| [aws_api_gateway_usage_plan_key.usage_plan_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/api_gateway_usage_plan_key) | resource | +| [aws_cloudwatch_log_group.log_group](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group) | resource | +| [aws_iam_role.apigateway_cloudwatch](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [aws_iam_role_policy_attachment.apigateway_logs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_lambda_permission.api_gateway](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lambda_permission) | resource | +| [aws_route53_record.cert_validation](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route53_record) | resource | +| [aws_route53_record.route53_record](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route53_record) | resource | +| [aws_secretsmanager_secret.api_token](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/secretsmanager_secret) | resource | +| [aws_secretsmanager_secret_version.api_token](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/secretsmanager_secret_version) | resource | +| [random_password.api_auth_token](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/password) | resource | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [api_gateway_description](#input_api_gateway_description) | Description for the API Gateway | `string` | n/a | yes | +| [api_gateway_name](#input_api_gateway_name) | the name of the API Gateway | `any` 
| n/a | yes | +| [api_path_part](#input_api_path_part) | the url path for the API | `any` | n/a | yes | +| [aws_account_id](#input_aws_account_id) | n/a | `any` | n/a | yes | +| [aws_lambda_arn](#input_aws_lambda_arn) | n/a | `any` | n/a | yes | +| [aws_lambda_name](#input_aws_lambda_name) | n/a | `any` | n/a | yes | +| [aws_region](#input_aws_region) | The AWS region where the API Gateway is deployed | `string` | `"eu-west-2"` | no | +| [certificate_arn](#input_certificate_arn) | The ARN of the ACM certificate to use for the custom domain (optional, will create if not provided) | `string` | `null` | no | +| [domain_name_prefix](#input_domain_name_prefix) | Prefix for the custom domain name | `string` | n/a | yes | +| [hosted_zone_name](#input_hosted_zone_name) | The hosted zone name for the custom domain | `string` | n/a | yes | +| [http_method](#input_http_method) | The HTTP method to use for the API Gateway | `string` | n/a | yes | +| [name_prefix](#input_name_prefix) | Prefix for naming resources | `string` | n/a | yes | +| [route53_hosted_zone_id](#input_route53_hosted_zone_id) | The ID of the Route53 hosted zone | `string` | n/a | yes | +| [secret_replication_regions](#input_secret_replication_regions) | List of additional regions where created secrets should be replicated | `list(string)` | n/a | yes | +| [stage_name](#input_stage_name) | the API stage name | `any` | n/a | yes | + +## Outputs + +| Name | Description | +|------|-------------| +| [api_gateway_id](#output_api_gateway_id) | The ID of the API Gateway | +| [api_gateway_invoke_url](#output_api_gateway_invoke_url) | The invoke URL of the API Gateway stage | +| [api_gateway_url](#output_api_gateway_url) | The URL of the API Gateway custom domain | +| [api_key_id](#output_api_key_id) | The ID of the API key | +| [api_key_secret_arn](#output_api_key_secret_arn) | The ARN of the API key secret in Secrets Manager | + + diff --git a/infrastructure/modules/api-gateway/variables.tf 
b/infrastructure/modules/api-gateway/variables.tf new file mode 100644 index 0000000..df4102a --- /dev/null +++ b/infrastructure/modules/api-gateway/variables.tf @@ -0,0 +1,64 @@ +variable "aws_account_id" {} +variable "aws_lambda_name" {} +variable "aws_lambda_arn" {} +variable "aws_region" { + description = "The AWS region where the API Gateway is deployed" + type = string + default = "eu-west-2" + +} + +variable "api_gateway_name" { + description = "the name of the API Gateway" +} + +variable "api_path_part" { + description = "the url path for the API" +} + +variable "stage_name" { + description = "the API stage name" +} + + + +variable "http_method" { + description = "The HTTP method to use for the API Gateway" + type = string +} + +variable "api_gateway_description" { + description = "Description for the API Gateway" + type = string + +} + +variable "name_prefix" { + description = "Prefix for naming resources" + type = string +} + +variable "hosted_zone_name" { + description = "The hosted zone name for the custom domain" + type = string +} +variable "domain_name_prefix" { + description = "Prefix for the custom domain name" + type = string +} + +variable "route53_hosted_zone_id" { + description = "The ID of the Route53 hosted zone" + type = string +} + +variable "certificate_arn" { + description = "The ARN of the ACM certificate to use for the custom domain (optional, will create if not provided)" + type = string + default = null +} + +variable "secret_replication_regions" { + description = "List of additional regions where created secrets should be replicated" + type = list(string) +} diff --git a/infrastructure/modules/aws-backup-destination/backup.tf b/infrastructure/modules/aws-backup-destination/backup.tf new file mode 100644 index 0000000..2cdad37 --- /dev/null +++ b/infrastructure/modules/aws-backup-destination/backup.tf @@ -0,0 +1,13 @@ +resource "aws_backup_vault" "vault" { + name = var.name_prefix != null ? 
"${var.name_prefix}-backup-vault" : "${var.source_account_name}-backup-vault" + kms_key_arn = var.kms_key +} + +output "vault_arn" { + value = aws_backup_vault.vault.arn +} + +output "vault_name" { + description = "The name of the backup vault." + value = aws_backup_vault.vault.name +} diff --git a/infrastructure/modules/aws-backup-destination/backup_vault_lock.tf b/infrastructure/modules/aws-backup-destination/backup_vault_lock.tf new file mode 100644 index 0000000..e1a3178 --- /dev/null +++ b/infrastructure/modules/aws-backup-destination/backup_vault_lock.tf @@ -0,0 +1,7 @@ +resource "aws_backup_vault_lock_configuration" "vault_lock" { + count = var.enable_vault_protection ? 1 : 0 + backup_vault_name = aws_backup_vault.vault.name + changeable_for_days = var.vault_lock_type == "compliance" ? var.changeable_for_days : null + max_retention_days = var.vault_lock_max_retention_days + min_retention_days = var.vault_lock_min_retention_days +} diff --git a/infrastructure/modules/aws-backup-destination/backup_vault_policy.tf b/infrastructure/modules/aws-backup-destination/backup_vault_policy.tf new file mode 100644 index 0000000..cc6d1a1 --- /dev/null +++ b/infrastructure/modules/aws-backup-destination/backup_vault_policy.tf @@ -0,0 +1,22 @@ +resource "aws_backup_vault_policy" "vault_policy" { + backup_vault_name = aws_backup_vault.vault.name + policy = data.aws_iam_policy_document.vault_policy.json +} + +data "aws_iam_policy_document" "vault_policy" { + + statement { + sid = "AllowCopyToVault" + effect = "Allow" + + principals { + type = "AWS" + identifiers = [for account_id in var.source_account_ids : "arn:aws:iam::${account_id}:root"] + } + + actions = [ + "backup:CopyIntoBackupVault" + ] + resources = ["*"] + } +} diff --git a/infrastructure/modules/aws-backup-destination/iam.tf b/infrastructure/modules/aws-backup-destination/iam.tf new file mode 100644 index 0000000..646fe48 --- /dev/null +++ b/infrastructure/modules/aws-backup-destination/iam.tf @@ -0,0 +1,105 @@ 
+############################################# +# Cross-account role for copy-recovery-point +# Created only when enable_cross_account_vault_access = true +############################################# + +locals { + copy_recovery_role_name = var.name_prefix != null && var.name_prefix != "" ? "${var.name_prefix}-copy-recovery-point" : "copy-recovery-point" +} + +data "aws_iam_policy_document" "copy_recovery_point_assume" { + count = var.enable_cross_account_vault_access ? 1 : 0 + + statement { + effect = "Allow" + principals { + type = "AWS" + identifiers = [for account_id in var.source_account_ids : "arn:aws:iam::${account_id}:root"] + } + actions = ["sts:AssumeRole"] + } + + # Allow AWS Backup service to assume when executing StartCopyJob in this account + statement { + effect = "Allow" + principals { + type = "Service" + identifiers = ["backup.amazonaws.com"] + } + actions = ["sts:AssumeRole"] + } +} + +resource "aws_iam_role" "copy_recovery_point" { + count = var.enable_cross_account_vault_access ? 1 : 0 + name = local.copy_recovery_role_name + assume_role_policy = data.aws_iam_policy_document.copy_recovery_point_assume[0].json + description = "Role assumed by source account lambda to start and describe AWS Backup copy jobs, also passed to AWS Backup service for execution" + tags = { + ModuleComponent = "aws-backup-destination" + Purpose = "copy-recovery-point-cross-account" + } +} + +data "aws_iam_policy_document" "copy_recovery_point_permissions" { + count = var.enable_cross_account_vault_access ? 
1 : 0 + + # Start copy job (resource-level supports recoveryPoint*) + statement { + effect = "Allow" + actions = [ + "backup:StartCopyJob" + ] + # Recovery points originate from the source account; allow any recovery point ARN pattern for that account & any region used via var.region + resources = ["arn:aws:backup:${var.region}:${var.account_id}:recovery-point:*"] + } + + # Describe copy job (no resource-level restriction) + statement { + effect = "Allow" + actions = [ + "backup:DescribeCopyJob" + ] + resources = ["*"] + } + + statement { + effect = "Allow" + actions = [ + "backup:CopyIntoBackupVault", + "backup:CopyFromBackupVault" + ] + resources = concat( + [ + "arn:aws:backup:${var.region}:${var.account_id}:recovery-point:*", + "arn:aws:backup:${var.region}:${var.account_id}:backup-vault:${aws_backup_vault.vault.name}" + ], + [for account_id in var.source_account_ids : "arn:aws:backup:${var.region}:${account_id}:backup-vault:*"] + ) + } + + # Pass this role to AWS Backup service when invoking StartCopyJob with IamRoleArn + statement { + effect = "Allow" + actions = ["iam:PassRole"] + resources = [aws_iam_role.copy_recovery_point[0].arn] + condition { + test = "StringEquals" + variable = "iam:PassedToService" + values = ["backup.amazonaws.com"] + } + } +} + +resource "aws_iam_role_policy" "copy_recovery_point_policy" { + count = var.enable_cross_account_vault_access ? 1 : 0 + name = "${local.copy_recovery_role_name}-policy" + role = aws_iam_role.copy_recovery_point[0].id + policy = data.aws_iam_policy_document.copy_recovery_point_permissions[0].json +} + +output "copy_recovery_point_role_arn" { + description = "ARN of role to assume from source account lambda (set ASSUME_ROLE_ARN to this). Only present if enabled." 
+ value = try(aws_iam_role.copy_recovery_point[0].arn, null) + depends_on = [aws_iam_role.copy_recovery_point] +} diff --git a/infrastructure/modules/aws-backup-destination/readme.md b/infrastructure/modules/aws-backup-destination/readme.md new file mode 100644 index 0000000..fb5fb21 --- /dev/null +++ b/infrastructure/modules/aws-backup-destination/readme.md @@ -0,0 +1,91 @@ +# AWS Backup Module + +The AWS Backup Module helps automates the setup of AWS Backup resources in a destination account. It streamlines the process of creating, managing, and standardising backup configurations. + +## Inputs + +| Name | Description | Type | Default | Required | +| ------------------------------------------------------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------- | ----------------------- | :------: | +| [account_id](#input_account_id) | The id of the account that the vault will be in | `string` | n/a | yes | +| [changeable_for_days](#input_changeable_for_days) | How long you want the vault lock to be changeable for, only applies to compliance mode. This value is expressed in days no less than 3 and no greater than 36,500; otherwise, an error will return. 
| `number` | `14` | no | +| [enable_vault_protection](#input_enable_vault_protection) | Flag which controls if the vault lock is enabled | `bool` | `false` | no | +| [enable_iam_protection](#input_enable_iam_protection) | Flag which controls if the vault IAM is locked down, and copy restrictions are in place | `bool` | `false` | no | +| [kms_key](#input_kms_key) | The KMS key used to secure the vault | `string` | n/a | yes | +| [name_prefix](#input_name_prefix) | Optional name prefix for vault resources | `string` | `null` | no | +| [region](#input_region) | The region we should be operating in | `string` | `"eu-west-2"` | no | +| [source_account_ids](#input_source_account_ids) | The ids of the accounts that backups will come from | `list(string)` | n/a | yes | +| [source_account_name](#input_source_account_name) | The name of the account that backups will come from | `string` | n/a | yes | +| [vault_lock_max_retention_days](#input_vault_lock_max_retention_days) | The maximum retention period required on recovery points when vault lock enabled | `number` | `365` | no | +| [vault_lock_min_retention_days](#input_vault_lock_min_retention_days) | The minimum retention period required on recovery points when vault lock enabled | `number` | `365` | no | +| [vault_lock_type](#input_vault_lock_type) | The type of lock that the vault should be, will default to governance | `string` | `"governance"` | no | +| [source_vault_arn](#input_source_vault_arn) | ARN of the source vault, used to restrict where copies are allowed back to | `string` | `""` | no | + +## Example + +```terraform +module "test_backup_vault" { + source = "./modules/aws_backup" + source_account_name = "test" + account_id = local.aws_accounts_ids["backup"] + source_account_ids = [local.aws_accounts_ids["test"]] + kms_key = aws_kms_key.backup_key.arn + enable_vault_protection = true +} +``` + + + +## Requirements + +No requirements. 
+ +## Providers + +| Name | Version | +| ------------------------------------------------ | ------- | +| [aws](#provider_aws) | n/a | + +## Modules + +No modules. + +## Resources + +| Name | Type | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------- | +| [aws_backup_vault.vault](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/backup_vault) | resource | +| [aws_backup_vault_lock_configuration.vault_lock](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/backup_vault_lock_configuration) | resource | +| [aws_backup_vault_policy.vault_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/backup_vault_policy) | resource | +| [aws_iam_role.copy_recovery_point](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [aws_iam_role_policy.copy_recovery_point_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy) | resource | +| [aws_iam_policy_document.copy_recovery_point_assume](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.copy_recovery_point_permissions](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.vault_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | + +## Module Inputs + +| Name | Description | Type | Default | Required | +| ------------------------------------------------------------------------------------------------------------------------------------ | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------- | -------------- | :------: | +| [account_id](#input_account_id) | The id of the account that the vault will be in | `string` | n/a | yes | +| [changeable_for_days](#input_changeable_for_days) | How long you want the vault lock to be changeable for, only applies to compliance mode. This value is expressed in days no less than 3 and no greater than 36,500; otherwise, an error will return. | `number` | `14` | no | +| [enable_cross_account_vault_access](#input_enable_cross_account_vault_access) | Flag to enable cross account vault access for AWS Backup | `bool` | `false` | no | +| [enable_vault_protection](#input_enable_vault_protection) | Flag which controls if the vault lock is enabled | `bool` | `false` | no | +| [kms_key](#input_kms_key) | The KMS key used to secure the vault | `string` | n/a | yes | +| [name_prefix](#input_name_prefix) | Optional name prefix for vault resources | `string` | `null` | no | +| [region](#input_region) | The region we should be operating in | `string` | `"eu-west-2"` | no | +| [source_account_ids](#input_source_account_ids) | The ids of the accounts that backups will come from | `list(string)` | n/a | yes | +| [source_account_name](#input_source_account_name) | The name of the account that backups will come from | `string` | n/a | yes | +| [source_vault_arn](#input_source_vault_arn) | Source account vault arn, if set copies back are restricted to only this vault | `string` | `""` | no | +| [vault_lock_max_retention_days](#input_vault_lock_max_retention_days) | The maximum retention period required on recovery points when vault lock enabled | `number` | `365` | no | +| [vault_lock_min_retention_days](#input_vault_lock_min_retention_days) | The minimum retention period required on recovery points when vault lock enabled | 
`number` | `365` | no | +| [vault_lock_type](#input_vault_lock_type) | The type of lock that the vault should be, will default to governance | `string` | `"governance"` | no | + +## Outputs + +| Name | Description | +| ----------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | +| [copy_recovery_point_role_arn](#output_copy_recovery_point_role_arn) | arn of role to assume from source account lambda (set ASSUME_ROLE_ARN to this). Only present if enabled. | +| [vault_arn](#output_vault_arn) | n/a | +| [vault_name](#output_vault_name) | The name of the backup vault. | + + diff --git a/infrastructure/modules/aws-backup-destination/variables.tf b/infrastructure/modules/aws-backup-destination/variables.tf new file mode 100644 index 0000000..4d456a8 --- /dev/null +++ b/infrastructure/modules/aws-backup-destination/variables.tf @@ -0,0 +1,88 @@ +variable "source_account_name" { + # This is used as a prefix for the vault name, and referenced by the policy and the lock. + # It doesn't have to match anything in the source AWS account. + description = "The name of the account that backups will come from" + type = string +} + +variable "source_account_ids" { + # These source account IDs are used in the policy to allow root in each source account + # to copy backups into the vault. + description = "The ids of the accounts that backups will come from" + type = list(string) +} + +variable "account_id" { + # This is used to deny root from being able to copy backups from the vault + # to anywhere other than the source account. The constraint will need to + # be removed if the original source account is lost. 
+ description = "The id of the account that the vault will be in" + type = string +} + +variable "region" { + description = "The region we should be operating in" + type = string + default = "eu-west-2" +} + +variable "kms_key" { + description = "The KMS key used to secure the vault" + type = string +} + +variable "enable_vault_protection" { + # With this set to true, the minimum and maximum retention periods are also set only if this is true. + description = "Flag which controls if the vault lock is enabled" + type = bool + default = false +} + +variable "vault_lock_type" { + description = "The type of lock that the vault should be, will default to governance" + type = string + # See toplevel README.md: + # DO NOT SET THIS TO compliance UNTIL YOU ARE SURE THAT YOU WANT TO LOCK THE VAULT PERMANENTLY + # When you do, you will also need to set "enable_vault_protection" to true for it to take effect. + default = "governance" +} + +variable "vault_lock_min_retention_days" { + description = "The minimum retention period required on recovery points when vault lock enabled" + type = number + default = 365 +} + +variable "vault_lock_max_retention_days" { + description = "The maximum retention period required on recovery points when vault lock enabled" + type = number + default = 365 +} + +variable "changeable_for_days" { + description = "How long you want the vault lock to be changeable for, only applies to compliance mode. This value is expressed in days no less than 3 and no greater than 36,500; otherwise, an error will return." 
+ type = number + default = 14 +} + +variable "source_vault_arn" { + type = string + description = "Source account vault arn, if set copies back are restricted to only this vault" + default = "" +} + +variable "name_prefix" { + description = "Optional name prefix for vault resources" + type = string + default = null + validation { + condition = var.name_prefix == null || can(regex("^[^0-9]*$", var.name_prefix)) + error_message = "The name_prefix must not contain any numbers." + } +} + +variable "enable_cross_account_vault_access" { + description = "Flag to enable cross account vault access for AWS Backup" + type = bool + default = false +} diff --git a/infrastructure/modules/aws-backup-source/RESTORE_VALIDATION_EXAMPLE.md b/infrastructure/modules/aws-backup-source/RESTORE_VALIDATION_EXAMPLE.md new file mode 100644 index 0000000..2df4ea8 --- /dev/null +++ b/infrastructure/modules/aws-backup-source/RESTORE_VALIDATION_EXAMPLE.md @@ -0,0 +1,249 @@ +# Example: Enable RDS Restore Testing with Automated Validation + +This example shows how to enable automated validation of restored RDS instances during AWS Backup restore testing. + +## Overview + +When AWS Backup performs automated restore testing (configured via `aws_backup_restore_testing_plan`), this feature automatically validates the restored database to ensure it's accessible and contains expected data. + +## Configuration + +### Basic Setup (No Database Connectivity Testing) + +```hcl +module "source" { + source = "../../modules/aws-backup-source" + + # ... other configuration ... 
+ + backup_plan_config_rds = { + enable = true + selection_tag = "NHSE-Enable-Backup" + validation_window_hours = 1 + compliance_resource_types = ["RDS"] + restore_testing_overrides = { + dbSubnetGroupName = "my-rds-subnet-group" + dbParameterGroupName = "my-db-parameter-group" + } + } + + # Enable restore validation + restore_validation_enable = true +} +``` + +This will validate: + +- RDS instance is in "available" state +- Instance configuration matches expectations +- Instance is accessible via API + +### Advanced Setup (With Database Connectivity Testing) + +For more comprehensive validation including actual database connectivity and structure checks: + +```hcl +# First, create a secret with database credentials +resource "aws_secretsmanager_secret" "db_validation_credentials" { + name = "${var.name_prefix}-restore-validation-credentials" +} + +resource "aws_secretsmanager_secret_version" "db_validation_credentials" { + secret_id = aws_secretsmanager_secret.db_validation_credentials.id + secret_string = jsonencode({ + username = "validation_user" + password = "secure_password" # Use a secure method to generate/store this + database = "postgres" + port = 5432 + }) +} + +# Create security group for Lambda to access RDS +resource "aws_security_group" "restore_validation_lambda" { + name = "${var.name_prefix}-restore-validation-lambda" + description = "Security group for restore validation Lambda" + vpc_id = var.vpc_id + + egress { + from_port = 5432 + to_port = 5432 + protocol = "tcp" + cidr_blocks = ["10.0.0.0/8"] # Adjust to your RDS subnet CIDR + } +} + +# Allow Lambda security group to access RDS +resource "aws_security_group_rule" "rds_allow_validation_lambda" { + type = "ingress" + from_port = 5432 + to_port = 5432 + protocol = "tcp" + security_group_id = var.rds_security_group_id # Your RDS security group + source_security_group_id = aws_security_group.restore_validation_lambda.id +} + +# Configure backup module with full validation +module "source" { + 
source = "../../modules/aws-backup-source" + + # ... other configuration ... + + backup_plan_config_rds = { + enable = true + selection_tag = "NHSE-Enable-Backup" + validation_window_hours = 2 # Give more time for thorough validation + compliance_resource_types = ["RDS"] + restore_testing_overrides = { + dbSubnetGroupName = "my-rds-subnet-group" + dbParameterGroupName = "my-db-parameter-group" + } + } + + # Enable restore validation with connectivity testing + restore_validation_enable = true + restore_validation_db_credentials_secret_name = aws_secretsmanager_secret.db_validation_credentials.name + restore_validation_db_credentials_secret_arn = aws_secretsmanager_secret.db_validation_credentials.arn + restore_validation_expected_subnet_pattern = "rds-private-postgres" + restore_validation_timeout_seconds = 600 + restore_validation_log_retention_days = 14 + + # VPC configuration for Lambda to access RDS in private subnets + restore_validation_vpc_config = { + subnet_ids = var.lambda_subnet_ids # Private subnets with NAT gateway + security_group_ids = [aws_security_group.restore_validation_lambda.id] + } +} +``` + +## Validation Checks Performed + +### 1. Instance Availability Check + +- Verifies the restored RDS instance exists +- Checks that instance status is "available" +- Retrieves endpoint information + +### 2. Instance Configuration Check + +- Validates DB subnet group matches expected pattern +- Verifies engine type and version +- Checks instance is in correct VPC + +### 3. Database Connectivity Check (if credentials provided) + +- Attempts to connect to the database +- Executes a simple query (`SELECT version()`) +- Verifies connection can be established + +### 4. 
Database Structure Check (if credentials provided) + +- Counts tables in the database +- Checks database size +- Verifies schema structure exists + +## Monitoring Validation Results + +### CloudWatch Logs + +Validation results are logged to CloudWatch: + +```text +/aws/lambda/{name_prefix}-backup-restore-validation +``` + +### Example Log Output + +```json +{ + "restore_job_id": "12345678-1234-1234-1234-123456789012", + "db_instance_id": "restored-db-instance-2024-01-15", + "validation_results": { + "instance_available": { + "passed": true, + "message": "Instance status: available" + }, + "instance_configuration": { + "passed": true, + "message": "Configuration verified" + }, + "database_connectivity": { + "passed": true, + "message": "Connection successful" + }, + "database_structure": { + "passed": true, + "message": "Found 25 tables" + } + }, + "overall_status": "PASSED" +} +``` + +### CloudWatch Alarms + +Create alarms to notify on validation failures: + +```hcl +resource "aws_cloudwatch_log_metric_filter" "restore_validation_failures" { + name = "${var.name_prefix}-restore-validation-failures" + log_group_name = "/aws/lambda/${var.name_prefix}-backup-restore-validation" + pattern = "{ $.overall_status = \"FAILED\" }" + + metric_transformation { + name = "RestoreValidationFailures" + namespace = "BackupRestoreTesting" + value = "1" + } +} + +resource "aws_cloudwatch_metric_alarm" "restore_validation_failures" { + alarm_name = "${var.name_prefix}-restore-validation-failures" + comparison_operator = "GreaterThanThreshold" + evaluation_periods = 1 + metric_name = "RestoreValidationFailures" + namespace = "BackupRestoreTesting" + period = 300 + statistic = "Sum" + threshold = 0 + alarm_description = "Alert when restore validation fails" + alarm_actions = [var.sns_topic_arn] +} +``` + +## Troubleshooting + +### Lambda Cannot Connect to RDS + +- Verify Lambda is in correct subnets with NAT gateway access +- Check security group rules allow Lambda → RDS on port 5432 
+- Confirm restored instance uses expected subnet group + +### Credentials Not Working + +- Verify secret exists and contains correct format +- Check Lambda has permission to read the secret +- Ensure credentials have read access to the database + +### Validation Times Out + +- Increase `restore_validation_timeout_seconds` + - Check VPC configuration for network issues +- Review Lambda logs for specific errors + +## Cost Considerations + +- **Lambda invocations**: Once per restore test (typically weekly) +- **Lambda duration**: ~30-60 seconds per validation +- **CloudWatch Logs**: Minimal storage (~1MB per month) +- **VPC**: Data transfer charges apply if Lambda is in VPC + +Estimated monthly cost: **< $1 USD** for weekly testing + +## Security Best Practices + +1. **Use dedicated validation credentials** with read-only access +2. **Rotate credentials regularly** via Secrets Manager +3. **Restrict Lambda IAM role** to minimum required permissions +4. **Use VPC endpoints** for Secrets Manager to avoid internet traffic +5. **Enable CloudWatch Logs encryption** in production +6. **Set appropriate log retention** (14-30 days recommended) diff --git a/infrastructure/modules/aws-backup-source/backup_framework.tf b/infrastructure/modules/aws-backup-source/backup_framework.tf new file mode 100644 index 0000000..afad231 --- /dev/null +++ b/infrastructure/modules/aws-backup-source/backup_framework.tf @@ -0,0 +1,41 @@ +resource "aws_backup_framework" "rds" { + count = var.backup_plan_config_rds.enable ? 1 : 0 + # must be underscores instead of dashes + name = replace("${var.name_prefix}-rds-framework", "-", "_") + description = "${var.project_name} RDS Backup Framework" + + # Evaluates if resources are protected by a backup plan. 
+ control { + name = "BACKUP_RESOURCES_PROTECTED_BY_BACKUP_PLAN" + + scope { + compliance_resource_types = var.backup_plan_config_rds.compliance_resource_types + tags = { + (var.backup_plan_config_rds.selection_tag) = (var.backup_plan_config_rds.selection_tag_value) + } + } + } + + # Evaluates if resources have at least one recovery point created within the past 1 day. + control { + name = "BACKUP_LAST_RECOVERY_POINT_CREATED" + + input_parameter { + name = "recoveryPointAgeUnit" + value = "days" + } + + input_parameter { + name = "recoveryPointAgeValue" + value = "1" + } + + scope { + compliance_resource_types = var.backup_plan_config_rds.compliance_resource_types + tags = { + (var.backup_plan_config_rds.selection_tag) = (var.backup_plan_config_rds.selection_tag_value) + } + } + } + +} diff --git a/infrastructure/modules/aws-backup-source/backup_notification.tf b/infrastructure/modules/aws-backup-source/backup_notification.tf new file mode 100644 index 0000000..44572ee --- /dev/null +++ b/infrastructure/modules/aws-backup-source/backup_notification.tf @@ -0,0 +1,12 @@ +resource "aws_backup_vault_notifications" "backup_notification" { + count = var.enable_notifications ? 1 : 0 + backup_vault_name = aws_backup_vault.main.name + sns_topic_arn = var.notifications_sns_topic_arn != "" ? var.notifications_sns_topic_arn : aws_sns_topic.backup[0].arn + backup_vault_events = [ + "BACKUP_JOB_FAILED", + "RESTORE_JOB_FAILED", + "COPY_JOB_FAILED", + "S3_BACKUP_OBJECT_FAILED", + "S3_RESTORE_OBJECT_FAILED" + ] +} diff --git a/infrastructure/modules/aws-backup-source/backup_plan.tf b/infrastructure/modules/aws-backup-source/backup_plan.tf new file mode 100644 index 0000000..4fa6ebf --- /dev/null +++ b/infrastructure/modules/aws-backup-source/backup_plan.tf @@ -0,0 +1,52 @@ +resource "aws_backup_plan" "rds" { + count = var.backup_plan_config_rds.enable ? 
1 : 0 + name = "${var.name_prefix}-rds-plan" + + dynamic "rule" { + for_each = var.backup_plan_config_rds.rules + content { + recovery_point_tags = { + backup_rule_name = rule.value.name + } + rule_name = rule.value.name + target_vault_name = aws_backup_vault.main.name + schedule = rule.value.schedule + completion_window = rule.value.completion_window + lifecycle { + delete_after = rule.value.lifecycle.delete_after != null ? rule.value.lifecycle.delete_after : null + cold_storage_after = rule.value.lifecycle.cold_storage_after != null ? rule.value.lifecycle.cold_storage_after : null + } + dynamic "copy_action" { + for_each = rule.value.copy_action != null ? rule.value.copy_action : {} + content { + lifecycle { + delete_after = copy_action.value + } + destination_vault_arn = aws_backup_vault.intermediary_vault[0].arn + } + } + } + } +} + +resource "aws_backup_selection" "rds" { + count = var.backup_plan_config_rds.enable ? 1 : 0 + iam_role_arn = aws_iam_role.backup.arn + name = "${var.name_prefix}-rds-selection" + plan_id = aws_backup_plan.rds[0].id + + selection_tag { + key = var.backup_plan_config_rds.selection_tag + type = "STRINGEQUALS" + value = (var.backup_plan_config_rds.selection_tag_value == null) ? "True" : var.backup_plan_config_rds.selection_tag_value + } + condition { + dynamic "string_equals" { + for_each = local.selection_tags_rds_null_checked + content { + key = (try(string_equals.value.key, null) == null) ? null : "aws:ResourceTag/${string_equals.value.key}" + value = try(string_equals.value.value, null) + } + } + } +} diff --git a/infrastructure/modules/aws-backup-source/backup_report_plan.tf b/infrastructure/modules/aws-backup-source/backup_report_plan.tf new file mode 100644 index 0000000..2ab2c58 --- /dev/null +++ b/infrastructure/modules/aws-backup-source/backup_report_plan.tf @@ -0,0 +1,72 @@ +# Create the reports +resource "aws_backup_report_plan" "backup_jobs" { + name = var.name_prefix != null ? 
"${replace(var.name_prefix, "-", "_")}_backup_jobs" : "backup_jobs" # dashes not allowed in backup plan names + description = "Report for showing whether backups ran successfully in the last 24 hours" + + report_delivery_channel { + formats = [ + "JSON" + ] + s3_bucket_name = var.reports_bucket + s3_key_prefix = "backup_jobs" + } + + report_setting { + report_template = "BACKUP_JOB_REPORT" + } +} + +# Create the restore testing completion reports +resource "aws_backup_report_plan" "backup_restore_testing_jobs" { + name = var.name_prefix != null ? "${replace(var.name_prefix, "-", "_")}_backup_restore_testing_jobs" : "backup_restore_testing_jobs" + description = "Report for showing whether backup restore test ran successfully in the last 24 hours" + + report_delivery_channel { + formats = [ + "JSON" + ] + s3_bucket_name = var.reports_bucket + s3_key_prefix = "backup_restore_testing_jobs" + } + + report_setting { + report_template = "RESTORE_JOB_REPORT" + } +} + +resource "aws_backup_report_plan" "resource_compliance" { + name = var.name_prefix != null ? "${replace(var.name_prefix, "-", "_")}_resource_compliance" : "resource_compliance" + description = "Report for showing whether resources are compliant with the framework" + + report_delivery_channel { + formats = [ + "JSON" + ] + s3_bucket_name = var.reports_bucket + s3_key_prefix = "resource_compliance" + } + + report_setting { + framework_arns = local.framework_arn_list + number_of_frameworks = length(local.framework_arn_list) + report_template = "RESOURCE_COMPLIANCE_REPORT" + } +} + +resource "aws_backup_report_plan" "copy_jobs" { + count = var.backup_copy_vault_arn != "" && var.backup_copy_vault_account_id != "" ? 1 : 0 + name = var.name_prefix != null ? 
"${replace(var.name_prefix, "-", "_")}_copy_jobs" : "copy_jobs" + description = "Report for showing whether copies ran successfully in the last 24 hours" + + report_delivery_channel { + formats = [ + "JSON" + ] + s3_bucket_name = var.reports_bucket + s3_key_prefix = "copy_jobs" + } + + report_setting { + report_template = "COPY_JOB_REPORT" + } +} diff --git a/infrastructure/modules/aws-backup-source/backup_restore_testing.tf b/infrastructure/modules/aws-backup-source/backup_restore_testing.tf new file mode 100644 index 0000000..e01f10d --- /dev/null +++ b/infrastructure/modules/aws-backup-source/backup_restore_testing.tf @@ -0,0 +1,27 @@ +resource "aws_backup_restore_testing_plan" "backup_restore_testing_plan" { + name = var.name_prefix != null ? "${replace(var.name_prefix, "-", "_")}_backup_restore_testing_plan" : "backup_restore_testing_plan" + schedule_expression = var.restore_testing_plan_scheduled_expression + start_window_hours = var.restore_testing_plan_start_window + recovery_point_selection { + algorithm = var.restore_testing_plan_algorithm + include_vaults = [aws_backup_vault.main.arn] + recovery_point_types = var.restore_testing_plan_recovery_point_types + selection_window_days = var.restore_testing_plan_selection_window_days + } +} + +resource "aws_backup_restore_testing_selection" "backup_restore_testing_selection_rds" { + count = var.backup_plan_config_rds.enable ? 
1 : 0 + name = "backup_restore_testing_selection_rds" + restore_testing_plan_name = aws_backup_restore_testing_plan.backup_restore_testing_plan.name + iam_role_arn = aws_iam_role.backup.arn + validation_window_hours = var.backup_plan_config_rds.validation_window_hours # number of hours to leave the restored RDS instance available for custom validation checks + protected_resource_type = "RDS" + protected_resource_conditions { + string_equals { + key = "aws:ResourceTag/${var.backup_plan_config_rds.selection_tag}" + value = "True" + } + } + restore_metadata_overrides = local.rds_overrides +} diff --git a/infrastructure/modules/aws-backup-source/backup_vault.tf b/infrastructure/modules/aws-backup-source/backup_vault.tf new file mode 100644 index 0000000..47d2f65 --- /dev/null +++ b/infrastructure/modules/aws-backup-source/backup_vault.tf @@ -0,0 +1,10 @@ +resource "aws_backup_vault" "main" { + name = "${local.resource_name_prefix}-vault" + kms_key_arn = aws_kms_key.aws_backup_key.arn +} + +resource "aws_backup_vault" "intermediary_vault" { + count = var.backup_plan_config_rds.enable ? 
1 : 0 + name = "${var.name_prefix}-intermediary-vault" + kms_key_arn = aws_kms_key.aws_backup_key.arn +} diff --git a/infrastructure/modules/aws-backup-source/backup_vault_policy.tf b/infrastructure/modules/aws-backup-source/backup_vault_policy.tf new file mode 100644 index 0000000..392394d --- /dev/null +++ b/infrastructure/modules/aws-backup-source/backup_vault_policy.tf @@ -0,0 +1,47 @@ +resource "aws_backup_vault_policy" "vault_policy" { + backup_vault_name = aws_backup_vault.main.name + policy = data.aws_iam_policy_document.vault_policy.json +} + +data "aws_iam_policy_document" "vault_policy" { + + + statement { + sid = "DenyApartFromTerraform" + effect = "Deny" + + principals { + type = "AWS" + identifiers = ["*"] + } + + condition { + test = "ArnNotEquals" + values = local.terraform_role_arns + variable = "aws:PrincipalArn" + } + + actions = [ + "backup:DeleteRecoveryPoint", + "backup:PutBackupVaultAccessPolicy", + "backup:UpdateRecoveryPointLifecycle" + ] + + resources = ["*"] + } + dynamic "statement" { + for_each = var.backup_copy_vault_arn != "" && var.backup_copy_vault_account_id != "" ? 
[1] : [] + content { + sid = "AllowAccountToCopyIntoBackupVault" + effect = "Allow" + + actions = ["backup:CopyIntoBackupVault"] + resources = ["*"] + + principals { + type = "AWS" + identifiers = ["arn:aws:iam::${var.backup_copy_vault_account_id}:root"] + } + } + } +} diff --git a/infrastructure/modules/aws-backup-source/data.tf b/infrastructure/modules/aws-backup-source/data.tf new file mode 100644 index 0000000..8dc71f2 --- /dev/null +++ b/infrastructure/modules/aws-backup-source/data.tf @@ -0,0 +1,37 @@ +data "aws_caller_identity" "current" {} + +data "aws_region" "current" {} + +# tflint-ignore: terraform_unused_declarations +data "aws_iam_roles" "roles" { + name_regex = "AWSReservedSSO_Admin_.*" + path_prefix = "/aws-reserved/sso.amazonaws.com/" +} + +locals { + local_account_id = data.aws_caller_identity.current.account_id +} + +###################### + # Terraform Remote State +###################### + +data "terraform_remote_state" "vpc" { + backend = "s3" + + config = { + bucket = "bss-${var.environment_name}-${var.nation}-terraform-state" + key = "terraform-state/vpc.tfstate" + region = "eu-west-2" + } +} + +data "terraform_remote_state" "rds_instance" { + backend = "s3" + + config = { + bucket = "bss-${var.environment_name}-${var.nation}-terraform-state" + key = "terraform-state/rds-instance.tfstate" + region = "eu-west-2" + } +} diff --git a/infrastructure/modules/aws-backup-source/eventbridge.tf b/infrastructure/modules/aws-backup-source/eventbridge.tf new file mode 100644 index 0000000..fb19be3 --- /dev/null +++ b/infrastructure/modules/aws-backup-source/eventbridge.tf @@ -0,0 +1,36 @@ +module "eventbridge" { + source = "terraform-aws-modules/eventbridge/aws" + version = "4.3.0" + + create_bus = false + create_role = false + + rules = { + "${var.name_prefix}-backup-start-cross-account-copy-job" = { + description = "Identify when a new recovery point is created in the intermediary vault" + event_pattern = jsonencode( + { + "source" : 
["aws.backup"], + "account" : ["${data.aws_caller_identity.current.account_id}"], + "region" : ["eu-west-2"], + "detail" : { + "eventName" : ["RecoveryPointCreated"], + "serviceEventDetails" : { + "backupVaultName" : [{ "wildcard" : "*-intermediary-vault" }] + } + } + } + ) + enabled = true + } + } + + targets = { + "${var.name_prefix}-backup-start-cross-account-copy-job" = [ + { + name = "start-cross-account-copy-job" + arn = "arn:aws:lambda:eu-west-2:${data.aws_caller_identity.current.account_id}:function:${var.name_prefix}-backup-start-cross-account-copy-job" + } + ] + } +} diff --git a/infrastructure/modules/aws-backup-source/iam.tf b/infrastructure/modules/aws-backup-source/iam.tf new file mode 100644 index 0000000..3f8310d --- /dev/null +++ b/infrastructure/modules/aws-backup-source/iam.tf @@ -0,0 +1,38 @@ +data "aws_iam_policy_document" "assume_role" { + statement { + effect = "Allow" + + principals { + type = "Service" + identifiers = ["backup.amazonaws.com"] + } + + actions = ["sts:AssumeRole"] + } +} + +resource "aws_iam_role" "backup" { + name = "${var.project_name}-backup" + assume_role_policy = data.aws_iam_policy_document.assume_role.json + permissions_boundary = length(var.iam_role_permissions_boundary) > 0 ? 
var.iam_role_permissions_boundary : null +} + +resource "aws_iam_role_policy_attachment" "backup" { + policy_arn = "arn:aws:iam::aws:policy/service-role/AWSBackupServiceRolePolicyForBackup" + role = aws_iam_role.backup.name +} + +resource "aws_iam_role_policy_attachment" "restore" { + policy_arn = "arn:aws:iam::aws:policy/service-role/AWSBackupServiceRolePolicyForRestores" + role = aws_iam_role.backup.name +} + +resource "aws_iam_role_policy_attachment" "s3_restore" { + policy_arn = "arn:aws:iam::aws:policy/AWSBackupServiceRolePolicyForS3Restore" + role = aws_iam_role.backup.name +} + +resource "aws_iam_role_policy_attachment" "s3_backup" { + policy_arn = "arn:aws:iam::aws:policy/AWSBackupServiceRolePolicyForS3Backup" + role = aws_iam_role.backup.name +} diff --git a/infrastructure/modules/aws-backup-source/kms.tf b/infrastructure/modules/aws-backup-source/kms.tf new file mode 100644 index 0000000..d6bc250 --- /dev/null +++ b/infrastructure/modules/aws-backup-source/kms.tf @@ -0,0 +1,56 @@ +resource "aws_kms_key" "aws_backup_key" { + description = "AWS Backup KMS Key" + deletion_window_in_days = 30 + enable_key_rotation = true + #policy = data.aws_iam_policy_document.backup_key_policy.json +} + +resource "aws_kms_alias" "backup_key" { + name = var.name_prefix != null ? 
"alias/${var.name_prefix}/backup-key" : "alias/${var.environment_name}/backup-key" + target_key_id = aws_kms_key.aws_backup_key.key_id +} + +resource "aws_kms_key_policy" "backup_key_policy" { + key_id = aws_kms_key.aws_backup_key.id + policy = data.aws_iam_policy_document.backup_key_policy.json +} + +data "aws_iam_policy_document" "backup_key_policy" { + #checkov:skip=CKV_AWS_109:See (CERSS-25168) for more info + #checkov:skip=CKV_AWS_111:See (CERSS-25169) for more info + statement { + sid = "AllowBackupUseOfKey" + principals { + type = "Service" + identifiers = ["backup.amazonaws.com"] + } + actions = ["kms:GenerateDataKey", "kms:Decrypt", "kms:Encrypt"] + resources = ["*"] + } + statement { + sid = "EnableIAMUserPermissions" + principals { + type = "AWS" + identifiers = concat(["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"], local.terraform_role_arns) + } + actions = ["kms:*"] + resources = ["*"] + } + statement { + sid = "Allow attachment of persistent resources" + principals { + type = "AWS" + identifiers = ["arn:aws:iam::${var.backup_copy_vault_account_id}:root"] + } + actions = [ + "kms:Encrypt", + "kms:Decrypt", + "kms:ReEncrypt*", + "kms:GenerateDataKey*", + "kms:CreateGrant", + "kms:ListGrants", + "kms:DescribeKey" + ] + resources = ["*"] + } +} diff --git a/infrastructure/modules/aws-backup-source/lambda_copy_job.tf b/infrastructure/modules/aws-backup-source/lambda_copy_job.tf new file mode 100644 index 0000000..1ec5c1b --- /dev/null +++ b/infrastructure/modules/aws-backup-source/lambda_copy_job.tf @@ -0,0 +1,86 @@ +data "aws_iam_policy_document" "lambda_assume_role" { + statement { + effect = "Allow" + principals { + type = "Service" + identifiers = ["lambda.amazonaws.com"] + } + actions = ["sts:AssumeRole"] + } +} + +resource "aws_iam_role" "iam_for_lambda_copy_job" { + count = var.backup_plan_config_rds.enable ? 
1 : 0 + name = "${var.name_prefix}-backup-cross-account-copy-job-lambda" + assume_role_policy = data.aws_iam_policy_document.lambda_assume_role.json +} + +data "aws_iam_policy_document" "lambda_copy_job_permissions" { + version = "2012-10-17" + statement { + effect = "Allow" + actions = ["kms:Decrypt", "kms:GenerateDataKey"] + resources = [aws_kms_key.aws_backup_key.arn] + } + statement { + effect = "Allow" + actions = [ + "backup:StartCopyJob", + "backup:DescribeRecoveryPoint", + "backup:ListRecoveryPointsByBackupVault" + ] + resources = ["*"] + } + statement { + effect = "Allow" + actions = ["iam:PassRole"] + resources = [aws_iam_role.backup.arn] + } +} + +resource "aws_iam_role_policy_attachment" "lambda_role_policy_attachment" { + count = var.backup_plan_config_rds.enable ? 1 : 0 + role = aws_iam_role.iam_for_lambda_copy_job[0].name + policy_arn = "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" +} + +resource "aws_iam_role_policy" "cross_account_iam_permissions" { + count = var.backup_plan_config_rds.enable ? 1 : 0 + name = "${var.name_prefix}-cross-account-copy-job-lambda" + role = aws_iam_role.iam_for_lambda_copy_job[0].id + policy = data.aws_iam_policy_document.lambda_copy_job_permissions.json +} + +data "archive_file" "start_cross_account_copy_job_lambda_zip" { + type = "zip" + source_dir = "${path.module}/../../lambdas/cross-account-copy-job/resources" + output_path = "${path.module}/.terraform/archive_files/start_cross_account_copy_job_lambda.zip" +} + +resource "aws_lambda_function" "start_cross_account_copy_job_lambda" { + count = var.backup_plan_config_rds.enable ? 
1 : 0 + filename = data.archive_file.start_cross_account_copy_job_lambda_zip.output_path + source_code_hash = data.archive_file.start_cross_account_copy_job_lambda_zip.output_base64sha256 + function_name = "${var.name_prefix}-backup-start-cross-account-copy-job" + role = aws_iam_role.iam_for_lambda_copy_job[0].arn + handler = "start_cross_account_copy_job.lambda_handler" + runtime = "python3.12" + environment { + variables = { + aws_account_id = data.aws_caller_identity.current.account_id, + backup_account_id = var.backup_copy_vault_account_id, + backup_copy_vault_arn = var.backup_copy_vault_arn, + backup_role_arn = aws_iam_role.backup.arn, + destination_vault_retention_period = var.destination_vault_retention_period + } + } +} + +resource "aws_lambda_permission" "allow_eventbridge" { + count = var.backup_plan_config_rds.enable ? 1 : 0 + statement_id = "AllowExecutionFromEventbridge" + action = "lambda:InvokeFunction" + function_name = aws_lambda_function.start_cross_account_copy_job_lambda[0].function_name + principal = "events.amazonaws.com" + source_arn = "arn:aws:events:eu-west-2:${data.aws_caller_identity.current.account_id}:rule/${var.name_prefix}-backup-start-cross-account-copy-job-rule" +} diff --git a/infrastructure/modules/aws-backup-source/lambda_copy_recovery_point.tf b/infrastructure/modules/aws-backup-source/lambda_copy_recovery_point.tf new file mode 100644 index 0000000..347204e --- /dev/null +++ b/infrastructure/modules/aws-backup-source/lambda_copy_recovery_point.tf @@ -0,0 +1,81 @@ +data "archive_file" "lambda_copy_recovery_point_zip" { + count = var.lambda_copy_recovery_point_enable ? 1 : 0 + type = "zip" + source_dir = "${path.module}/../../lambdas/copy_recovery_point/resources" + output_path = "${path.module}/.terraform/archive_files/lambda_copy_recovery_point.zip" +} + +resource "aws_iam_role" "iam_for_lambda_copy_recovery_point" { + count = var.lambda_copy_recovery_point_enable ? 
1 : 0 + name = "${local.resource_name_prefix}-backup-copy-recovery-point-lambda" + + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Action = "sts:AssumeRole" + Effect = "Allow" + Principal = { Service = "lambda.amazonaws.com" } + }] + }) +} + +resource "aws_iam_policy" "iam_policy_for_lambda_copy_recovery_point" { + count = var.lambda_copy_recovery_point_enable ? 1 : 0 + name = "${local.resource_name_prefix}-backup-copy-recovery-point-lambda" + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Action = [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents" + ] + Resource = "arn:aws:logs:*:*:*" + Effect = "Allow" + }, + { + Action = [ + "backup:StartCopyJob", + "backup:DescribeCopyJob", + "backup:ListRecoveryPointsByBackupVault" + ] + Resource = "*" + Effect = "Allow" + }, + { + Action = ["sts:AssumeRole"] + Resource = var.lambda_copy_recovery_point_assume_role_arn == "" ? null : var.lambda_copy_recovery_point_assume_role_arn + Effect = "Allow" + } + ] + }) +} + +resource "aws_iam_role_policy_attachment" "lambda_copy_recovery_point_policy_attach" { + count = var.lambda_copy_recovery_point_enable ? 1 : 0 + role = aws_iam_role.iam_for_lambda_copy_recovery_point[0].name + policy_arn = aws_iam_policy.iam_policy_for_lambda_copy_recovery_point[0].arn +} + +resource "aws_lambda_function" "lambda_copy_recovery_point" { + count = var.lambda_copy_recovery_point_enable ? 
1 : 0 + function_name = "${local.resource_name_prefix}-backup-copy-recovery-point" + role = aws_iam_role.iam_for_lambda_copy_recovery_point[0].arn + handler = "lambda_function.lambda_handler" + runtime = "python3.12" + filename = data.archive_file.lambda_copy_recovery_point_zip[0].output_path + source_code_hash = data.archive_file.lambda_copy_recovery_point_zip[0].output_base64sha256 + timeout = var.lambda_copy_recovery_point_max_wait_minutes * 60 + + environment { + variables = { + POLL_INTERVAL_SECONDS = var.lambda_copy_recovery_point_poll_interval_seconds + MAX_WAIT_MINUTES = var.lambda_copy_recovery_point_max_wait_minutes + DESTINATION_VAULT_ARN = var.lambda_copy_recovery_point_destination_vault_arn != "" ? var.lambda_copy_recovery_point_destination_vault_arn : var.backup_copy_vault_arn + SOURCE_VAULT_ARN = var.lambda_copy_recovery_point_source_vault_arn != "" ? var.lambda_copy_recovery_point_source_vault_arn : aws_backup_vault.main.arn + ASSUME_ROLE_ARN = var.lambda_copy_recovery_point_assume_role_arn + } + } +} diff --git a/infrastructure/modules/aws-backup-source/lambda_restore_validation.tf b/infrastructure/modules/aws-backup-source/lambda_restore_validation.tf new file mode 100644 index 0000000..6f9de0c --- /dev/null +++ b/infrastructure/modules/aws-backup-source/lambda_restore_validation.tf @@ -0,0 +1,219 @@ +# Lambda function to validate restored RDS instances during backup restore testing +# This Lambda is triggered by EventBridge when a restore testing job completes + +data "archive_file" "lambda_restore_validation_zip" { + count = var.backup_plan_config_rds.enable && var.restore_validation_enable ? 1 : 0 + type = "zip" + source_dir = "${path.module}/../../lambdas/restore_validation/resources" + output_path = "${path.module}/.terraform/archive_files/lambda_restore_validation.zip" +} + +resource "aws_iam_role" "restore_validation_lambda" { + count = var.backup_plan_config_rds.enable && var.restore_validation_enable ? 
1 : 0 + name = "${local.resource_name_prefix}-backup-restore-validation-lambda" + + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Action = "sts:AssumeRole" + Effect = "Allow" + Principal = { Service = "lambda.amazonaws.com" } + }] + }) + + permissions_boundary = length(var.iam_role_permissions_boundary) > 0 ? var.iam_role_permissions_boundary : null +} + +resource "aws_iam_policy" "restore_validation_lambda" { + count = var.backup_plan_config_rds.enable && var.restore_validation_enable ? 1 : 0 + name = "${local.resource_name_prefix}-backup-restore-validation-lambda" + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Sid = "CloudWatchLogs" + Action = [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents" + ] + Resource = "arn:aws:logs:*:*:*" + Effect = "Allow" + }, + { + Sid = "BackupDescribe" + Action = [ + "backup:DescribeRestoreJob", + "backup:PutRestoreValidationResult" + ] + Resource = "*" + Effect = "Allow" + }, + { + Sid = "RDSDescribe" + Action = [ + "rds:DescribeDBInstances", + "rds:DescribeDBClusters", + "rds:ListTagsForResource" + ] + Resource = "*" + Effect = "Allow" + }, + { + Sid = "SecretsManagerRead" + Action = ["secretsmanager:GetSecretValue"] + Resource = [ + "arn:aws:secretsmanager:${data.aws_region.current.region}:${local.local_account_id}:secret:bss-${var.environment_name}-${var.nation}-bss_user-*" + ] + Effect = "Allow" + }, + { + Sid = "EC2NetworkInfo" + Action = [ + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs" + ] + Resource = "*" + Effect = "Allow" + } + ] + }) +} + +resource "aws_iam_role_policy_attachment" "restore_validation_lambda_policy" { + count = var.backup_plan_config_rds.enable && var.restore_validation_enable ? 
1 : 0 + role = aws_iam_role.restore_validation_lambda[0].name + policy_arn = aws_iam_policy.restore_validation_lambda[0].arn +} + +# VPC configuration for Lambda to access RDS in private subnets +resource "aws_iam_role_policy_attachment" "restore_validation_lambda_vpc" { + count = var.backup_plan_config_rds.enable && var.restore_validation_enable ? 1 : 0 + role = aws_iam_role.restore_validation_lambda[0].name + policy_arn = "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" +} + + +module "lambda_layer" { + source = "../../modules/lambda-layer" + name_prefix = var.name_prefix + layer_name = "psycopg" + compatible_runtimes = [var.python_version] + description = "Lambda layer for calling postgres" +} + +resource "aws_lambda_function" "restore_validation" { + count = var.backup_plan_config_rds.enable && var.restore_validation_enable ? 1 : 0 + function_name = "${local.resource_name_prefix}-backup-restore-validation" + role = aws_iam_role.restore_validation_lambda[0].arn + handler = "lambda_function.lambda_handler" + runtime = "python3.12" + filename = data.archive_file.lambda_restore_validation_zip[0].output_path + source_code_hash = data.archive_file.lambda_restore_validation_zip[0].output_base64sha256 + timeout = var.restore_validation_timeout_seconds + layers = [module.lambda_layer.layer_arn] + environment { + variables = { + DB_CREDENTIALS_SECRET = var.restore_validation_db_credentials_secret_name + EXPECTED_SUBNET_PATTERN = var.restore_validation_expected_subnet_pattern + RESTORE_DB_NAME = var.restore_testing_db_name + } + } + + vpc_config { + subnet_ids = data.terraform_remote_state.vpc.outputs.vpc_private_subnet_ids + security_group_ids = [aws_security_group.lambda.id] + } + + depends_on = [ + aws_iam_role_policy_attachment.restore_validation_lambda_policy + ] +} + +###################### +# Security Group +###################### + +resource "aws_security_group" "lambda" { + name = "${var.name_prefix}-dbbackup-restore-test-lambda" + 
description = "Security group for dbbackup-restore-test Lambda" + vpc_id = data.terraform_remote_state.vpc.outputs.vpc_id +} + +resource "aws_vpc_security_group_egress_rule" "lambda_egress_https" { + security_group_id = aws_security_group.lambda.id + cidr_ipv4 = "0.0.0.0/0" + from_port = 443 + ip_protocol = "tcp" + to_port = 443 + description = "Allow outbound HTTPS traffic for AWS service calls" + + tags = { + Name = "Allow Outbound 443" + } +} + +resource "aws_vpc_security_group_egress_rule" "lambda_egress_for_rds" { + security_group_id = aws_security_group.lambda.id + referenced_security_group_id = data.terraform_remote_state.rds_instance.outputs.rds_sg_id + from_port = 5432 + ip_protocol = "tcp" + to_port = 5432 + description = "A rule to allow outbound connections from the lambda Restore Validation SG to the RDS" +} + +resource "aws_vpc_security_group_ingress_rule" "lambda_ingress_for_rds" { + security_group_id = data.terraform_remote_state.rds_instance.outputs.rds_sg_id + referenced_security_group_id = aws_security_group.lambda.id + + from_port = 5432 + ip_protocol = "tcp" + to_port = 5432 + description = "A rule to allow inbound connections to RDS from the lambda Restore Validation SG" + +} + +# EventBridge rule to trigger validation when restore testing completes +resource "aws_cloudwatch_event_rule" "restore_testing_complete" { + count = var.backup_plan_config_rds.enable && var.restore_validation_enable ? 1 : 0 + name = "${local.resource_name_prefix}-backup-restore-testing-complete" + description = "Trigger validation when AWS Backup restore testing completes" + + event_pattern = jsonencode({ + source = ["aws.backup"] + detail-type = ["Restore Job State Change"] + detail = { + status = ["COMPLETED"] + } + }) +} + +resource "aws_cloudwatch_event_target" "restore_validation" { + count = var.backup_plan_config_rds.enable && var.restore_validation_enable ? 
1 : 0 + rule = aws_cloudwatch_event_rule.restore_testing_complete[0].name + target_id = "RestoreValidationLambda" + arn = aws_lambda_function.restore_validation[0].arn +} + +resource "aws_lambda_permission" "eventbridge_invoke_validation" { + count = var.backup_plan_config_rds.enable && var.restore_validation_enable ? 1 : 0 + statement_id = "AllowExecutionFromEventBridge" + action = "lambda:InvokeFunction" + function_name = aws_lambda_function.restore_validation[0].function_name + principal = "events.amazonaws.com" + source_arn = aws_cloudwatch_event_rule.restore_testing_complete[0].arn +} + +# CloudWatch Log Group for Lambda logs +resource "aws_cloudwatch_log_group" "restore_validation" { + count = var.backup_plan_config_rds.enable && var.restore_validation_enable ? 1 : 0 + name = "/aws/lambda/${local.resource_name_prefix}-backup-restore-validation" + retention_in_days = var.restore_validation_log_retention_days + + tags = { + Name = "${local.resource_name_prefix}-backup-restore-validation-logs" + Environment = var.environment_name + } +} diff --git a/infrastructure/modules/aws-backup-source/locals.tf b/infrastructure/modules/aws-backup-source/locals.tf new file mode 100644 index 0000000..3e2d635 --- /dev/null +++ b/infrastructure/modules/aws-backup-source/locals.tf @@ -0,0 +1,11 @@ +locals { + resource_name_prefix = var.name_prefix != null ? var.name_prefix : "${data.aws_region.current.region}-${data.aws_caller_identity.current.account_id}-backup" + selection_tag_value_rds_null_checked = (var.backup_plan_config_rds.selection_tag_value == null) ? "True" : var.backup_plan_config_rds.selection_tag_value + selection_tags_rds_null_checked = (var.backup_plan_config_rds.selection_tags == null) ? [{ "key" : var.backup_plan_config_rds.selection_tag, "value" : local.selection_tag_value_rds_null_checked }] : var.backup_plan_config_rds.selection_tags + framework_arn_list = flatten(concat( + var.backup_plan_config_rds.enable ? 
[aws_backup_framework.rds[0].arn] : [] + )) + #aurora_overrides = var.backup_plan_config_aurora.restore_testing_overrides == null ? null : var.backup_plan_config_aurora.restore_testing_overrides + rds_overrides = var.backup_plan_config_rds.restore_testing_overrides == null ? null : var.backup_plan_config_rds.restore_testing_overrides + terraform_role_arns = length(var.terraform_role_arns) > 0 ? var.terraform_role_arns : [var.terraform_role_arn] +} diff --git a/infrastructure/modules/aws-backup-source/outputs.tf b/infrastructure/modules/aws-backup-source/outputs.tf new file mode 100644 index 0000000..8856d0b --- /dev/null +++ b/infrastructure/modules/aws-backup-source/outputs.tf @@ -0,0 +1,30 @@ +output "backup_role_arn" { + value = aws_iam_role.backup.arn + description = "ARN of the backup role" +} + +output "backup_vault_arn" { + value = aws_backup_vault.main.arn + description = "ARN of the vault" +} + +output "backup_vault_name" { + value = aws_backup_vault.main.name + description = "Name of the vault" +} + +output "restore_validation_lambda_arn" { + value = var.backup_plan_config_rds.enable && var.restore_validation_enable ? aws_lambda_function.restore_validation[0].arn : null + description = "ARN of the restore validation Lambda function" +} + +output "restore_validation_lambda_name" { + value = var.backup_plan_config_rds.enable && var.restore_validation_enable ? aws_lambda_function.restore_validation[0].function_name : null + description = "Name of the restore validation Lambda function" +} + +output "restore_validation_eventbridge_rule_name" { + value = var.backup_plan_config_rds.enable && var.restore_validation_enable ? 
aws_cloudwatch_event_rule.restore_testing_complete[0].name : null + description = "Name of the EventBridge rule that triggers restore validation" +} + diff --git a/infrastructure/modules/aws-backup-source/readme.md b/infrastructure/modules/aws-backup-source/readme.md new file mode 100644 index 0000000..1c09931 --- /dev/null +++ b/infrastructure/modules/aws-backup-source/readme.md @@ -0,0 +1,161 @@ +# AWS Backup Module + +The AWS Backup Module helps automates the setup of AWS Backup resources in a source account. It streamlines the process of creating, managing, and standardising backup configurations. + +## Example + +```terraform +module "test_aws_backup" { + source = "./modules/aws-backup" + + environment_name = "environment_name" + bootstrap_kms_key_arn = kms_key[0].arn + project_name = "testproject" + reports_bucket = "compliance-reports" + terraform_role_arn = data.aws_iam_role.terraform_role.arn +} +``` + + + +## Requirements + +| Name | Version | +| ------------------------------------------------------------------------ | -------- | +| [terraform](#requirement_terraform) | >= 1.9.5 | +| [archive](#requirement_archive) | ~> 2 | +| [aws](#requirement_aws) | > 6 | + +## Providers + +| Name | Version | +| ------------------------------------------------------------------ | ------- | +| [archive](#provider_archive) | ~> 2 | +| [aws](#provider_aws) | > 6 | +| [terraform](#provider_terraform) | n/a | + +## Modules + +| Name | Source | Version | +| ----------------------------------------------------------------------- | ------------------------------------- | ------- | +| [eventbridge](#module_eventbridge) | terraform-aws-modules/eventbridge/aws | 4.3.0 | +| [lambda_layer](#module_lambda_layer) | ../../modules/lambda-layer | n/a | + +## Resources + +| Name | Type | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------- 
| +| [aws_backup_framework.rds](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/backup_framework) | resource | +| [aws_backup_plan.rds](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/backup_plan) | resource | +| [aws_backup_report_plan.backup_jobs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/backup_report_plan) | resource | +| [aws_backup_report_plan.backup_restore_testing_jobs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/backup_report_plan) | resource | +| [aws_backup_report_plan.copy_jobs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/backup_report_plan) | resource | +| [aws_backup_report_plan.resource_compliance](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/backup_report_plan) | resource | +| [aws_backup_restore_testing_plan.backup_restore_testing_plan](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/backup_restore_testing_plan) | resource | +| [aws_backup_restore_testing_selection.backup_restore_testing_selection_rds](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/backup_restore_testing_selection) | resource | +| [aws_backup_selection.rds](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/backup_selection) | resource | +| [aws_backup_vault.intermediary_vault](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/backup_vault) | resource | +| [aws_backup_vault.main](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/backup_vault) | resource | +| [aws_backup_vault_notifications.backup_notification](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/backup_vault_notifications) | resource | +| [aws_backup_vault_policy.vault_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/backup_vault_policy) | 
resource | +| [aws_cloudwatch_event_rule.restore_testing_complete](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_rule) | resource | +| [aws_cloudwatch_event_target.restore_validation](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_target) | resource | +| [aws_cloudwatch_log_group.restore_validation](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group) | resource | +| [aws_iam_policy.iam_policy_for_lambda_copy_recovery_point](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | +| [aws_iam_policy.restore_validation_lambda](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | +| [aws_iam_role.backup](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [aws_iam_role.iam_for_lambda_copy_job](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [aws_iam_role.iam_for_lambda_copy_recovery_point](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [aws_iam_role.restore_validation_lambda](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [aws_iam_role_policy.cross_account_iam_permissions](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy) | resource | +| [aws_iam_role_policy_attachment.backup](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_iam_role_policy_attachment.lambda_copy_recovery_point_policy_attach](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| 
[aws_iam_role_policy_attachment.lambda_role_policy_attachment](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_iam_role_policy_attachment.restore](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_iam_role_policy_attachment.restore_validation_lambda_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_iam_role_policy_attachment.restore_validation_lambda_vpc](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_iam_role_policy_attachment.s3_backup](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_iam_role_policy_attachment.s3_restore](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_kms_alias.backup_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_alias) | resource | +| [aws_kms_key.aws_backup_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource | +| [aws_kms_key_policy.backup_key_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key_policy) | resource | +| [aws_lambda_function.lambda_copy_recovery_point](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lambda_function) | resource | +| [aws_lambda_function.restore_validation](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lambda_function) | resource | +| [aws_lambda_function.start_cross_account_copy_job_lambda](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lambda_function) | resource | +| 
[aws_lambda_permission.allow_eventbridge](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lambda_permission) | resource | +| [aws_lambda_permission.eventbridge_invoke_validation](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lambda_permission) | resource | +| [aws_security_group.lambda](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | +| [aws_sns_topic.backup](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/sns_topic) | resource | +| [aws_sns_topic_subscription.aws_backup_notifications_email_target](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/sns_topic_subscription) | resource | +| [aws_vpc_security_group_egress_rule.lambda_egress_for_rds](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/vpc_security_group_egress_rule) | resource | +| [aws_vpc_security_group_egress_rule.lambda_egress_https](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/vpc_security_group_egress_rule) | resource | +| [aws_vpc_security_group_ingress_rule.lambda_ingress_for_rds](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/vpc_security_group_ingress_rule) | resource | +| [archive_file.lambda_copy_recovery_point_zip](https://registry.terraform.io/providers/hashicorp/archive/latest/docs/data-sources/file) | data source | +| [archive_file.lambda_restore_validation_zip](https://registry.terraform.io/providers/hashicorp/archive/latest/docs/data-sources/file) | data source | +| [archive_file.start_cross_account_copy_job_lambda_zip](https://registry.terraform.io/providers/hashicorp/archive/latest/docs/data-sources/file) | data source | +| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | +| 
[aws_iam_policy_document.allow_backup_to_sns](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.assume_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.backup_key_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.lambda_assume_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.lambda_copy_job_permissions](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.vault_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_roles.roles](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_roles) | data source | +| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | +| [terraform_remote_state.rds_instance](https://registry.terraform.io/providers/hashicorp/terraform/latest/docs/data-sources/remote_state) | data source | +| [terraform_remote_state.vpc](https://registry.terraform.io/providers/hashicorp/terraform/latest/docs/data-sources/remote_state) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | :------: | +| [api_endpoint](#input_api_endpoint) | API endpoint to send post build version notifications to | `string` | `""` | no | +| [api_token](#input_api_token) | API token to authenticate with the API endpoint | `string` | `""` | no | +| 
[backup_copy_vault_account_id](#input_backup_copy_vault_account_id) | The account id of the destination backup vault for allowing restores back into the source account. | `string` | `""` | no | +| [backup_copy_vault_arn](#input_backup_copy_vault_arn) | The arn of the destination backup vault for cross-account backup copies. | `string` | `""` | no | +| [backup_plan_config_rds](#input_backup_plan_config_rds) | Configuration for backup plans with RDS |
object({
enable = bool
selection_tag = string
selection_tag_value = optional(string)
selection_tags = optional(list(object({
key = optional(string)
value = optional(string)
})))
compliance_resource_types = list(string)
restore_testing_overrides = optional(map(string))
validation_window_hours = optional(number)
rules = optional(list(object({
name = string
schedule = string
completion_window = optional(number)
enable_continuous_backup = optional(bool)
lifecycle = object({
delete_after = number
cold_storage_after = optional(number)
})
copy_action = optional(object({
delete_after = optional(number)
}))
})))
})
|
{
"compliance_resource_types": [
"RDS"
],
"enable": true,
"rules": [
{
"completion_window": 24,
"copy_action": {
"delete_after": 365
},
"lifecycle": {
"delete_after": 35
},
"name": "rds_daily_kept_5_weeks",
"schedule": "cron(0 0 * * ? *)"
},
{
"completion_window": 48,
"copy_action": {
"delete_after": 365
},
"lifecycle": {
"delete_after": 90
},
"name": "rds_weekly_kept_3_months",
"schedule": "cron(0 1 ? * SUN *)"
},
{
"completion_window": 72,
"copy_action": {
"delete_after": 365
},
"lifecycle": {
"cold_storage_after": 30,
"delete_after": 2555
},
"name": "rds_monthly_kept_7_years",
"schedule": "cron(0 2 1 * ? *)"
}
],
"selection_tag": "BackupRDS",
"selection_tag_value": "True",
"selection_tags": [],
"validation_window_hours": 1
}
| no | +| [bootstrap_kms_key_arn](#input_bootstrap_kms_key_arn) | The arn of the bootstrap KMS key used for encryption at rest of the SNS topic. | `string` | n/a | yes | +| [deletion_allowed_principal_arns](#input_deletion_allowed_principal_arns) | List of ARNs of principals allowed to delete backups. | `list(string)` | `null` | no | +| [destination_vault_retention_period](#input_destination_vault_retention_period) | Retention period for recovery points made with the copy job lambda | `number` | `365` | no | +| [enable_notifications](#input_enable_notifications) | Flag to enable backup notifications. | `bool` | `false` | no | +| [environment_name](#input_environment_name) | The name of the environment where AWS Backup is configured. | `string` | n/a | yes | +| [iam_role_permissions_boundary](#input_iam_role_permissions_boundary) | Optional permissions boundary arn for backup role | `string` | `""` | no | +| [lambda_copy_recovery_point_assume_role_arn](#input_lambda_copy_recovery_point_assume_role_arn) | arn of role in destination account the lambda assumes to initiate the copy job (if required for cross-account). | `string` | `""` | no | +| [lambda_copy_recovery_point_destination_vault_arn](#input_lambda_copy_recovery_point_destination_vault_arn) | Destination vault arn containing the recovery point to be copied back (the air-gapped vault). | `string` | `""` | no | +| [lambda_copy_recovery_point_enable](#input_lambda_copy_recovery_point_enable) | Flag to enable the copy recovery point lambda (copy recovery point from destination vault back to source). | `bool` | `false` | no | +| [lambda_copy_recovery_point_max_wait_minutes](#input_lambda_copy_recovery_point_max_wait_minutes) | Maximum number of minutes to wait for a copy job to reach a terminal state before returning running status. 
| `number` | `10` | no | +| [lambda_copy_recovery_point_poll_interval_seconds](#input_lambda_copy_recovery_point_poll_interval_seconds) | Polling interval in seconds for copy job status checks. | `number` | `30` | no | +| [lambda_copy_recovery_point_source_vault_arn](#input_lambda_copy_recovery_point_source_vault_arn) | Source vault arn to which the recovery point will be copied back. | `string` | `""` | no | +| [name_prefix](#input_name_prefix) | Name prefix for vault resources | `string` | `null` | no | +| [nation](#input_nation) | The nation this environment is for (e.g. en, ni) | `string` | n/a | yes | +| [notifications_sns_topic_arn](#input_notifications_sns_topic_arn) | The arn of the SNS topic to use for backup notifications. | `string` | `""` | no | +| [notifications_target_email_address](#input_notifications_target_email_address) | The email address to which backup notifications will be sent via SNS. | `string` | `""` | no | +| [project_name](#input_project_name) | The name of the project this relates to. | `string` | n/a | yes | +| [python_version](#input_python_version) | The Python version to use for the Lambda function | `string` | `"3.12"` | no | +| [reports_bucket](#input_reports_bucket) | Bucket to drop backup reports into | `string` | n/a | yes | +| [restore_testing_db_name](#input_restore_testing_db_name) | Name of the database to use for restore validation | `string` | n/a | yes | +| [restore_testing_plan_algorithm](#input_restore_testing_plan_algorithm) | Algorithm of the Recovery Selection Point | `string` | `"LATEST_WITHIN_WINDOW"` | no | +| [restore_testing_plan_recovery_point_types](#input_restore_testing_plan_recovery_point_types) | Recovery Point Types | `list(string)` |
[
"SNAPSHOT"
]
| no | +| [restore_testing_plan_scheduled_expression](#input_restore_testing_plan_scheduled_expression) | Scheduled Expression of Recovery Selection Point | `string` | `"cron(0 1 ? * SUN *)"` | no | +| [restore_testing_plan_selection_window_days](#input_restore_testing_plan_selection_window_days) | Selection window days | `number` | `7` | no | +| [restore_testing_plan_start_window](#input_restore_testing_plan_start_window) | Start window from the scheduled time during which the test should start | `number` | `1` | no | +| [restore_validation_db_credentials_secret_name](#input_restore_validation_db_credentials_secret_name) | Name of the Secrets Manager secret containing database credentials for connectivity testing | `string` | n/a | yes | +| [restore_validation_enable](#input_restore_validation_enable) | Enable automated validation of restored RDS instances during backup restore testing | `bool` | `false` | no | +| [restore_validation_expected_subnet_pattern](#input_restore_validation_expected_subnet_pattern) | Expected pattern in the DB subnet group name for configuration validation | `string` | n/a | yes | +| [restore_validation_log_retention_days](#input_restore_validation_log_retention_days) | Number of days to retain restore validation Lambda logs | `number` | `30` | no | +| [restore_validation_timeout_seconds](#input_restore_validation_timeout_seconds) | Timeout for the restore validation Lambda function in seconds | `number` | `300` | no | +| [terraform_role_arn](#input_terraform_role_arn) | arn of Terraform role used to deploy to account (deprecated, please swap to terraform_role_arns) | `string` | `""` | no | +| [terraform_role_arns](#input_terraform_role_arns) | arn of Terraform roles used to deploy to account, defaults to caller arn if list is empty | `list(string)` | `[]` | no | + +## Outputs + +| Name | Description | +| 
----------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------- | +| [backup_role_arn](#output_backup_role_arn) | arn of the of the backup role | +| [backup_vault_arn](#output_backup_vault_arn) | arn of the of the vault | +| [backup_vault_name](#output_backup_vault_name) | Name of the of the vault | +| [restore_validation_eventbridge_rule_name](#output_restore_validation_eventbridge_rule_name) | Name of the EventBridge rule that triggers restore validation | +| [restore_validation_lambda_arn](#output_restore_validation_lambda_arn) | arn of the restore validation Lambda function | +| [restore_validation_lambda_name](#output_restore_validation_lambda_name) | Name of the restore validation Lambda function | + + diff --git a/infrastructure/modules/aws-backup-source/sns.tf b/infrastructure/modules/aws-backup-source/sns.tf new file mode 100644 index 0000000..cba7a34 --- /dev/null +++ b/infrastructure/modules/aws-backup-source/sns.tf @@ -0,0 +1,36 @@ +# new SNS topic for backup notifications created if an SNS topic ARN is not provided +resource "aws_sns_topic" "backup" { + count = var.notifications_sns_topic_arn == "" ? 1 : 0 + name = "${var.name_prefix}-notifications" + kms_master_key_id = var.bootstrap_kms_key_arn + policy = data.aws_iam_policy_document.allow_backup_to_sns.json +} + +data "aws_iam_policy_document" "allow_backup_to_sns" { + policy_id = "backup" + + statement { + actions = [ + "SNS:Publish", + ] + + effect = "Allow" + + principals { + type = "Service" + identifiers = ["backup.amazonaws.com"] + } + + resources = ["*"] + + sid = "allow_backup" + } +} + +resource "aws_sns_topic_subscription" "aws_backup_notifications_email_target" { + count = var.notifications_target_email_address != "" ? 
1 : 0 + topic_arn = aws_sns_topic.backup[0].arn + protocol = "email" + endpoint = var.notifications_target_email_address + filter_policy = jsonencode({ "State" : [{ "anything-but" : "COMPLETED" }] }) +} diff --git a/infrastructure/modules/aws-backup-source/variables.tf b/infrastructure/modules/aws-backup-source/variables.tf new file mode 100644 index 0000000..87ae903 --- /dev/null +++ b/infrastructure/modules/aws-backup-source/variables.tf @@ -0,0 +1,285 @@ +variable "project_name" { + description = "The name of the project this relates to." + type = string +} + +variable "environment_name" { + description = "The name of the environment where AWS Backup is configured." + type = string +} +variable "nation" { + description = "The nation this environment is for (e.g. en, ni)" + type = string +} + +variable "notifications_target_email_address" { + description = "The email address to which backup notifications will be sent via SNS." + type = string + default = "" +} + +variable "notifications_sns_topic_arn" { + description = "The ARN of the SNS topic to use for backup notifications." + type = string + default = "" +} + +variable "enable_notifications" { + description = "Flag to enable backup notifications." + type = bool + default = false +} + +variable "bootstrap_kms_key_arn" { + description = "The ARN of the bootstrap KMS key used for encryption at rest of the SNS topic." 
+ type = string +} + +variable "reports_bucket" { + description = "Bucket to drop backup reports into" + type = string +} + +variable "terraform_role_arn" { + description = "ARN of Terraform role used to deploy to account (deprecated, please swap to terraform_role_arns)" + type = string + default = "" +} + +variable "terraform_role_arns" { + description = "ARN of Terraform roles used to deploy to account, defaults to caller arn if list is empty" + type = list(string) + default = [] +} + +variable "deletion_allowed_principal_arns" { + description = "List of ARNs of principals allowed to delete backups." + type = list(string) + default = null + nullable = true +} + +variable "destination_vault_retention_period" { + description = "Retention period for recovery points made with the copy job lambda" + type = number + default = 365 +} + +variable "restore_testing_plan_algorithm" { + description = "Algorithm of the Recovery Selection Point" + type = string + default = "LATEST_WITHIN_WINDOW" +} + +variable "restore_testing_plan_start_window" { + description = "Start window from the scheduled time during which the test should start" + type = number + default = 1 +} + +variable "restore_testing_plan_scheduled_expression" { + description = "Scheduled Expression of Recovery Selection Point" + type = string + default = "cron(0 1 ? * SUN *)" +} + +variable "restore_testing_plan_recovery_point_types" { + description = "Recovery Point Types" + type = list(string) + default = ["SNAPSHOT"] +} + +variable "restore_testing_plan_selection_window_days" { + description = "Selection window days" + type = number + default = 7 +} + +variable "backup_copy_vault_arn" { + description = "The ARN of the destination backup vault for cross-account backup copies." + type = string + default = "" +} + +variable "backup_copy_vault_account_id" { + description = "The account id of the destination backup vault for allowing restores back into the source account." 
+ type = string + default = "" +} + +variable "name_prefix" { + description = "Name prefix for vault resources" + type = string + default = null + validation { + condition = var.name_prefix == null || can(regex("^[^0-9]*$", var.name_prefix)) + error_message = "The name_prefix must not contain any numbers." + } +} + +variable "backup_plan_config_rds" { + description = "Configuration for backup plans with RDS" + type = object({ + enable = bool + selection_tag = string + selection_tag_value = optional(string) + selection_tags = optional(list(object({ + key = optional(string) + value = optional(string) + }))) + compliance_resource_types = list(string) + restore_testing_overrides = optional(map(string)) + validation_window_hours = optional(number) + rules = optional(list(object({ + name = string + schedule = string + completion_window = optional(number) + enable_continuous_backup = optional(bool) + lifecycle = object({ + delete_after = number + cold_storage_after = optional(number) + }) + copy_action = optional(object({ + delete_after = optional(number) + })) + }))) + }) + default = { + enable = true + selection_tag = "BackupRDS" + selection_tag_value = "True" + selection_tags = [] + compliance_resource_types = ["RDS"] + validation_window_hours = 1 + rules = [ + { + name = "rds_daily_kept_5_weeks" + schedule = "cron(0 0 * * ? *)" + completion_window = 24 + lifecycle = { + delete_after = 35 + } + copy_action = { + delete_after = 365 + } + }, + { + name = "rds_weekly_kept_3_months" + schedule = "cron(0 1 ? * SUN *)" + completion_window = 48 + lifecycle = { + delete_after = 90 + } + copy_action = { + delete_after = 365 + } + }, + { + name = "rds_monthly_kept_7_years" + schedule = "cron(0 2 1 * ? 
*)" + completion_window = 72 + lifecycle = { + cold_storage_after = 30 + delete_after = 2555 + } + copy_action = { + delete_after = 365 + } + } + ] + } +} + +variable "iam_role_permissions_boundary" { + description = "Optional permissions boundary ARN for backup role" + type = string + default = "" # Empty by default +} + +variable "api_endpoint" { + description = "API endpoint to send post build version notifications to" + type = string + default = "" +} + +variable "lambda_copy_recovery_point_enable" { + description = "Flag to enable the copy recovery point lambda (copy recovery point from destination vault back to source)." + type = bool + default = false +} + +variable "lambda_copy_recovery_point_poll_interval_seconds" { + description = "Polling interval in seconds for copy job status checks." + type = number + default = 30 +} + +variable "lambda_copy_recovery_point_max_wait_minutes" { + description = "Maximum number of minutes to wait for a copy job to reach a terminal state before returning running status." + type = number + default = 10 +} + +variable "lambda_copy_recovery_point_destination_vault_arn" { + description = "Destination vault ARN containing the recovery point to be copied back (the air-gapped vault)." + type = string + default = "" +} + +variable "api_token" { + description = "API token to authenticate with the API endpoint" + type = string + default = "" +} + +variable "lambda_copy_recovery_point_source_vault_arn" { + description = "Source vault ARN to which the recovery point will be copied back." + type = string + default = "" +} + +variable "lambda_copy_recovery_point_assume_role_arn" { + description = "ARN of role in destination account the lambda assumes to initiate the copy job (if required for cross-account)." 
+ type = string + default = "" +} + +# Restore Validation Variables +variable "restore_validation_enable" { + description = "Enable automated validation of restored RDS instances during backup restore testing" + type = bool + default = false +} + +variable "restore_validation_db_credentials_secret_name" { + description = "Name of the Secrets Manager secret containing database credentials for connectivity testing" + type = string +} + +variable "restore_validation_expected_subnet_pattern" { + description = "Expected pattern in the DB subnet group name for configuration validation" + type = string +} + +variable "restore_validation_timeout_seconds" { + description = "Timeout for the restore validation Lambda function in seconds" + type = number + default = 300 +} + +variable "restore_validation_log_retention_days" { + description = "Number of days to retain restore validation Lambda logs" + type = number + default = 30 +} + +variable "python_version" { + description = "The Python version to use for the Lambda function" + type = string + default = "3.12" +} + +variable "restore_testing_db_name" { + description = "Name of the database to use for restore validation" + type = string +} diff --git a/infrastructure/modules/aws-backup-source/version b/infrastructure/modules/aws-backup-source/version new file mode 100644 index 0000000..66d62a8 --- /dev/null +++ b/infrastructure/modules/aws-backup-source/version @@ -0,0 +1 @@ +v1.4.1 diff --git a/infrastructure/modules/aws-backup-source/versions.tf b/infrastructure/modules/aws-backup-source/versions.tf new file mode 100644 index 0000000..8aca061 --- /dev/null +++ b/infrastructure/modules/aws-backup-source/versions.tf @@ -0,0 +1,15 @@ +terraform { + required_providers { + archive = { + source = "hashicorp/archive" + version = "~> 2" + } + + aws = { + source = "hashicorp/aws" + version = "> 6" + } + } + + required_version = ">= 1.9.5" +} diff --git a/infrastructure/modules/aws-scheduler/main.tf 
b/infrastructure/modules/aws-scheduler/main.tf new file mode 100644 index 0000000..57b551c --- /dev/null +++ b/infrastructure/modules/aws-scheduler/main.tf @@ -0,0 +1,57 @@ +data "aws_lambda_function" "lambda_function" { + function_name = "${var.name_prefix}-${var.function_name}" +} + +# ---- Allow EventBridge to invoke Lambda ---- +resource "aws_iam_role" "scheduler_invoke" { + name = "${var.name_prefix}-scheduler-invoke-${var.resource_suffix}" + + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Principal = { + Service = "scheduler.amazonaws.com" + } + Action = "sts:AssumeRole" + } + ] + }) +} + +resource "aws_iam_role_policy" "scheduler_lambda_policy" { + role = aws_iam_role.scheduler_invoke.id + + policy = jsonencode({ + Version = "2012-10-17", + Statement = [ + { + Effect = "Allow", + Action = "lambda:InvokeFunction", + Resource = data.aws_lambda_function.lambda_function.arn + } + ] + }) +} + +# ---- EventBridge one-time schedule ---- +resource "aws_scheduler_schedule" "env_expiry" { + name = "${var.name_prefix}-expire-${var.resource_suffix}" + + + # Note schedulers often fire an initial event upon creation, setting the "start_date" prevents this + schedule_expression = var.schedule_expression + schedule_expression_timezone = "Europe/London" + start_date = var.start_time + + flexible_time_window { + mode = "OFF" + } + + target { + arn = data.aws_lambda_function.lambda_function.arn + role_arn = aws_iam_role.scheduler_invoke.arn + input = jsonencode(var.lambda_inputs) + } +} diff --git a/infrastructure/modules/aws-scheduler/readme.md b/infrastructure/modules/aws-scheduler/readme.md new file mode 100644 index 0000000..9befb21 --- /dev/null +++ b/infrastructure/modules/aws-scheduler/readme.md @@ -0,0 +1,42 @@ +# AWS scheduler + + + +## Requirements + +No requirements. + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider_aws) | n/a | + +## Modules + +No modules. 
+ +## Resources + +| Name | Type | +|------|------| +| [aws_iam_role.scheduler_invoke](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [aws_iam_role_policy.scheduler_lambda_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy) | resource | +| [aws_scheduler_schedule.env_expiry](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/scheduler_schedule) | resource | +| [aws_lambda_function.lambda_function](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/lambda_function) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [function_name](#input_function_name) | Lambda function name | `string` | n/a | yes | +| [lambda_inputs](#input_lambda_inputs) | Map of key-value pairs to send to the Lambda as input | `map(string)` | `{}` | no | +| [name_prefix](#input_name_prefix) | Prefix for naming resources | `string` | n/a | yes | +| [resource_suffix](#input_resource_suffix) | Sanitized environment name for resource naming | `string` | n/a | yes | +| [schedule_expression](#input_schedule_expression) | Schedule expression for the AWS Scheduler (e.g. rate(3 days) or cron(...)) | `string` | `null` | no | +| [start_time](#input_start_time) | RFC3339 timestamp to use as the scheduler start time | `string` | n/a | yes | + +## Outputs + +No outputs. 
+ diff --git a/infrastructure/modules/aws-scheduler/variables.tf b/infrastructure/modules/aws-scheduler/variables.tf new file mode 100644 index 0000000..3acb2cd --- /dev/null +++ b/infrastructure/modules/aws-scheduler/variables.tf @@ -0,0 +1,35 @@ +# ---- Common ---- + +variable "name_prefix" { + description = "Prefix for naming resources" + type = string +} + +# ---- Lambda Function ---- + +variable "function_name" { + description = "Lambda function name" + type = string +} + +variable "resource_suffix" { + description = "Sanitized environment name for resource naming" + type = string +} + +variable "lambda_inputs" { + description = "Map of key-value pairs to send to the Lambda as input" + type = map(string) + default = {} +} + +variable "start_time" { + description = "RFC3339 timestamp to use as the scheduler start time" + type = string +} + +variable "schedule_expression" { + description = "Schedule expression for the AWS Scheduler (e.g. rate(3 days) or cron(...))" + type = string + default = null +} diff --git a/infrastructure/modules/cognito/README.md b/infrastructure/modules/cognito/README.md deleted file mode 100644 index 315505b..0000000 --- a/infrastructure/modules/cognito/README.md +++ /dev/null @@ -1,40 +0,0 @@ -# Cognito - -## Summary - -This is a OAuth2 client that allows us to log into the BS-Select application using the -same controls and security that CIS2 offers. We have the ability to control the configuration -of the client, including the users available for logging in. 
- -## Useful Values - -The Cognito client when created will be accessible via the following URL: - -* - -```java -The following values are needed by the BS-Select application to connect to this Cognito instance: -|Value|Default Profile Value|Description| -|-----|---------------------|-----------| -|spring.security.oauth2.client.registration.nhs-identity.scope|email, openid, profile, aws.cognito.signin.user.admin|The scope used by OAuth2 for the users.| -|spring.security.oauth2.client.registration.nhs-identity.client-id|COGNITO_CLIENT_ID_TO_BE_REPLACED|The client ID for the Cognito user client instance.| -|spring.security.oauth2.client.registration.nhs-identity.client-secret|COGNITO_CLIENT_SECRET_TO_BE_REPLACED|The client secret for the Cognito user client instance.| -|spring.security.oauth2.client.registration.nhs-identity.redirect-uri|https:///bss/login/oauth2/code/nhs-identity|The redirect once authentication has been completed.| -|spring.security.oauth2.client.provider.nhs-identity.issuer-uri||The issuer-uri, the full URL is required but main value required is the ID of the Cognito user pool| -| spring.security.oauth2.client.provider.nhs-identity.cognito-domain ||The domain to direct to for login.| -``` - -## Creating users - -Users for this Cognito client are managed via the users.csv file. The following values need to be -specified: - -|Column|Value| -|------|-----| -|UUID|The UUID associated with the user in the BS-Select database. If the user is not in the BS-Select database for the environment, login will fail.| -|bss_username|The BS-Select username associated with the user and the value used for the Username value in Cognito.| -|rbac_role|This replicates the roles CIS2 would provide by using a subset of the data provided. 
Use the following as the default value for a valid user: `"[{activities=[BS-Select], activity_codes=[B1808]}]"` -|id_assurance_level|This replicates the assurance level that CIS2 would provide for the user.| - -When running the nonprod-shared infrastructure pipeline, all the users listed in the CSV file will be created (or modified if a change is made) and -will be automatically marked as being valid. All users are created with the same default password specified in the variables.tf file. diff --git a/infrastructure/modules/cognito/main.tf b/infrastructure/modules/cognito/main.tf index 16d6628..4da3ac1 100644 --- a/infrastructure/modules/cognito/main.tf +++ b/infrastructure/modules/cognito/main.tf @@ -1,6 +1,34 @@ +data "aws_ssm_parameter" "cognito_users" { + name = "/${var.name_prefix}/cognito/users" +} + locals { - # userdata = csvdecode(file("${path.module}/user_csvs/${var.csv_file}")) - userdata = var.userdata + userdata = jsondecode(nonsensitive(data.aws_ssm_parameter.cognito_users.value)) +} + +resource "random_password" "password" { + length = 20 + special = true + override_special = "!%^*-_+=" +} + +resource "aws_secretsmanager_secret" "password" { + name = "${var.name_prefix}-cognito-user" + recovery_window_in_days = var.recovery_window + + dynamic "replica" { + for_each = var.secret_replication_regions + content { + region = replica.value + } + } +} + +resource "aws_secretsmanager_secret_version" "password" { + secret_id = aws_secretsmanager_secret.password.id + secret_string = jsonencode({ + password = random_password.password.result + }) } # Create user pool @@ -67,7 +95,7 @@ resource "aws_cognito_user_pool" "cognito_user_pool" { resource "aws_cognito_user_pool_domain" "main" { - domain = var.domain_name + domain = var.name_prefix user_pool_id = aws_cognito_user_pool.cognito_user_pool.id } @@ -76,7 +104,7 @@ resource "aws_cognito_user" "cognito_user_creation" { user_pool_id = aws_cognito_user_pool.cognito_user_pool.id username = each.value.bss_username 
- password = var.user_password + password = random_password.password.result message_action = var.message_action attributes = { diff --git a/infrastructure/modules/cognito/outputs.tf b/infrastructure/modules/cognito/outputs.tf index b26a20e..df19ed4 100644 --- a/infrastructure/modules/cognito/outputs.tf +++ b/infrastructure/modules/cognito/outputs.tf @@ -1,3 +1,11 @@ output "user_pool_id" { value = aws_cognito_user_pool.cognito_user_pool.id } + +output "secrets_manager_random_passsword_arn" { + value = aws_secretsmanager_secret.password.arn +} + +output "user_pool_domain_prefix" { + value = aws_cognito_user_pool_domain.main.domain +} diff --git a/infrastructure/modules/cognito/readme.md b/infrastructure/modules/cognito/readme.md new file mode 100644 index 0000000..68dc11b --- /dev/null +++ b/infrastructure/modules/cognito/readme.md @@ -0,0 +1,96 @@ +# Cognito + +## Summary + +This is a OAuth2 client that allows us to log into the BS-Select application using the +same controls and security that CIS2 offers. We have the ability to control the configuration +of the client, including the users available for logging in. 
+ +## Useful Values + +The Cognito client when created will be accessible via the following url: + +- + +```java +The following values are needed by the BS-Select application to connect to this Cognito instance: +|Value|Default Profile Value|Description| +|-----|---------------------|-----------| +|spring.security.oauth2.client.registration.nhs-identity.scope|email, openid, profile, aws.cognito.signin.user.admin|The scope used by OAuth2 for the users.| +|spring.security.oauth2.client.registration.nhs-identity.client-id|COGNITO_CLIENT_ID_TO_BE_REPLACED|The client ID for the Cognito user client instance.| +|spring.security.oauth2.client.registration.nhs-identity.client-secret|COGNITO_CLIENT_SECRET_TO_BE_REPLACED|The client secret for the Cognito user client instance.| +|spring.security.oauth2.client.registration.nhs-identity.redirect-uri|https:///bss/login/oauth2/code/nhs-identity|The redirect once authentication has been completed.| +|spring.security.oauth2.client.provider.nhs-identity.issuer-uri||The issuer-uri, the full URL is required but main value required is the ID of the Cognito user pool| +| spring.security.oauth2.client.provider.nhs-identity.cognito-domain ||The domain to direct to for login.| +``` + +## Creating users + +Users for this Cognito client are managed via the users.csv file. The following values need to be +specified: + +| Column | Value | +| ------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| UUID | The UUID associated with the user in the BS-Select database. If the user is not in the BS-Select database for the environment, login will fail. | +| bss_username | The BS-Select username associated with the user and the value used for the Username value in Cognito. | +| rbac_role | This replicates the roles CIS2 would provide by using a subset of the data provided. 
Use the following as the default value for a valid user: `"[{activities=[BS-Select], activity_codes=[B1808]}]"` | +| id_assurance_level | This replicates the assurance level that CIS2 would provide for the user. | + +When running the nonprod-shared infrastructure pipeline, all the users listed in the CSV file will be created (or modified if a change is made) and +will be automatically marked as being valid. All users are created with the same default password specified in the variables.tf file. + + + +## Requirements + +No requirements. + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider_aws) | n/a | +| [random](#provider_random) | n/a | + +## Modules + +No modules. + +## Resources + +| Name | Type | +|------|------| +| [aws_cognito_user.cognito_user_creation](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cognito_user) | resource | +| [aws_cognito_user_pool.cognito_user_pool](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cognito_user_pool) | resource | +| [aws_cognito_user_pool_domain.main](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cognito_user_pool_domain) | resource | +| [aws_secretsmanager_secret.password](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/secretsmanager_secret) | resource | +| [aws_secretsmanager_secret_version.password](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/secretsmanager_secret_version) | resource | +| [random_password.password](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/password) | resource | +| [aws_ssm_parameter.cognito_users](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [acr](#input_acr) | n/a | `string` | `"AAL1_USERPASS"` | no | +| 
[amr](#input_amr) | n/a | `string` | `"USERPASS"` | no | +| [attribute_names](#input_attribute_names) | n/a | `list(string)` |
[
"acr",
"amr",
"email",
"idassurancelevel",
"nhsid_nrbac_roles",
"bss_username",
"sid",
"uid"
]
| no | +| [deletion_protection](#input_deletion_protection) | ################################################################################# COGNITO ################################################################################# | `string` | `"INACTIVE"` | no | +| [environment](#input_environment) | The name of the Environment this is deployed into, for example CICD, NFT, UAT or PROD | `string` | n/a | yes | +| [message_action](#input_message_action) | n/a | `string` | `"SUPPRESS"` | no | +| [mfa_configuration](#input_mfa_configuration) | n/a | `string` | `"OFF"` | no | +| [name_prefix](#input_name_prefix) | The account, environment etc | `string` | n/a | yes | +| [recovery_window](#input_recovery_window) | The number of days that credentials should be retained for | `number` | n/a | yes | +| [secret_replication_regions](#input_secret_replication_regions) | List of additional regions where created secrets should be replicated | `list(string)` | n/a | yes | +| [user_email](#input_user_email) | n/a | `string` | `"nhsdigital.axe@nhs.net"` | no | +| [user_password](#input_user_password) | n/a | `string` | `"changeme"` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [secrets_manager_random_passsword_arn](#output_secrets_manager_random_passsword_arn) | n/a | +| [user_pool_domain_prefix](#output_user_pool_domain_prefix) | n/a | +| [user_pool_id](#output_user_pool_id) | n/a | + + diff --git a/infrastructure/modules/cognito/variables.tf b/infrastructure/modules/cognito/variables.tf index 51e418b..1013c18 100644 --- a/infrastructure/modules/cognito/variables.tf +++ b/infrastructure/modules/cognito/variables.tf @@ -48,15 +48,12 @@ variable "user_password" { default = "changeme" } -variable "csv_file" { - type = string +variable "recovery_window" { + description = "The number of days that credentials should be retained for" + type = number } -variable "domain_name" { - type = string -} - -variable "userdata" { - description = "a csvdecode block 
that contains the userdata" - type = string +variable "secret_replication_regions" { + description = "List of additional regions where created secrets should be replicated" + type = list(string) } diff --git a/infrastructure/modules/cw-firehose-splunk/firehose.tf b/infrastructure/modules/cw-firehose-splunk/firehose.tf index 1363201..b9d3ee0 100644 --- a/infrastructure/modules/cw-firehose-splunk/firehose.tf +++ b/infrastructure/modules/cw-firehose-splunk/firehose.tf @@ -45,8 +45,14 @@ resource "aws_lambda_function" "preprocess-cw-logs" { role = "arn:aws:iam::${var.aws_account_id}:role/${var.name_prefix}_cw_lambda" handler = "${var.name_prefix}.lambda_handler" source_code_hash = data.archive_file.preprocess-cw-logs-zip.output_base64sha256 - runtime = "python3.12" + runtime = var.python_version timeout = "180" + environment { + variables = { + SPLUNK_INDEX = var.splunk_index + DEFAULT_SPLUNK_SOURCETYPE = "bs_select_app_logs" + } + } } ############ @@ -82,7 +88,7 @@ resource "aws_kinesis_firehose_delivery_stream" "cw_logs_splunk_stream" { hec_endpoint = var.firehose_splunk_url hec_token = var.splunk_hec_token hec_acknowledgment_timeout = 600 - hec_endpoint_type = "Raw" + hec_endpoint_type = "Event" s3_backup_mode = "FailedEventsOnly" s3_configuration { diff --git a/infrastructure/modules/cw-firehose-splunk/firehose_iam.tf b/infrastructure/modules/cw-firehose-splunk/firehose_iam.tf index 6690823..eff9dc2 100644 --- a/infrastructure/modules/cw-firehose-splunk/firehose_iam.tf +++ b/infrastructure/modules/cw-firehose-splunk/firehose_iam.tf @@ -56,7 +56,7 @@ data "aws_iam_policy_document" "cw_firehose_assume_role" { principals { type = "Service" - identifiers = ["firehose.amazonaws.com", "logs.amazonaws.com"] + identifiers = ["firehose.amazonaws.com", "logs.amazonaws.com", "waf.amazonaws.com"] } } } diff --git a/infrastructure/modules/cw-firehose-splunk/readme.md b/infrastructure/modules/cw-firehose-splunk/readme.md new file mode 100644 index 0000000..8f7426d --- /dev/null 
+++ b/infrastructure/modules/cw-firehose-splunk/readme.md @@ -0,0 +1,68 @@ +# CW-Firehose-Splunk + + + + +## Requirements + +No requirements. + +## Providers + +| Name | Version | +| ------------------------------------------------------------ | ------- | +| [archive](#provider_archive) | n/a | +| [aws](#provider_aws) | n/a | +| [local](#provider_local) | n/a | + +## Modules + +No modules. + +## Resources + +| Name | Type | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------- | +| [aws_iam_policy.cloudwatch_to_firehose](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | +| [aws_iam_policy.cw_firehose_iam_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | +| [aws_iam_policy.cw_lambda_iam_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | +| [aws_iam_role.cloudwatch_to_firehose_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [aws_iam_role.cw_firehose_iam_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [aws_iam_role.cw_lambda_iam_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [aws_iam_role_policy_attachment.cloudwatch_to_firehose_attachment](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_iam_role_policy_attachment.cw_firehose_att](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| 
[aws_iam_role_policy_attachment.cw_lambda_att](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_kinesis_firehose_delivery_stream.cw_logs_splunk_stream](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kinesis_firehose_delivery_stream) | resource | +| [aws_lambda_function.preprocess-cw-logs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lambda_function) | resource | +| [aws_s3_bucket.undelivered_bucket](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket) | resource | +| [aws_s3_bucket_public_access_block.block_public_access](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket_public_access_block) | resource | +| [aws_s3_bucket_server_side_encryption_configuration.undelivered_bucket](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket_server_side_encryption_configuration) | resource | +| [local_file.preprocess-cw-logs-py](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [archive_file.preprocess-cw-logs-zip](https://registry.terraform.io/providers/hashicorp/archive/latest/docs/data-sources/file) | data source | +| [aws_iam_policy_document.assume_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.cw_firehose_assume_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.cw_firehose_doc_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.cw_lambda_doc_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | + +## Inputs + +| Name | 
Description | Type | Default | Required | +| ------------------------------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------- | -------- | ------------------------------------------------------------------------ | :------: | +| [aws_account_id](#input_aws_account_id) | The AWS account ID | `string` | n/a | yes | +| [environment](#input_environment) | The name of the Environment this is deployed into, for example CICD, NFT, UAT or PROD | `string` | n/a | yes | +| [exclude_extra_logging](#input_exclude_extra_logging) | Exclude extra logging information in the Lambda function that preprocesses the CW logs before sending to Splunk | `bool` | `false` | no | +| [firehose_splunk_url](#input_firehose_splunk_url) | URL for splunk | `string` | `"https://firehose.inputs.splunk.aws.digital.nhs.uk/services/collector"` | no | +| [name_prefix](#input_name_prefix) | The account, environment etc | `string` | n/a | yes | +| [python_version](#input_python_version) | The Python version to use for the Lambda function | `string` | n/a | yes | +| [splunk_hec_token](#input_splunk_hec_token) | Splunk HEC token which points to a specific log index in Splunk | `any` | n/a | yes | +| [splunk_index](#input_splunk_index) | Name of the Splunk index to post logs to | `string` | n/a | yes | + +## Outputs + +| Name | Description | +| ----------------------------------------------------------------------------------------------------------------------------------- | ----------- | +| [cw_to_splunk_firehose_role_arn](#output_cw_to_splunk_firehose_role_arn) | n/a | +| [cw_to_splunk_firehose_stream_arn](#output_cw_to_splunk_firehose_stream_arn) | n/a | + + + diff --git a/infrastructure/modules/cw-firehose-splunk/templates/preprocess-cw-logs.py.tpl b/infrastructure/modules/cw-firehose-splunk/templates/preprocess-cw-logs.py.tpl index 8b134ae..2bdba05 100644 --- 
a/infrastructure/modules/cw-firehose-splunk/templates/preprocess-cw-logs.py.tpl +++ b/infrastructure/modules/cw-firehose-splunk/templates/preprocess-cw-logs.py.tpl @@ -41,142 +41,185 @@ Cloudwatch Logs sends to Firehose records that look like this: The data is additionally compressed with GZIP. -NOTE: It is suggested to test the cloudwatch logs processor lambda function in a pre-production environment to ensure -the 6000000 limit meets your requirements. If your data contains a sizable number of records that are classified as -Dropped/ProcessingFailed, then it is suggested to lower the 6000000 limit within the function to a smaller value -(eg: 5000000) in order to confine to the 6MB (6291456 bytes) payload limit imposed by lambda. You can find Lambda -quotas at https://docs.aws.amazon.com/lambda/latest/dg/gettingstarted-limits.html - The code below will: -1) Gunzip the data -2) Parse the json -3) Set the result to ProcessingFailed for any record whose messageType is not DATA_MESSAGE, thus redirecting them to the +1) Gunzip the data +2) Parse the json +3) Set the result to ProcessingFailed for any record whose messageType is not DATA_MESSAGE, thus redirecting them to the processing error output. Such records do not contain any log events. You can modify the code to set the result to Dropped instead to get rid of these records completely. -4) For records whose messageType is DATA_MESSAGE, extract the individual log events from the logEvents field, and pass +4) For records whose messageType is DATA_MESSAGE, extract the individual log events from the logEvents field, and pass each one to the transformLogEvent method. You can modify the transformLogEvent method to perform custom transformations on the log events. -5) Concatenate the result from (4) together and set the result as the data of the record returned to Firehose. Note that +5) Concatenate the result from (4) together and set the result as the data of the record returned to Firehose. 
Note that this step will not add any delimiters. Delimiters should be appended by the logic within the transformLogEvent method. -6) Any individual record exceeding 6,000,000 bytes in size after decompression and encoding is marked as - ProcessingFailed within the function. The original compressed record will be backed up to the S3 bucket - configured on the Firehose. -7) Any additional records which exceed 6MB will be re-ingested back into Firehose. -8) The retry count for intermittent failures during re-ingestion is set 20 attempts. If you wish to retry fewer number +6) Any individual record exceeding 6,000,000 bytes in size after decompression, processing and base64-encoding is marked + as Dropped, and the original record is split into two and re-ingested back into Firehose or Kinesis. The re-ingested + records should be about half the size compared to the original, and should fit within the size limit the second time + round. +7) When the total data size (i.e. the sum over multiple records) after decompression, processing and base64-encoding + exceeds 6,000,000 bytes, any additional records are re-ingested back into Firehose or Kinesis. +8) The retry count for intermittent failures during re-ingestion is set 20 attempts. If you wish to retry fewer number of times for intermittent failures you can lower this value. """ import base64 import json import gzip -from io import BytesIO import boto3 +import os -def transformLogEvent(log_event, logGroup, logStream): +def transformLogEvent(log_event, acct, arn, loggrp, logstrm, filterName): """Transform each log event. The default implementation below just extracts the message and appends a newline to it. Args: log_event (dict): The original log event. 
Structure is {"id": str, "timestamp": long, "message": str} - + acct: The aws account from where the Cloudwatch event came from + arn: The ARN of the Kinesis Stream + loggrp: The Cloudwatch log group name + logstrm: The Cloudwatch logStream name (not used below) + filterName: The Cloudwatch Subscription filter for the Stream Returns: str: The transformed log event. + In the case below, Splunk event details are set as: + time = event time for the Cloudwatch Log + host = ARN of Firehose + source = filterName (of cloudwatch Log) contatinated with LogGroup Name + sourcetype is set as - + aws:cloudtrail if the Log Group name contains CloudTrail + aws:cloudwatchlogs:vpcflow if the Log Group name contains VPC + the environment variable contents of SPLUNK_SOURCETYPE for all other cases """ - response='' - if '${exclude_extra_logging}': - if log_event['message'].find('START RequestId') <0 and log_event['message'].find('END RequestId') <0 and log_event['message'].find('REPORT RequestId') <0: - if logGroup: - logGroup = 'cwlogname:' + logGroup +' ' - if logStream: - logStream = 'cwstreamname:' + logStream + ' ' - response = logGroup + logStream + log_event['message'] + '\n' + + region_name = arn.split(":")[3] + index = os.environ.get("SPLUNK_INDEX") + + # note that the region_name is taken from the region for the Stream, this won't change if Cloudwatch from another account/region. 
Not used for this example function + if "CloudTrail" in loggrp: + sourcetype = "aws:cloudtrail" + elif "VPC" in loggrp: + sourcetype = "aws:cloudwatchlogs:vpcflow" + elif "waf-logs" in loggrp: + sourcetype = "aws:waf" else: - if logGroup: - logGroup = 'cwlogname:' + logGroup +' ' - if logStream: - logStream = 'cwstreamname:' + logStream + ' ' - response = logGroup + logStream + log_event['message'] + '\n' - return response - -def processRecords(records): + sourcetype = os.environ.get("DEFAULT_SPLUNK_SOURCETYPE") + + return_message = ( + '{"time": ' + + str(log_event["timestamp"]) + + ',"host": "' + + arn + + '","source": "' + + filterName + + ":" + + loggrp + + '"' + ) + return_message = return_message + ',"sourcetype":"' + sourcetype + '"' + return_message = return_message + ',"index":"' + index + '"' + return_message = ( + return_message + ',"event": ' + json.dumps(log_event["message"]) + "}\n" + ) + + return return_message + "\n" + + +def processRecords(records, arn): for r in records: - data = base64.b64decode(r['data']) - striodata = BytesIO(data) - with gzip.GzipFile(fileobj=striodata, mode='r') as f: - data = json.loads(f.read()) - recId = r['recordId'] - """ - CONTROL_MESSAGE are sent by CWL to check if the subscription is reachable. - They do not contain actual data. - """ - if data['messageType'] == 'CONTROL_MESSAGE': - yield { - 'result': 'Dropped', - 'recordId': recId - } - elif data['messageType'] == 'DATA_MESSAGE': - joinedData = ''.join([transformLogEvent(e, data['logGroup'], data['logStream']) for e in data['logEvents']]) + data = loadJsonGzipBase64(r["data"]) + recId = r["recordId"] + # CONTROL_MESSAGE are sent by CWL to check if the subscription is reachable. + # They do not contain actual data. 
+ if data["messageType"] == "CONTROL_MESSAGE": + yield {"result": "Dropped", "recordId": recId} + elif data["messageType"] == "DATA_MESSAGE": + joinedData = "".join( + [ + transformLogEvent( + e, + data["owner"], + arn, + data["logGroup"], + data["logStream"], + data["subscriptionFilters"][0], + ) + for e in data["logEvents"] + ] + ) dataBytes = joinedData.encode("utf-8") - encodedData = base64.b64encode(dataBytes) - if len(encodedData) <= 6000000: - yield { - 'data': encodedData, - 'result': 'Ok', - 'recordId': recId - } - else: - yield { - 'result': 'ProcessingFailed', - 'recordId': recId - } + encodedData = base64.b64encode(dataBytes).decode("utf-8") + yield {"data": encodedData, "result": "Ok", "recordId": recId} else: - yield { - 'result': 'ProcessingFailed', - 'recordId': recId - } + yield {"result": "ProcessingFailed", "recordId": recId} + + +def splitCWLRecord(cwlRecord): + """ + Splits one CWL record into two, each containing half the log events. + Serializes and compreses the data before returning. That data can then be + re-ingested into the stream, and it'll appear as though they came from CWL + directly. 
+ """ + logEvents = cwlRecord["logEvents"] + mid = len(logEvents) // 2 + rec1 = {k: v for k, v in cwlRecord.items()} + rec1["logEvents"] = logEvents[:mid] + rec2 = {k: v for k, v in cwlRecord.items()} + rec2["logEvents"] = logEvents[mid:] + return [gzip.compress(json.dumps(r).encode("utf-8")) for r in [rec1, rec2]] def putRecordsToFirehoseStream(streamName, records, client, attemptsMade, maxAttempts): failedRecords = [] codes = [] - errMsg = '' + errMsg = "" # if put_record_batch throws for whatever reason, response['xx'] will error out, adding a check for a valid # response will prevent this response = None try: - response = client.put_record_batch(DeliveryStreamName=streamName, Records=records) + response = client.put_record_batch( + DeliveryStreamName=streamName, Records=records + ) except Exception as e: failedRecords = records errMsg = str(e) # if there are no failedRecords (put_record_batch succeeded), iterate over the response to gather results - if not failedRecords and response and response['FailedPutCount'] > 0: - for idx, res in enumerate(response['RequestResponses']): + if not failedRecords and response and response["FailedPutCount"] > 0: + for idx, res in enumerate(response["RequestResponses"]): # (if the result does not have a key 'ErrorCode' OR if it does and is empty) => we do not need to re-ingest - if 'ErrorCode' not in res or not res['ErrorCode']: + if not res.get("ErrorCode"): continue - codes.append(res['ErrorCode']) + codes.append(res["ErrorCode"]) failedRecords.append(records[idx]) - errMsg = 'Individual error codes: ' + ','.join(codes) + errMsg = "Individual error codes: " + ",".join(codes) - if len(failedRecords) > 0: + if failedRecords: if attemptsMade + 1 < maxAttempts: - print('Some records failed while calling PutRecordBatch to Firehose stream, retrying. 
%s' % (errMsg)) - putRecordsToFirehoseStream(streamName, failedRecords, client, attemptsMade + 1, maxAttempts) + print( + "Some records failed while calling PutRecordBatch to Firehose stream, retrying. %s" + % (errMsg) + ) + putRecordsToFirehoseStream( + streamName, failedRecords, client, attemptsMade + 1, maxAttempts + ) else: - raise RuntimeError('Could not put records after %s attempts. %s' % (str(maxAttempts), errMsg)) + raise RuntimeError( + "Could not put records after %s attempts. %s" + % (str(maxAttempts), errMsg) + ) def putRecordsToKinesisStream(streamName, records, client, attemptsMade, maxAttempts): failedRecords = [] codes = [] - errMsg = '' + errMsg = "" # if put_records throws for whatever reason, response['xx'] will error out, adding a check for a valid # response will prevent this response = None @@ -187,85 +230,120 @@ def putRecordsToKinesisStream(streamName, records, client, attemptsMade, maxAtte errMsg = str(e) # if there are no failedRecords (put_record_batch succeeded), iterate over the response to gather results - if not failedRecords and response and response['FailedRecordCount'] > 0: - for idx, res in enumerate(response['Records']): + if not failedRecords and response and response["FailedRecordCount"] > 0: + for idx, res in enumerate(response["Records"]): # (if the result does not have a key 'ErrorCode' OR if it does and is empty) => we do not need to re-ingest - if 'ErrorCode' not in res or not res['ErrorCode']: + if not res.get("ErrorCode"): continue - codes.append(res['ErrorCode']) + codes.append(res["ErrorCode"]) failedRecords.append(records[idx]) - errMsg = 'Individual error codes: ' + ','.join(codes) + errMsg = "Individual error codes: " + ",".join(codes) - if len(failedRecords) > 0: + if failedRecords: if attemptsMade + 1 < maxAttempts: - print('Some records failed while calling PutRecords to Kinesis stream, retrying. 
%s' % (errMsg)) - putRecordsToKinesisStream(streamName, failedRecords, client, attemptsMade + 1, maxAttempts) + print( + "Some records failed while calling PutRecords to Kinesis stream, retrying. %s" + % (errMsg) + ) + putRecordsToKinesisStream( + streamName, failedRecords, client, attemptsMade + 1, maxAttempts + ) else: - raise RuntimeError('Could not put records after %s attempts. %s' % (str(maxAttempts), errMsg)) + raise RuntimeError( + "Could not put records after %s attempts. %s" + % (str(maxAttempts), errMsg) + ) -def createReingestionRecord(isSas, originalRecord): +def createReingestionRecord(isSas, originalRecord, data=None): + if data is None: + data = base64.b64decode(originalRecord["data"]) + r = {"Data": data} if isSas: - return {'data': base64.b64decode(originalRecord['data']), 'partitionKey': originalRecord['kinesisRecordMetadata']['partitionKey']} - else: - return {'data': base64.b64decode(originalRecord['data'])} + r["PartitionKey"] = originalRecord["kinesisRecordMetadata"]["partitionKey"] + return r -def getReingestionRecord(isSas, reIngestionRecord): - if isSas: - return {'Data': reIngestionRecord['data'], 'PartitionKey': reIngestionRecord['partitionKey']} - else: - return {'Data': reIngestionRecord['data']} +def loadJsonGzipBase64(base64Data): + return json.loads(gzip.decompress(base64.b64decode(base64Data))) def lambda_handler(event, context): - isSas = 'sourceKinesisStreamArn' in event - streamARN = event['sourceKinesisStreamArn'] if isSas else event['deliveryStreamArn'] - region = streamARN.split(':')[3] - streamName = streamARN.split('/')[1] - records = list(processRecords(event['records'])) + isSas = "sourceKinesisStreamArn" in event + streamARN = event["sourceKinesisStreamArn"] if isSas else event["deliveryStreamArn"] + region = streamARN.split(":")[3] + streamName = streamARN.split("/")[1] + records = list(processRecords(event["records"], streamARN)) projectedSize = 0 - dataByRecordId = {rec['recordId']: createReingestionRecord(isSas, rec) 
for rec in event['records']} - putRecordBatches = [] - recordsToReingest = [] - totalRecordsToBeReingested = 0 + recordListsToReingest = [] for idx, rec in enumerate(records): - if rec['result'] != 'Ok': + originalRecord = event["records"][idx] + + if rec["result"] != "Ok": continue - projectedSize += len(rec['data']) + len(rec['recordId']) - # 6000000 instead of 6291456 to leave ample headroom for the stuff we didn't account for - if projectedSize > 6000000: - totalRecordsToBeReingested += 1 - recordsToReingest.append( - getReingestionRecord(isSas, dataByRecordId[rec['recordId']]) - ) - records[idx]['result'] = 'Dropped' - del(records[idx]['data']) - - # split out the record batches into multiple groups, 500 records at max per group - if len(recordsToReingest) == 500: - putRecordBatches.append(recordsToReingest) - recordsToReingest = [] - - if len(recordsToReingest) > 0: - # add the last batch - putRecordBatches.append(recordsToReingest) - - # iterate and call putRecordBatch for each group - recordsReingestedSoFar = 0 - if len(putRecordBatches) > 0: - client = boto3.client('kinesis', region_name=region) if isSas else boto3.client('firehose', region_name=region) - for recordBatch in putRecordBatches: + + # If a single record is too large after processing, split the original CWL data into two, each containing half + # the log events, and re-ingest both of them (note that it is the original data that is re-ingested, not the + # processed data). If it's not possible to split because there is only one log event, then mark the record as + # ProcessingFailed, which sends it to error output. 
+ if len(rec["data"]) > 6000000: + cwlRecord = loadJsonGzipBase64(originalRecord["data"]) + if len(cwlRecord["logEvents"]) > 1: + rec["result"] = "Dropped" + recordListsToReingest.append( + [ + createReingestionRecord(isSas, originalRecord, data) + for data in splitCWLRecord(cwlRecord) + ] + ) + else: + rec["result"] = "ProcessingFailed" + print( + ( + "Record %s contains only one log event but is still too large after processing (%d bytes), " + + "marking it as %s" + ) + % (rec["recordId"], len(rec["data"]), rec["result"]) + ) + del rec["data"] + else: + projectedSize += len(rec["data"]) + len(rec["recordId"]) + # 6000000 instead of 6291456 to leave ample headroom for the stuff we didn't account for + if projectedSize > 6000000: + recordListsToReingest.append( + [createReingestionRecord(isSas, originalRecord)] + ) + del rec["data"] + rec["result"] = "Dropped" + + # call putRecordBatch/putRecords for each group of up to 500 records to be re-ingested + if recordListsToReingest: + recordsReingestedSoFar = 0 + client = boto3.client("kinesis" if isSas else "firehose", region_name=region) + maxBatchSize = 500 + flattenedList = [r for sublist in recordListsToReingest for r in sublist] + for i in range(0, len(flattenedList), maxBatchSize): + recordBatch = flattenedList[i : i + maxBatchSize] + # last argument is maxAttempts + args = [streamName, recordBatch, client, 0, 20] if isSas: - putRecordsToKinesisStream(streamName, recordBatch, client, attemptsMade=0, maxAttempts=20) + putRecordsToKinesisStream(*args) else: - putRecordsToFirehoseStream(streamName, recordBatch, client, attemptsMade=0, maxAttempts=20) + putRecordsToFirehoseStream(*args) recordsReingestedSoFar += len(recordBatch) - print('Reingested %d/%d records out of %d' % (recordsReingestedSoFar, totalRecordsToBeReingested, len(event['records']))) - else: - print('No records to be reingested') + print("Reingested %d/%d" % (recordsReingestedSoFar, len(flattenedList))) + + print( + "%d input records, %d returned as 
Ok or ProcessingFailed, %d split and re-ingested, %d re-ingested as-is" + % ( + len(event["records"]), + len([r for r in records if r["result"] != "Dropped"]), + len([l for l in recordListsToReingest if len(l) > 1]), + len([l for l in recordListsToReingest if len(l) == 1]), + ) + ) return {"records": records} diff --git a/infrastructure/modules/cw-firehose-splunk/variables.tf b/infrastructure/modules/cw-firehose-splunk/variables.tf index df2e108..165bbf3 100644 --- a/infrastructure/modules/cw-firehose-splunk/variables.tf +++ b/infrastructure/modules/cw-firehose-splunk/variables.tf @@ -14,6 +14,11 @@ variable "splunk_hec_token" { sensitive = true } +variable "splunk_index" { + description = "Name of the Splunk index to post logs to" + type = string +} + variable "name_prefix" { description = "The account, environment etc" type = string @@ -29,3 +34,8 @@ variable "environment" { description = "The name of the Environment this is deployed into, for example CICD, NFT, UAT or PROD" type = string } + +variable "python_version" { + description = "The Python version to use for the Lambda function" + type = string +} diff --git a/infrastructure/modules/ecr/data.tf b/infrastructure/modules/ecr/data.tf new file mode 100644 index 0000000..df61312 --- /dev/null +++ b/infrastructure/modules/ecr/data.tf @@ -0,0 +1,14 @@ +data "aws_caller_identity" "current" {} + +locals { + aws_account_id = data.aws_caller_identity.current.account_id + aws_account_ids = jsondecode(data.aws_secretsmanager_secret_version.aws_account_ids.secret_string) +} + +data "aws_secretsmanager_secret" "aws_account_ids" { + name = "${var.name_prefix}-aws-account-ids" +} + +data "aws_secretsmanager_secret_version" "aws_account_ids" { + secret_id = data.aws_secretsmanager_secret.aws_account_ids.id +} diff --git a/infrastructure/modules/ecr/main.tf b/infrastructure/modules/ecr/main.tf new file mode 100644 index 0000000..c91c9d2 --- /dev/null +++ b/infrastructure/modules/ecr/main.tf @@ -0,0 +1,82 @@ +resource 
"aws_ecr_repository" "image_repository" { + name = "${var.name_prefix}-${var.repo_name}" +} + +resource "aws_ecr_repository_policy" "ecr_repo_policy" { + repository = aws_ecr_repository.image_repository.name + policy = data.aws_iam_policy_document.ecr_repo_policy_document.json +} + +data "aws_iam_policy_document" "ecr_repo_policy_document" { + # Pushing images restricted to GitHub actions and developer roles only + statement { + sid = "AllowGitHubBuildAndPush" + effect = "Allow" + actions = [ + "ecr:BatchCheckLayerAvailability", + "ecr:BatchGetImage", + "ecr:CompleteLayerUpload", + "ecr:GetDownloadUrlForLayer", + "ecr:InitiateLayerUpload", + "ecr:PutImage", + "ecr:UploadLayerPart" + ] + principals { + type = "AWS" + identifiers = [ + "arn:aws:iam::${local.aws_account_id}:root" + ] + } + condition { + test = "StringLike" + variable = "aws:PrincipalArn" + values = [ + "arn:aws:iam::${local.aws_account_id}:role/${var.name_prefix}-github-actions-role", + "arn:aws:iam::${local.aws_account_id}:role/aws-reserved/sso.amazonaws.com/eu-west-2/${var.developer_sso_role}" + ] + } + } + # Allow pulling of images from other BSS native accounts (e.g. read-only access by ECS) + statement { + sid = "AllowAllPullImage" + effect = "Allow" + actions = [ + "ecr:BatchCheckLayerAvailability", + "ecr:BatchGetImage", + "ecr:GetDownloadUrlForLayer", + ] + principals { + type = "AWS" + identifiers = [ + for id in local.aws_account_ids : "arn:aws:iam::${id}:root" + ] + } + } +} + +# Dynamic lifecycle policy rules +data "aws_ecr_lifecycle_policy_document" "ecr" { + count = length(var.lifecycle_rules) > 0 ? 
1 : 0 + dynamic "rule" { + for_each = var.lifecycle_rules + content { + priority = rule.value.priority + description = rule.value.description + + selection { + tag_status = rule.value.selection.tag_status + tag_prefix_list = lookup(rule.value.selection, "tag_prefix_list", null) + tag_pattern_list = lookup(rule.value.selection, "tag_pattern_list", null) + count_type = rule.value.selection.count_type + count_number = rule.value.selection.count_number + count_unit = lookup(rule.value.selection, "count_unit", null) + } + } + } +} + +resource "aws_ecr_lifecycle_policy" "ecr" { + count = length(var.lifecycle_rules) > 0 ? 1 : 0 + repository = aws_ecr_repository.image_repository.name + policy = data.aws_ecr_lifecycle_policy_document.ecr[0].json +} diff --git a/infrastructure/modules/ecr/readme.md b/infrastructure/modules/ecr/readme.md new file mode 100644 index 0000000..aba5897 --- /dev/null +++ b/infrastructure/modules/ecr/readme.md @@ -0,0 +1,44 @@ +# ECR + + +## Requirements + +No requirements. + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider_aws) | n/a | + +## Modules + +No modules. 
+ +## Resources + +| Name | Type | +|------|------| +| [aws_ecr_lifecycle_policy.ecr](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ecr_lifecycle_policy) | resource | +| [aws_ecr_repository.image_repository](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ecr_repository) | resource | +| [aws_ecr_repository_policy.ecr_repo_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ecr_repository_policy) | resource | +| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | +| [aws_ecr_lifecycle_policy_document.ecr](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ecr_lifecycle_policy_document) | data source | +| [aws_iam_policy_document.ecr_repo_policy_document](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_secretsmanager_secret.aws_account_ids](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/secretsmanager_secret) | data source | +| [aws_secretsmanager_secret_version.aws_account_ids](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/secretsmanager_secret_version) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [developer_sso_role](#input_developer_sso_role) | The SSO role for developers | `string` | n/a | yes | +| [lifecycle_rules](#input_lifecycle_rules) | List of lifecycle rules. Each rule must be an object:
{
priority = number
description = string
selection = {
tag_status = string
tag_prefix_list = optional(list(string))
tag_pattern_list = optional(list(string))
count_type = string
count_number = number
count_unit = optional(string)
}
} |
list(object({
priority = number
description = string
selection = object({
tag_status = string
tag_prefix_list = optional(list(string))
tag_pattern_list = optional(list(string))
count_type = string
count_number = number
count_unit = optional(string)
})
}))
| `[]` | no | +| [name_prefix](#input_name_prefix) | The account, environment etc | `string` | n/a | yes | +| [repo_name](#input_repo_name) | The name of the ECR repository | `string` | n/a | yes | + +## Outputs + +No outputs. + + diff --git a/infrastructure/modules/ecr/variables.tf b/infrastructure/modules/ecr/variables.tf new file mode 100644 index 0000000..83e07aa --- /dev/null +++ b/infrastructure/modules/ecr/variables.tf @@ -0,0 +1,50 @@ +###################### +# Generic +###################### +variable "name_prefix" { + description = "The account, environment etc" + type = string +} + +variable "repo_name" { + description = "The name of the ECR repository" + type = string +} + +variable "developer_sso_role" { + description = "The SSO role for developers" + type = string +} + + +variable "lifecycle_rules" { + description = < + +## Requirements + +No requirements. + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider_aws) | n/a | + +## Modules + +No modules. 
+ +## Resources + +| Name | Type | +|------|------| +| [aws_cloudwatch_metric_alarm.task_cpu_utilization_alarm](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_metric_alarm) | resource | +| [aws_cloudwatch_metric_alarm.task_memory_utilization_alarm](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_metric_alarm) | resource | +| [aws_ecs_cluster.ecs_cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ecs_cluster) | resource | +| [aws_iam_service_linked_role.ecs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_service_linked_role) | resource | +| [aws_security_group.ecs_sg](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | +| [aws_security_group_rule.ecs_all_egress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource | +| [aws_iam_role.ecs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_role) | data source | +| [aws_sns_topic.alert](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/sns_topic) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [aws_account_id](#input_aws_account_id) | The AWS account ID | `string` | n/a | yes | +| [container_port](#input_container_port) | n/a | `number` | `4000` | no | +| [create_ecs_service_role](#input_create_ecs_service_role) | The service role can only be created once per account, only enable it in one stack | `bool` | `true` | no | +| [environment](#input_environment) | The name of the Environment this is deployed into, for example CICD, NFT, UAT or PROD | `string` | n/a | yes | +| [name](#input_name) | the unique name of the resource | `string` | `"ecs"` | no | +| [name_prefix](#input_name_prefix) | The account, environment etc | 
`string` | n/a | yes | +| [vpc_id](#input_vpc_id) | id of the vpc | `string` | n/a | yes | + +## Outputs + +| Name | Description | +|------|-------------| +| [ecs_cluster_arn](#output_ecs_cluster_arn) | n/a | +| [ecs_cluster_name](#output_ecs_cluster_name) | n/a | +| [ecs_sg_id](#output_ecs_sg_id) | n/a | + + diff --git a/infrastructure/modules/ecs-cluster/variables.tf b/infrastructure/modules/ecs-cluster/variables.tf index 904071c..301901e 100644 --- a/infrastructure/modules/ecs-cluster/variables.tf +++ b/infrastructure/modules/ecs-cluster/variables.tf @@ -29,3 +29,9 @@ variable "environment" { description = "The name of the Environment this is deployed into, for example CICD, NFT, UAT or PROD" type = string } + +variable "create_ecs_service_role" { + description = "The service role can only be created once per account, only enable it in one stack" + type = bool + default = true +} diff --git a/infrastructure/modules/elasticache/locals.tf b/infrastructure/modules/elasticache/locals.tf index 23d3625..466dcad 100644 --- a/infrastructure/modules/elasticache/locals.tf +++ b/infrastructure/modules/elasticache/locals.tf @@ -1,9 +1,9 @@ locals { - replication_group_id = "${var.name_prefix}${var.name}" + replication_group_id = "${var.name_prefix}-${var.name}" #for engine-log prefix https://docs.aws.amazon.com/step-functions/latest/dg/bp-cwl.html cw_redis_engine_log = "/aws/vendedlogs/${var.name_prefix}-redis-engine-logs" cw_redis_slow_log = "/aws/vendedlogs/${var.name_prefix}-redis-slow-logs" - subnet_group = "${var.name_prefix}${var.name}-subnet-group" - sg_name = "${var.name_prefix}${var.name}-sg" + subnet_group = "${var.name_prefix}-${var.name}" + sg_name = "${var.name_prefix}-${var.name}" parameter_group_name = var.name_prefix } diff --git a/infrastructure/modules/elasticache/main.tf b/infrastructure/modules/elasticache/main.tf index fcc677a..3adc117 100644 --- a/infrastructure/modules/elasticache/main.tf +++ b/infrastructure/modules/elasticache/main.tf @@ -1,12 +1,3 
@@ -###################### -# SNS Topic -###################### - -# TODO -# data "aws_sns_topic" "alert" { -# name = var.sns_topic -# } - ###################### # Elasticache ###################### @@ -25,14 +16,13 @@ resource "aws_elasticache_replication_group" "elasticache_replication_group" { auto_minor_version_upgrade = true maintenance_window = "Mon:00:00-Mon:03:00" snapshot_window = "04:00-08:00" - # TODO add notification topic for alerting - #notification_topic_arn = data.aws_sns_topic.alert.arn - subnet_group_name = aws_elasticache_subnet_group.cache_subnet_group.name - security_group_ids = [aws_security_group.cache_sg.id] - engine_version = var.engine_version - cluster_mode = "enabled" - replicas_per_node_group = var.replicas_per_node_group - num_node_groups = var.number_of_shards + notification_topic_arn = var.notification_topic_arn + subnet_group_name = aws_elasticache_subnet_group.cache_subnet_group.name + security_group_ids = [aws_security_group.cache_sg.id] + engine_version = var.engine_version + cluster_mode = "enabled" + replicas_per_node_group = var.replicas_per_node_group + num_node_groups = var.number_of_shards log_delivery_configuration { destination = aws_cloudwatch_log_group.redis_engine_log.name @@ -51,9 +41,16 @@ resource "aws_elasticache_replication_group" "elasticache_replication_group" { } resource "aws_iam_service_linked_role" "elasticache" { + count = var.create_elasticache_service_role ? 
1 : 0 aws_service_name = "elasticache.amazonaws.com" } +# to allow referencing the existing service linked role if not created +data "aws_iam_role" "elasticache" { + name = "AWSServiceRoleForElastiCache" + depends_on = [aws_iam_service_linked_role.elasticache] +} + resource "aws_elasticache_parameter_group" "bss_param_group_redis7" { name = "${local.parameter_group_name}-redis7" family = "redis7" @@ -65,7 +62,7 @@ resource "aws_elasticache_parameter_group" "bss_param_group_redis7" { lifecycle { create_before_destroy = true } - depends_on = [aws_iam_service_linked_role.elasticache] + depends_on = [data.aws_iam_role.elasticache] } ###################### @@ -75,9 +72,8 @@ resource "aws_elasticache_parameter_group" "bss_param_group_redis7" { resource "aws_elasticache_subnet_group" "cache_subnet_group" { name = local.subnet_group description = "Subnet group for Elasticache" - # subnet_ids = data.aws_subnets.private_subnets.ids - subnet_ids = var.subnet_ids - depends_on = [aws_iam_service_linked_role.elasticache] + subnet_ids = var.subnet_ids + depends_on = [data.aws_iam_role.elasticache] } resource "aws_security_group" "cache_sg" { @@ -86,6 +82,18 @@ resource "aws_security_group" "cache_sg" { vpc_id = var.vpc_id } +resource "aws_vpc_security_group_ingress_rule" "ecs-inbound" { + description = "Allows inbound connection from ECS cluster" + security_group_id = aws_security_group.cache_sg.id + referenced_security_group_id = var.ecs_sg_id + from_port = 6379 + to_port = 6379 + ip_protocol = "tcp" + tags = { + "Name" : "${var.name_prefix}-ecs" + } +} + resource "aws_cloudwatch_log_group" "redis_engine_log" { name = local.cw_redis_engine_log #kms_key_id = data.aws_kms_key.kms_key.arn diff --git a/infrastructure/modules/elasticache/readme.md b/infrastructure/modules/elasticache/readme.md new file mode 100644 index 0000000..ece474c --- /dev/null +++ b/infrastructure/modules/elasticache/readme.md @@ -0,0 +1,65 @@ +# Elasticache + + + +## Requirements + +No requirements. 
+ +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider_aws) | n/a | + +## Modules + +No modules. + +## Resources + +| Name | Type | +|------|------| +| [aws_cloudwatch_log_group.redis_engine_log](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group) | resource | +| [aws_cloudwatch_log_group.redis_slow_log](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group) | resource | +| [aws_elasticache_parameter_group.bss_param_group_redis7](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/elasticache_parameter_group) | resource | +| [aws_elasticache_replication_group.elasticache_replication_group](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/elasticache_replication_group) | resource | +| [aws_elasticache_subnet_group.cache_subnet_group](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/elasticache_subnet_group) | resource | +| [aws_iam_service_linked_role.elasticache](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_service_linked_role) | resource | +| [aws_security_group.cache_sg](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | +| [aws_vpc_security_group_ingress_rule.ecs-inbound](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/vpc_security_group_ingress_rule) | resource | +| [aws_iam_role.elasticache](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_role) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [apply_immediately](#input_apply_immediately) | whether to apply changes immediately - false will apply in maintenance window | `bool` | `false` | no | +| [auto_failover_enabled](#input_auto_failover_enabled) | n/a | `any` | n/a | yes | +| 
[aws_account_id](#input_aws_account_id) | The AWS account ID | `string` | n/a | yes | +| [create_elasticache_service_role](#input_create_elasticache_service_role) | The service role can only be created once per account, only enable it in one stack | `bool` | `true` | no | +| [ecs_sg_id](#input_ecs_sg_id) | The id of the ECS security group to enable access for | `string` | n/a | yes | +| [elasticache_port](#input_elasticache_port) | Port on which Elasticache runs | `number` | `6379` | no | +| [engine_version](#input_engine_version) | The Elasticache engine version | `any` | n/a | yes | +| [environment](#input_environment) | The name of the Environment this is deployed into, for example CICD, NFT, UAT or PROD | `string` | n/a | yes | +| [multi_az](#input_multi_az) | n/a | `any` | n/a | yes | +| [name](#input_name) | The name of the resource | `string` | `"elasticache"` | no | +| [name_prefix](#input_name_prefix) | the prefix for the name which containts the environment and business unit | `string` | n/a | yes | +| [node_type](#input_node_type) | n/a | `any` | n/a | yes | +| [notification_topic_arn](#input_notification_topic_arn) | Name of the SNS topic used for Elasticache alerts | `any` | n/a | yes | +| [number_of_shards](#input_number_of_shards) | n/a | `number` | `1` | no | +| [redis_auth_token](#input_redis_auth_token) | Auth token for Redis cache | `any` | n/a | yes | +| [replicas_per_node_group](#input_replicas_per_node_group) | n/a | `number` | `2` | no | +| [replication_group_description](#input_replication_group_description) | Description for replication group | `string` | `"Redis cache for BS-Select application"` | no | +| [subnet_ids](#input_subnet_ids) | The subnets that will be used for elasticache, usually private | `list(string)` | n/a | yes | +| [vpc_id](#input_vpc_id) | The ID for the VPC | `string` | n/a | yes | + +## Outputs + +| Name | Description | +|------|-------------| +| 
[redis_configuration_endpoint_address](#output_redis_configuration_endpoint_address) | n/a | +| [redis_configuration_endpoint_port](#output_redis_configuration_endpoint_port) | n/a | +| [redis_security_group_id](#output_redis_security_group_id) | n/a | + + diff --git a/infrastructure/modules/elasticache/variables.tf b/infrastructure/modules/elasticache/variables.tf index e434358..245a43e 100644 --- a/infrastructure/modules/elasticache/variables.tf +++ b/infrastructure/modules/elasticache/variables.tf @@ -36,10 +36,9 @@ variable "redis_auth_token" { sensitive = true } -# TODO add later -# variable "sns_topic" { -# description = "Name of the SNS topic used for Elasticache alerts" -# } +variable "notification_topic_arn" { + description = "Name of the SNS topic used for Elasticache alerts" +} variable "name_prefix" { description = "the prefix for the name which containts the environment and business unit" @@ -49,7 +48,7 @@ variable "name_prefix" { variable "name" { description = "The name of the resource" type = string - default = "-elasticache" + default = "elasticache" } variable "environment" { @@ -72,3 +71,14 @@ variable "subnet_ids" { description = "The subnets that will be used for elasticache, usually private" type = list(string) } + +variable "ecs_sg_id" { + description = "The id of the ECS security group to enable access for" + type = string +} + +variable "create_elasticache_service_role" { + description = "The service role can only be created once per account, only enable it in one stack" + type = bool + default = true +} diff --git a/infrastructure/modules/github-config/main.tf b/infrastructure/modules/github-config/main.tf index 2b7075b..e4899dd 100644 --- a/infrastructure/modules/github-config/main.tf +++ b/infrastructure/modules/github-config/main.tf @@ -9,7 +9,7 @@ terraform { provider "github" { owner = "NHSDigital" - app_auth {} + token = var.github_app_token } data "github_repository" "repo" { diff --git 
a/infrastructure/modules/github-config/readme.md b/infrastructure/modules/github-config/readme.md new file mode 100644 index 0000000..78cf516 --- /dev/null +++ b/infrastructure/modules/github-config/readme.md @@ -0,0 +1,44 @@ +# GitHub-config + + + + +## Requirements + +| Name | Version | +| --------------------------------------------------------------- | ------- | +| [github](#requirement_github) | ~> 6.0 | + +## Providers + +| Name | Version | +| --------------------------------------------------------- | ------- | +| [github](#provider_github) | ~> 6.0 | + +## Modules + +No modules. + +## Resources + +| Name | Type | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------- | +| [github_actions_environment_secret.aws_account](https://registry.terraform.io/providers/integrations/github/latest/docs/resources/actions_environment_secret) | resource | +| [github_repository_environment.repo_environment](https://registry.terraform.io/providers/integrations/github/latest/docs/resources/repository_environment) | resource | +| [github_repository.repo](https://registry.terraform.io/providers/integrations/github/latest/docs/data-sources/repository) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +| --------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------- | -------- | ------- | :------: | +| [aws_account_id](#input_aws_account_id) | The AWS account ID | `string` | n/a | yes | +| [environment](#input_environment) | The name of the Environment this is deployed into, for example CICD, NFT, UAT or PROD | `string` | n/a | yes | +| [github_app_token](#input_github_app_token) | The GitHub App token used to authenticate with the GitHub provider | `string` | n/a | yes | +| [github_repo_name](#input_github_repo_name) | the 
name for the github repo | `string` | n/a | yes | + +## Outputs + +No outputs. + + + diff --git a/infrastructure/modules/github-config/variables.tf b/infrastructure/modules/github-config/variables.tf index fe4dcd3..5ae090e 100644 --- a/infrastructure/modules/github-config/variables.tf +++ b/infrastructure/modules/github-config/variables.tf @@ -13,3 +13,9 @@ variable "github_repo_name" { description = "the name for the github repo" type = string } + +variable "github_app_token" { + description = "The GitHub App token used to authenticate with the GitHub provider" + type = string + sensitive = true +} diff --git a/infrastructure/modules/lambda-layer/main.tf b/infrastructure/modules/lambda-layer/main.tf new file mode 100644 index 0000000..3de8ffe --- /dev/null +++ b/infrastructure/modules/lambda-layer/main.tf @@ -0,0 +1,35 @@ +# I've left it out as proof of concept but we could use the md5 of the lambda to trigger this only when something changes + +# Notes: A complication with building lambda layers is getting to rebuild the image on every apply the suggested base64 method throws +# an error if the file does not exist, which it never will as we are creating it in the shell script. + +# The basic setup here is that we generate a unique identifier (timestamp) then run a shell script from the layers/ dir +# containing a docker build script which once completed compresses the output into a zip file in the zips/ dir with a unique timestamp suffix. + +# By doing this we don't have to put the base64 method in the layer resource which trys to execute on plan and fails as the file does not exist yet. +# We can instead target a fixed path with the unique suffix which we know will exist by the time we get to apply. 
+ +locals { + file_suffix = timestamp() # unique for every apply +} + +resource "null_resource" "build_lambda_layer" { + # Re-run if the script changes + triggers = { + always_on = timestamp() # forces re-run every time + } + + provisioner "local-exec" { + command = "${path.module}/${var.source_path}/${var.layer_name}.sh ${local.file_suffix}" + working_dir = path.module + quiet = false + } +} + +resource "aws_lambda_layer_version" "this" { + layer_name = var.layer_name + description = var.description + filename = "${path.module}/zips/${var.layer_name}-${local.file_suffix}.zip" + compatible_runtimes = var.compatible_runtimes + depends_on = [null_resource.build_lambda_layer] +} diff --git a/infrastructure/modules/lambda-layer/outputs.tf b/infrastructure/modules/lambda-layer/outputs.tf new file mode 100644 index 0000000..35bdd2a --- /dev/null +++ b/infrastructure/modules/lambda-layer/outputs.tf @@ -0,0 +1,4 @@ +output "layer_arn" { + value = aws_lambda_layer_version.this.arn + +} diff --git a/infrastructure/modules/lambda-layer/readme.md b/infrastructure/modules/lambda-layer/readme.md new file mode 100644 index 0000000..720ca28 --- /dev/null +++ b/infrastructure/modules/lambda-layer/readme.md @@ -0,0 +1,44 @@ +# Lambda layer + + + +## Requirements + +No requirements. + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider_aws) | n/a | +| [null](#provider_null) | n/a | + +## Modules + +No modules. 
+ +## Resources + +| Name | Type | +|------|------| +| [aws_lambda_layer_version.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lambda_layer_version) | resource | +| [null_resource.build_lambda_layer](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [compatible_runtimes](#input_compatible_runtimes) | Compatible Python runtimes for the Lambda layer | `list(string)` |
[
"python3.12"
]
| no | +| [description](#input_description) | The description for the Lambda layer | `string` | n/a | yes | +| [layer_name](#input_layer_name) | The name of the Lambda layer | `string` | n/a | yes | +| [name_prefix](#input_name_prefix) | the prefix standard | `string` | n/a | yes | +| [source_path](#input_source_path) | The path of the stored layer zip file | `string` | `"../../layers"` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [layer_arn](#output_layer_arn) | n/a | + + + diff --git a/infrastructure/modules/lambda-layer/variables.tf b/infrastructure/modules/lambda-layer/variables.tf new file mode 100644 index 0000000..ca998c4 --- /dev/null +++ b/infrastructure/modules/lambda-layer/variables.tf @@ -0,0 +1,28 @@ +variable "name_prefix" { + description = "the prefix standard" + type = string + +} + +variable "layer_name" { + description = "The name of the Lambda layer" + type = string +} + +variable "compatible_runtimes" { + description = "Compatible Python runtimes for the Lambda layer" + type = list(string) + default = ["python3.12"] +} + + +variable "description" { + description = "The description for the Lambda layer" + type = string +} + +variable "source_path" { + description = "The path of the stored layer zip file" + type = string + default = "../../layers" +} diff --git a/infrastructure/modules/lambda/main.tf b/infrastructure/modules/lambda/main.tf new file mode 100644 index 0000000..3636e6e --- /dev/null +++ b/infrastructure/modules/lambda/main.tf @@ -0,0 +1,44 @@ +############### +# lambda # +############### + +module "lambda_function" { + source = "terraform-aws-modules/lambda/aws" + # downgrade version as workaround for bug https://github.com/terraform-aws-modules/terraform-aws-lambda/issues/733 + version = "8.7.0" + + function_name = "${var.name_prefix}-${var.function_name}" + description = var.function_description + handler = "${var.handler_prefix}.lambda_handler" + runtime = var.python_version + source_path = 
"../../lambdas/${var.handler_prefix}/" + timeout = var.timeout + layers = var.layers + environment_variables = var.environment + vpc_subnet_ids = var.vpc_subnet_ids + vpc_security_group_ids = var.vpc_security_group_ids +} + +############### +# IAM Policy # +############### + +resource "aws_iam_role_policy_attachment" "vpc_access_execution" { + role = module.lambda_function.lambda_role_name + policy_arn = "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" +} + +resource "aws_iam_role_policy_attachment" "lambda_to_cw_policy" { + role = module.lambda_function.lambda_role_name + policy_arn = "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" +} + +resource "aws_iam_role_policy_attachment" "push_to_cloudwatch" { + role = module.lambda_function.lambda_role_name + policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonAPIGatewayPushToCloudWatchLogs" +} + +resource "aws_iam_role_policy_attachment" "sqs" { + role = module.lambda_function.lambda_role_name + policy_arn = "arn:aws:iam::aws:policy/service-role/AWSLambdaSQSQueueExecutionRole" +} diff --git a/infrastructure/modules/lambda/outputs.tf b/infrastructure/modules/lambda/outputs.tf new file mode 100644 index 0000000..3e6670b --- /dev/null +++ b/infrastructure/modules/lambda/outputs.tf @@ -0,0 +1,20 @@ + +output "function_name" { + value = module.lambda_function.lambda_function_name +} + +output "arn" { + value = module.lambda_function.lambda_function_invoke_arn +} + +output "role_name" { + value = module.lambda_function.lambda_role_name +} + +output "lambda_arn" { + value = module.lambda_function.lambda_function_arn +} + +output "lambda_log_group_name" { + value = "/aws/lambda/${module.lambda_function.lambda_function_name}" +} diff --git a/infrastructure/modules/lambda/readme.md b/infrastructure/modules/lambda/readme.md new file mode 100644 index 0000000..132300d --- /dev/null +++ b/infrastructure/modules/lambda/readme.md @@ -0,0 +1,53 @@ +# Lambda + + +## Requirements + +No 
requirements. + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider_aws) | n/a | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [lambda_function](#module_lambda_function) | terraform-aws-modules/lambda/aws | 8.7.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_iam_role_policy_attachment.lambda_to_cw_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_iam_role_policy_attachment.push_to_cloudwatch](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_iam_role_policy_attachment.sqs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_iam_role_policy_attachment.vpc_access_execution](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [environment](#input_environment) | Values to set in the Lambda function environment | `map(string)` | `{}` | no | +| [function_description](#input_function_description) | The description for the Lambda function | `string` | n/a | yes | +| [function_name](#input_function_name) | The name of the Lambda function | `string` | `"uk-forwarder"` | no | +| [handler_prefix](#input_handler_prefix) | The prefix for the Lambda handler function | `string` | n/a | yes | +| [layers](#input_layers) | List of Lambda Layer ARNs to attach to the function | `list(string)` | `[]` | no | +| [name_prefix](#input_name_prefix) | the prefix standard | `string` | n/a | yes | +| [python_version](#input_python_version) | The Python version to use for the Lambda function | `string` | n/a | yes | +| [timeout](#input_timeout) | Timeout for the Lambda function in seconds | `number` | `120` | no | +| 
[vpc_security_group_ids](#input_vpc_security_group_ids) | List of VPC security group IDs for the Lambda function | `list(string)` | `[]` | no | +| [vpc_subnet_ids](#input_vpc_subnet_ids) | List of VPC subnet IDs for the Lambda function | `list(string)` | `[]` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [arn](#output_arn) | n/a | +| [function_name](#output_function_name) | n/a | +| [lambda_arn](#output_lambda_arn) | n/a | +| [lambda_log_group_name](#output_lambda_log_group_name) | n/a | +| [role_name](#output_role_name) | n/a | + diff --git a/infrastructure/modules/lambda/variables.tf b/infrastructure/modules/lambda/variables.tf new file mode 100644 index 0000000..0a8b675 --- /dev/null +++ b/infrastructure/modules/lambda/variables.tf @@ -0,0 +1,55 @@ +variable "name_prefix" { + description = "the prefix standard" + type = string +} + +variable "function_name" { + description = "The name of the Lambda function" + type = string + default = "uk-forwarder" +} + +variable "python_version" { + description = "The Python version to use for the Lambda function" + type = string +} + +variable "handler_prefix" { + description = "The prefix for the Lambda handler function" + type = string +} + +variable "function_description" { + description = "The description for the Lambda function" + type = string +} + +variable "environment" { + description = "Values to set in the Lambda function environment" + type = map(string) + default = {} +} + +variable "layers" { + description = "List of Lambda Layer ARNs to attach to the function" + type = list(string) + default = [] +} + +variable "vpc_subnet_ids" { + description = "List of VPC subnet IDs for the Lambda function" + type = list(string) + default = [] +} + +variable "vpc_security_group_ids" { + description = "List of VPC security group IDs for the Lambda function" + type = list(string) + default = [] +} + +variable "timeout" { + description = "Timeout for the Lambda function in seconds" + type = number + 
default = 120 +} diff --git a/infrastructure/modules/parameter_store/main.tf b/infrastructure/modules/parameter_store/main.tf new file mode 100644 index 0000000..a8cb2a4 --- /dev/null +++ b/infrastructure/modules/parameter_store/main.tf @@ -0,0 +1,338 @@ +locals { + standard_cognito_users = jsonencode([ + { + "uuid" : "100000000001", + "bss_username" : "BSS_NO_RBAC", + "rbac_role" : "[]", + "id_assurance_level" : 3 + }, + { + "uuid" : "100000000002", + "bss_username" : "BSS_NO_ID_ASSURANCE", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 0 + }, + { + "uuid" : "555033739104", + "bss_username" : "BSS_USER1", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "555033740107", + "bss_username" : "BSS_USER2", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "555033741108", + "bss_username" : "BSS_USER3", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "555033742109", + "bss_username" : "BSS_USER4", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "555033743100", + "bss_username" : "BSS_USER5", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "555033744101", + "bss_username" : "BSS_USER6", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "555033745102", + "bss_username" : "BSS_USER7", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000001", + "bss_username" : "BSS_PERF1", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000002", + "bss_username" : "BSS_PERF2", + "rbac_role" : 
"[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000003", + "bss_username" : "BSS_PERF3", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000004", + "bss_username" : "BSS_PERF4", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000005", + "bss_username" : "BSS_PERF5", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000006", + "bss_username" : "BSS_PERF6", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000007", + "bss_username" : "BSS_PERF7", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000008", + "bss_username" : "BSS_PERF8", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000009", + "bss_username" : "BSS_PERF9", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000010", + "bss_username" : "BSS_PERF10", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000011", + "bss_username" : "BSS_PERF11", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000012", + "bss_username" : "BSS_PERF12", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000013", + "bss_username" : "BSS_PERF13", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000014", + "bss_username" : "BSS_PERF14", + "rbac_role" : 
"[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000015", + "bss_username" : "BSS_PERF15", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000016", + "bss_username" : "BSS_PERF16", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000017", + "bss_username" : "BSS_PERF17", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000018", + "bss_username" : "BSS_PERF18", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000019", + "bss_username" : "BSS_PERF19", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000020", + "bss_username" : "BSS_PERF20", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + } + ] + ) + + training_cognito_users = jsonencode([ + { + "uuid" : "000000000001", + "bss_username" : "BSS_UAT1", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000002", + "bss_username" : "BSS_UAT2", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000003", + "bss_username" : "BSS_UAT3", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000004", + "bss_username" : "BSS_UAT4", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000005", + "bss_username" : "BSS_UAT5", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000006", + "bss_username" 
: "BSS_UAT6", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000007", + "bss_username" : "BSS_UAT7", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000008", + "bss_username" : "BSS_UAT8", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000009", + "bss_username" : "BSS_UAT9", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000010", + "bss_username" : "BSS_UAT10", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000011", + "bss_username" : "BSS_UAT11", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000012", + "bss_username" : "BSS_UAT12", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000013", + "bss_username" : "BSS_UAT13", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000014", + "bss_username" : "BSS_UAT14", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000015", + "bss_username" : "BSS_READ1", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000016", + "bss_username" : "BSS_READ2", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000017", + "bss_username" : "BSS_READ3", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000018", + "bss_username" : "BSS_READ4", + 
"rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000019", + "bss_username" : "BSS_READ5", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000020", + "bss_username" : "BSS_READ6", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000021", + "bss_username" : "JILJOB", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000022", + "bss_username" : "SUZWRI", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + }, + { + "uuid" : "000000000023", + "bss_username" : "TOMMYS", + "rbac_role" : "[{activities=[BS-Select], activity_codes=[B1808]}]", + "id_assurance_level" : 3 + } + ] + ) +} + +resource "aws_ssm_parameter" "cognito_users" { + # don't deploy cognito users in prod or col as not used + count = var.environment != "prod" && var.environment != "col" ? 1 : 0 + name = "/${var.name_prefix}/cognito/users" + type = "String" + value = var.environment == "training" ? local.training_cognito_users : local.standard_cognito_users +} + +# For cloudwatch agent configuration for ECS tasks +resource "aws_ssm_parameter" "ecs_cw_agent_config_parameter" { + count = var.enable_cloudwatch_agent ? 1 : 0 + name = "/${var.name_prefix}/ecs-cw-agent-config" + description = "CloudWatch Agent configuration for ECS tasks in the ${var.name_prefix} environment" + type = "String" + value = var.cloudwatch_agent_config_json +} diff --git a/infrastructure/modules/parameter_store/readme.md b/infrastructure/modules/parameter_store/readme.md new file mode 100644 index 0000000..0b40a0f --- /dev/null +++ b/infrastructure/modules/parameter_store/readme.md @@ -0,0 +1,39 @@ +# Parameter Store + + + +## Requirements + +No requirements. 
+ +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider_aws) | n/a | + +## Modules + +No modules. + +## Resources + +| Name | Type | +|------|------| +| [aws_ssm_parameter.cognito_users](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ssm_parameter) | resource | +| [aws_ssm_parameter.ecs_cw_agent_config_parameter](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ssm_parameter) | resource | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [cloudwatch_agent_config_json](#input_cloudwatch_agent_config_json) | The CloudWatch Agent configuration JSON for ECS tasks | `string` | `""` | no | +| [enable_cloudwatch_agent](#input_enable_cloudwatch_agent) | Whether to create the CloudWatch Agent configuration parameter for ECS tasks | `bool` | `false` | no | +| [environment](#input_environment) | The name of the Environment this is deployed into, for example CICD, NFT, UAT or PROD | `string` | n/a | yes | +| [name_prefix](#input_name_prefix) | The account, environment etc | `string` | n/a | yes | + +## Outputs + +No outputs. 
+ + diff --git a/infrastructure/modules/parameter_store/variables.tf b/infrastructure/modules/parameter_store/variables.tf new file mode 100644 index 0000000..6973d9a --- /dev/null +++ b/infrastructure/modules/parameter_store/variables.tf @@ -0,0 +1,25 @@ +#################################################################################### +# BSS COMMON +#################################################################################### + +variable "name_prefix" { + description = "The account, environment etc" + type = string +} + +variable "environment" { + description = "The name of the Environment this is deployed into, for example CICD, NFT, UAT or PROD" + type = string +} + +variable "enable_cloudwatch_agent" { + description = "Whether to create the CloudWatch Agent configuration parameter for ECS tasks" + type = bool + default = false +} + +variable "cloudwatch_agent_config_json" { + description = "The CloudWatch Agent configuration JSON for ECS tasks" + type = string + default = "" +} diff --git a/infrastructure/modules/ecs-cluster/readme b/infrastructure/modules/r53-healthcheck/lambda_function/__init__.py similarity index 100% rename from infrastructure/modules/ecs-cluster/readme rename to infrastructure/modules/r53-healthcheck/lambda_function/__init__.py diff --git a/infrastructure/modules/r53-healthcheck/lambda_function/bs-select-sns-forwarder.py b/infrastructure/modules/r53-healthcheck/lambda_function/bs-select-sns-forwarder.py new file mode 100644 index 0000000..d6cfdda --- /dev/null +++ b/infrastructure/modules/r53-healthcheck/lambda_function/bs-select-sns-forwarder.py @@ -0,0 +1,14 @@ +import logging +import boto3 +import os + +from resources.sns_forwarder import SNSForwarder + + +def lambda_handler(event, context): + logging.getLogger().setLevel(logging.INFO) + logging.info("event : {}".format(event)) + sns_client = boto3.client("sns", region_name="eu-west-2") + target_topic_arn = os.environ["EU_WEST_2_SNS"] + sns_forwarder = 
SNSForwarder(sns_client, target_topic_arn) + return sns_forwarder.forward_message(event["Records"][0]["Sns"]["Message"]) diff --git a/infrastructure/modules/r53-healthcheck/lambda_function/resources/__init__.py b/infrastructure/modules/r53-healthcheck/lambda_function/resources/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/infrastructure/modules/r53-healthcheck/lambda_function/resources/sns_forwarder.py b/infrastructure/modules/r53-healthcheck/lambda_function/resources/sns_forwarder.py new file mode 100644 index 0000000..b70d109 --- /dev/null +++ b/infrastructure/modules/r53-healthcheck/lambda_function/resources/sns_forwarder.py @@ -0,0 +1,49 @@ +import json +import logging +import boto3 + + +class SNSForwarder: + + sns_client = None + + ALARM_NAME = "AlarmName" + NEW_STATE_VALUE = "NewStateValue" + NEW_STATE_REASON = "NewStateReason" + + def __init__(self, sns_client, target_topic_arn): + self.sns_client = sns_client + self.target_topic_arn = target_topic_arn + logging.getLogger().setLevel(logging.INFO) + + def forward_message(self, msg): + data = self.extract_record(msg) + logging.info("data : {}".format(data)) + return self.send(data) + + def extract_record(self, msg): + data = {} + json_msg = json.loads(msg) + data[self.ALARM_NAME] = json_msg[self.ALARM_NAME] + data[self.NEW_STATE_VALUE] = json_msg[self.NEW_STATE_VALUE] + data[self.NEW_STATE_REASON] = json_msg[self.NEW_STATE_REASON] + + return data + + def send(self, data): + msg_to_send = { + self.ALARM_NAME: data[self.ALARM_NAME], + self.NEW_STATE_VALUE: data[self.NEW_STATE_VALUE], + self.NEW_STATE_REASON: data[self.NEW_STATE_REASON], + } + + resp = self.sns_client.publish( + TargetArn=self.target_topic_arn, + Message=json.dumps({"default": json.dumps(msg_to_send)}), + Subject=data[self.ALARM_NAME], + MessageStructure="json", + ) + logging.info("notification sent to SNS") + logging.info("SNS response : {}".format(resp)) + + return {"statusCode": 200, "body": json.dumps(resp)} diff --git 
a/infrastructure/modules/r53-healthcheck/lambda_function/tests/__init__.py b/infrastructure/modules/r53-healthcheck/lambda_function/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/infrastructure/modules/r53-healthcheck/lambda_function/tests/test_sns_forwarder.py b/infrastructure/modules/r53-healthcheck/lambda_function/tests/test_sns_forwarder.py new file mode 100644 index 0000000..16ea24f --- /dev/null +++ b/infrastructure/modules/r53-healthcheck/lambda_function/tests/test_sns_forwarder.py @@ -0,0 +1,33 @@ +import unittest +import json + +from resources.sns_forwarder import SNSForwarder + + +class TestSNSForwarder(unittest.TestCase): + + def test_get_new_ticket_body(self): + sns_forwarder = SNSForwarder(None, None) + + mock_name = "mock state name" + mock_state_value = "mock state value" + mock_state_reason = "mock state reason" + mock_record = self.get_mock_record( + mock_name, mock_state_value, mock_state_reason + ) + data = sns_forwarder.extract_record(mock_record) + + self.assertEqual(data[SNSForwarder.ALARM_NAME], mock_name) + self.assertEqual(data[SNSForwarder.NEW_STATE_VALUE], mock_state_value) + self.assertEqual(data[SNSForwarder.NEW_STATE_REASON], mock_state_reason) + + ####HELPER METHODS######## + + def get_mock_record(self, alarm_name, state_value, state_reason): + return json.dumps( + { + SNSForwarder.ALARM_NAME: alarm_name, + SNSForwarder.NEW_STATE_VALUE: state_value, + SNSForwarder.NEW_STATE_REASON: state_reason, + } + ) diff --git a/infrastructure/modules/r53-healthcheck/locals.tf b/infrastructure/modules/r53-healthcheck/locals.tf new file mode 100644 index 0000000..a1cd685 --- /dev/null +++ b/infrastructure/modules/r53-healthcheck/locals.tf @@ -0,0 +1,22 @@ +locals { + env_map = { + prod = { + fqdn = "en.bs-select.nhs.uk" + env = "prod" + }, + col = { + fqdn = "col.bs-select.nhs.uk" + env = "col" + }, + preprod = { + fqdn = "training.bs-select.nhs.uk" + env = "training" + } + integration = { + fqdn = 
"integration.bs-select.nhs.uk" + env = "integration" + } + } + fqdn = local.env_map[var.environment].fqdn + env = local.env_map[var.environment].env +} diff --git a/infrastructure/modules/rds-database/outputs.tf b/infrastructure/modules/r53-healthcheck/outputs.tf similarity index 50% rename from infrastructure/modules/rds-database/outputs.tf rename to infrastructure/modules/r53-healthcheck/outputs.tf index 139597f..8b13789 100644 --- a/infrastructure/modules/rds-database/outputs.tf +++ b/infrastructure/modules/r53-healthcheck/outputs.tf @@ -1,2 +1 @@ - diff --git a/infrastructure/modules/r53-healthcheck/r53-healthcheck.tf b/infrastructure/modules/r53-healthcheck/r53-healthcheck.tf new file mode 100644 index 0000000..c634499 --- /dev/null +++ b/infrastructure/modules/r53-healthcheck/r53-healthcheck.tf @@ -0,0 +1,135 @@ +# ################################################################ +# # R53 Health Check +# ################################################################ + +resource "aws_route53_health_check" "bs_select_health_check_web_app" { + + fqdn = local.fqdn #Change to local.fqdn + port = 443 + type = "HTTPS" + resource_path = "/bss/health" + failure_threshold = "3" + request_interval = "30" + regions = ["eu-west-1", "us-east-1", "us-west-1"] + + tags = { + Name = "${var.name_prefix}-${local.env}-web-app" + } +} + +# ############################################################## +# # Forwarder SNS (us-east-1) +# ############################################################## +resource "aws_sns_topic" "forwarder_topic" { + provider = aws.us_east_1 + name = "${var.name_prefix}-${local.env}-r53-forwarder" +} + +# ############################################################## +# # Lambda Role +# ############################################################## +resource "aws_iam_role" "lambda_role" { + provider = aws.us_east_1 + name = "${var.name_prefix}-${local.env}-sns-forwarder-role" + assume_role_policy = jsonencode({ + Version = "2012-10-17", + Statement = 
[{ + Effect = "Allow", + Principal = { Service = "lambda.amazonaws.com" }, + Action = "sts:AssumeRole" + }] + }) +} + +resource "aws_iam_role_policy_attachment" "basic_lambda" { + provider = aws.us_east_1 + role = aws_iam_role.lambda_role.name + policy_arn = "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" +} + +resource "aws_iam_role_policy" "sns_publish_policy" { + provider = aws.us_east_1 + role = aws_iam_role.lambda_role.id + name = "${var.name_prefix}-${local.env}-sns-forwarder-policy" + + policy = jsonencode({ + Version = "2012-10-17", + Statement = [{ + Effect = "Allow", + Action = ["sns:Publish"], + Resource = var.sns_topic + }] + }) +} + +# ############################################################## +# # Lambda function +# ############################################################## +data "archive_file" "sns_forwarder_zip" { + type = "zip" + source_dir = "${path.module}/lambda_function/" + output_path = "${path.module}/.terraform/archive_files/bs-select-sns-forwarder.zip" +} + +resource "aws_lambda_function" "sns_forwarder" { + provider = aws.us_east_1 + filename = data.archive_file.sns_forwarder_zip.output_path + function_name = "${var.name_prefix}-${local.env}-sns-forwarder" + handler = "bs-select-sns-forwarder.lambda_handler" + role = aws_iam_role.lambda_role.arn + runtime = "python3.12" + timeout = 120 + + environment { + variables = { + EU_WEST_2_SNS = var.sns_topic + } + } +} + +# ############################################################## +# # SNS subscription to Lambda +# ############################################################## +resource "aws_sns_topic_subscription" "forwarder_sub" { + provider = aws.us_east_1 + topic_arn = aws_sns_topic.forwarder_topic.arn + protocol = "lambda" + endpoint = aws_lambda_function.sns_forwarder.arn +} + +resource "aws_lambda_permission" "allow_sns" { + provider = aws.us_east_1 + statement_id = "AllowExecutionFromSNS" + action = "lambda:InvokeFunction" + function_name = 
aws_lambda_function.sns_forwarder.arn + principal = "sns.amazonaws.com" + source_arn = aws_sns_topic.forwarder_topic.arn +} + + +# ############################################################## +# # Route53 Health Checks Cloud Watch Alarm +# ############################################################## + +resource "aws_cloudwatch_metric_alarm" "bs_select_health_check_web_app_healthy" { + provider = aws.us_east_1 + alarm_name = "${var.name_prefix}-${local.env}-web-app-healthy" + namespace = "AWS/Route53" + metric_name = "HealthCheckStatus" + comparison_operator = "LessThanThreshold" + evaluation_periods = "1" + period = "60" + statistic = "Minimum" + threshold = "1" + unit = "None" + dimensions = { + HealthCheckId = aws_route53_health_check.bs_select_health_check_web_app.id + } + alarm_description = "When in alarm, send message to topic ${aws_sns_topic.forwarder_topic.arn}" + alarm_actions = [aws_sns_topic.forwarder_topic.arn] + ok_actions = [aws_sns_topic.forwarder_topic.arn] + insufficient_data_actions = [] + treat_missing_data = "notBreaching" +} + + diff --git a/infrastructure/modules/r53-healthcheck/readme.md b/infrastructure/modules/r53-healthcheck/readme.md new file mode 100644 index 0000000..360de83 --- /dev/null +++ b/infrastructure/modules/r53-healthcheck/readme.md @@ -0,0 +1,46 @@ +# Route 53 Health Check + + +## Requirements + +No requirements. + +## Providers + +| Name | Version | +|------|---------| +| [archive](#provider_archive) | n/a | +| [aws](#provider_aws) | n/a | +| [aws.us_east_1](#provider_aws.us_east_1) | n/a | + +## Modules + +No modules. 
+ +## Resources + +| Name | Type | +|------|------| +| [aws_cloudwatch_metric_alarm.bs_select_health_check_web_app_healthy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_metric_alarm) | resource | +| [aws_iam_role.lambda_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [aws_iam_role_policy.sns_publish_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy) | resource | +| [aws_iam_role_policy_attachment.basic_lambda](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_lambda_function.sns_forwarder](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lambda_function) | resource | +| [aws_lambda_permission.allow_sns](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lambda_permission) | resource | +| [aws_route53_health_check.bs_select_health_check_web_app](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route53_health_check) | resource | +| [aws_sns_topic.forwarder_topic](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/sns_topic) | resource | +| [aws_sns_topic_subscription.forwarder_sub](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/sns_topic_subscription) | resource | +| [archive_file.sns_forwarder_zip](https://registry.terraform.io/providers/hashicorp/archive/latest/docs/data-sources/file) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [environment](#input_environment) | the environment the healthcheck is deployed into | `any` | n/a | yes | +| [name_prefix](#input_name_prefix) | the name prefix for the healthcheck | `any` | n/a | yes | +| [sns_topic](#input_sns_topic) | Existing SNS topic in eu-west-2 for notifications | 
`string` | n/a | yes | + +## Outputs + +No outputs. + diff --git a/infrastructure/modules/r53-healthcheck/variables.tf b/infrastructure/modules/r53-healthcheck/variables.tf new file mode 100644 index 0000000..74b7d04 --- /dev/null +++ b/infrastructure/modules/r53-healthcheck/variables.tf @@ -0,0 +1,13 @@ +variable "environment" { + description = "the environment the healthcheck is deployed into" +} + +variable "name_prefix" { + description = "the name prefix for the healthcheck" +} + + +variable "sns_topic" { + type = string + description = "Existing SNS topic in eu-west-2 for notifications" +} diff --git a/infrastructure/modules/r53-healthcheck/versions.tf b/infrastructure/modules/r53-healthcheck/versions.tf new file mode 100644 index 0000000..fcf43ff --- /dev/null +++ b/infrastructure/modules/r53-healthcheck/versions.tf @@ -0,0 +1,8 @@ +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + configuration_aliases = [aws.us_east_1] + } + } +} diff --git a/infrastructure/modules/rds-database/main.tf b/infrastructure/modules/rds-database/main.tf index c86e462..d10db39 100644 --- a/infrastructure/modules/rds-database/main.tf +++ b/infrastructure/modules/rds-database/main.tf @@ -37,7 +37,7 @@ provider "postgresql" { } resource "postgresql_database" "my_db" { - name = "${var.name_prefix}-${var.db_name}" + name = var.db_name owner = "release_manager" lc_collate = "C" connection_limit = -1 diff --git a/infrastructure/modules/rds-database/readme.md b/infrastructure/modules/rds-database/readme.md new file mode 100644 index 0000000..22eb3cf --- /dev/null +++ b/infrastructure/modules/rds-database/readme.md @@ -0,0 +1,44 @@ +# RDS-Database + + + +## Requirements + +| Name | Version | +|------|---------| +| [postgresql](#requirement_postgresql) | >= 1.25.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider_aws) | n/a | +| [postgresql](#provider_postgresql) | >= 1.25.0 | + +## Modules + +No modules. 
+ +## Resources + +| Name | Type | +|------|------| +| [postgresql_database.my_db](https://registry.terraform.io/providers/cyrilgdn/postgresql/latest/docs/resources/database) | resource | +| [aws_db_instance.rds](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/db_instance) | data source | +| [aws_secretsmanager_secret.release_manager_password](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/secretsmanager_secret) | data source | +| [aws_secretsmanager_secret_version.release_manager_password_version](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/secretsmanager_secret_version) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [db_name](#input_db_name) | the name for the users database | `string` | n/a | yes | +| [environment](#input_environment) | the environment the resource is deployed into | `string` | n/a | yes | +| [name_prefix](#input_name_prefix) | The name prefix which includes environment and region details | `string` | n/a | yes | +| [rds_name](#input_rds_name) | the name of the service | `string` | `"postgres"` | no | + +## Outputs + +No outputs. 
+ + diff --git a/infrastructure/modules/rds-gateway-ecs-task/main.tf b/infrastructure/modules/rds-gateway-ecs-task/main.tf new file mode 100644 index 0000000..e541119 --- /dev/null +++ b/infrastructure/modules/rds-gateway-ecs-task/main.tf @@ -0,0 +1,148 @@ +data "aws_iam_policy_document" "assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["ecs-tasks.amazonaws.com"] + } + } +} + +resource "aws_iam_role" "ecs_task_role" { + name = trimsuffix(substr("${var.name_prefix}-rds-access-gateway-ecs-task", 0, 64), "-") + assume_role_policy = data.aws_iam_policy_document.assume_role_policy.json +} + +resource "aws_iam_role" "ecs_execution_role" { + # limit to 64 characters and trim any trailing hyphen + name = trimsuffix(substr("${var.name_prefix}-rds-access-gateway-ecs-execution", 0, 64), "-") + assume_role_policy = data.aws_iam_policy_document.assume_role_policy.json +} + +resource "aws_iam_role_policy_attachment" "ssm" { + role = aws_iam_role.ecs_task_role.name + policy_arn = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" +} + +resource "aws_iam_policy" "ecs_execution_role_policy" { + name = "${var.name_prefix}-rds-access-gateway-ecs-execution" + description = "Policy for ${var.name_prefix} ECS execution" + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Action = [ + "logs:CreateLogStream", + "logs:PutLogEvents" + ] + Resource = "arn:aws:logs:eu-west-2:${var.aws_account_id}:log-group:/ecs/${var.name_prefix}-rds-access-gateway-ecs-task*" + } + ] + }) +} + +resource "aws_iam_role_policy_attachment" "execution_role" { + role = aws_iam_role.ecs_execution_role.name + policy_arn = aws_iam_policy.ecs_execution_role_policy.arn +} + + +resource "aws_ecs_service" "ecs_service" { + name = "${var.name_prefix}-rds-access-gateway" + cluster = var.ecs_cluster_name + task_definition = aws_ecs_task_definition.task_definition.arn + launch_type = "FARGATE" + scheduling_strategy 
= "REPLICA" + desired_count = var.replica_task_count + enable_execute_command = true + + network_configuration { + subnets = var.private_subnet_ids + assign_public_ip = false + security_groups = [ + aws_security_group.ecs_task_sg.id + ] + } +} + +resource "aws_ecs_task_definition" "task_definition" { + family = "${var.name_prefix}-rds-access-gateway" + requires_compatibilities = ["FARGATE"] + network_mode = "awsvpc" + cpu = "512" + memory = "1024" + execution_role_arn = aws_iam_role.ecs_execution_role.arn + task_role_arn = aws_iam_role.ecs_task_role.arn + container_definitions = jsonencode( + [ + { + "name" : "${var.name_prefix}-rds-access-gateway", + "image" : "${var.image_name}", + "essential" : true, + "command" : ["sleep", "infinity"], + "readonlyRootFilesystem" : true, + "environment" : [], + "logConfiguration" : { + "logDriver" : "awslogs", + "options" : { + "awslogs-group" : aws_cloudwatch_log_group.log_group.name, + "awslogs-region" : "eu-west-2", + "awslogs-stream-prefix" : "ecs" + } + }, + "networkMode" : "awsvpc", + "linuxParameters" : { + # temporary filesystem for SSM agent so these directories are writable + # size is in MiB and uses task memory + "tmpfs" : [ + { + "containerPath" : "/var/log/amazon", + "size" : 200, + "mountOptions" : ["noexec", "nosuid", "nodev"] + }, + { + "containerPath" : "/var/lib/amazon", + "size" : 200, + "mountOptions" : ["noexec", "nosuid", "nodev"] + } + ] + } + } + ] + ) + depends_on = [aws_cloudwatch_log_group.log_group] +} + +resource "aws_cloudwatch_log_group" "log_group" { + name = "/ecs/${var.name_prefix}-rds-access-gateway-ecs-task" + retention_in_days = 14 +} + +resource "aws_security_group" "ecs_task_sg" { + name = "${var.name_prefix}-rds-access-gateway-ecs-task" + description = "Allow rds-access-gateway connections" + vpc_id = var.vpc_id +} + +resource "aws_security_group_rule" "allow_all_outbound" { + description = "Allow outbound traffic" + type = "egress" + from_port = 0 + to_port = 0 + protocol = "-1" + 
cidr_blocks = ["0.0.0.0/0"] + security_group_id = aws_security_group.ecs_task_sg.id +} + +# Allow traffic in to the RDS SG from the ec2 instance +resource "aws_security_group_rule" "rds_ingress" { + type = "ingress" + from_port = 5432 + to_port = 5432 + protocol = "tcp" + security_group_id = var.rds_sg_id + source_security_group_id = aws_security_group.ecs_task_sg.id + description = "Allow access in from the rds-access-gateway ecs task instance" +} diff --git a/infrastructure/modules/rds-gateway-ecs-task/outputs.tf b/infrastructure/modules/rds-gateway-ecs-task/outputs.tf new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/infrastructure/modules/rds-gateway-ecs-task/outputs.tf @@ -0,0 +1 @@ + diff --git a/infrastructure/modules/rds-gateway-ecs-task/readme.md b/infrastructure/modules/rds-gateway-ecs-task/readme.md new file mode 100644 index 0000000..54f2f8d --- /dev/null +++ b/infrastructure/modules/rds-gateway-ecs-task/readme.md @@ -0,0 +1,52 @@ +# rds-gateway-ecs-task + + +## Requirements + +No requirements. + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider_aws) | n/a | + +## Modules + +No modules. 
+ +## Resources + +| Name | Type | +|------|------| +| [aws_cloudwatch_log_group.log_group](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group) | resource | +| [aws_ecs_service.ecs_service](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ecs_service) | resource | +| [aws_ecs_task_definition.task_definition](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ecs_task_definition) | resource | +| [aws_iam_policy.ecs_execution_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | +| [aws_iam_role.ecs_execution_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [aws_iam_role.ecs_task_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [aws_iam_role_policy_attachment.execution_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_iam_role_policy_attachment.ssm](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_security_group.ecs_task_sg](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | +| [aws_security_group_rule.allow_all_outbound](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource | +| [aws_security_group_rule.rds_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource | +| [aws_iam_policy_document.assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| 
[aws_account_id](#input_aws_account_id) | The aws account id | `string` | n/a | yes | +| [ecs_cluster_name](#input_ecs_cluster_name) | The ECS cluster name | `string` | n/a | yes | +| [image_name](#input_image_name) | The image name for the ECS task | `string` | `"public.ecr.aws/docker/library/busybox:stable"` | no | +| [name_prefix](#input_name_prefix) | The account, environment etc | `string` | n/a | yes | +| [private_subnet_ids](#input_private_subnet_ids) | List of private subnet IDs | `list(string)` | n/a | yes | +| [rds_sg_id](#input_rds_sg_id) | The security group ID of the RDS instance | `string` | n/a | yes | +| [replica_task_count](#input_replica_task_count) | The number of task replicas to run | `number` | `1` | no | +| [vpc_id](#input_vpc_id) | id of the vpc | `string` | n/a | yes | + +## Outputs + +No outputs. + + diff --git a/infrastructure/modules/rds-gateway-ecs-task/variables.tf b/infrastructure/modules/rds-gateway-ecs-task/variables.tf new file mode 100644 index 0000000..39a1dbf --- /dev/null +++ b/infrastructure/modules/rds-gateway-ecs-task/variables.tf @@ -0,0 +1,44 @@ +#################################################################################### +# BSS COMMON +#################################################################################### +variable "name_prefix" { + description = "The account, environment etc" + type = string +} + +variable "image_name" { + description = "The image name for the ECS task" + type = string + default = "public.ecr.aws/docker/library/busybox:stable" +} + +variable "vpc_id" { + description = "id of the vpc" + type = string +} + +variable "private_subnet_ids" { + description = "List of private subnet IDs" + type = list(string) +} + +variable "rds_sg_id" { + description = "The security group ID of the RDS instance" + type = string +} + +variable "aws_account_id" { + description = "The aws account id" + type = string +} + +variable "ecs_cluster_name" { + description = "The ECS cluster name" + type = string +} 
+ +variable "replica_task_count" { + description = "The number of task replicas to run" + type = number + default = 1 +} diff --git a/infrastructure/modules/rds-instance/main.tf b/infrastructure/modules/rds-instance/main.tf index 6dfa4fd..200bf75 100644 --- a/infrastructure/modules/rds-instance/main.tf +++ b/infrastructure/modules/rds-instance/main.tf @@ -7,47 +7,39 @@ terraform { } } -provider "postgresql" { - host = aws_db_instance.rds.address - username = "postgres" - password = random_password.password["postgres"].result - superuser = false - expected_version = var.rds_engine_version -} - resource "random_password" "password" { - for_each = toset(var.users) length = 20 special = true override_special = "!%^*-_+=" } resource "aws_secretsmanager_secret" "password" { - for_each = toset(var.users) - name = "${var.name_prefix}-${each.key}" + name = "${var.name_prefix}-${var.user}" + recovery_window_in_days = var.recovery_window + + dynamic "replica" { + for_each = var.secret_replication_regions + content { + region = replica.value + } + } } resource "aws_secretsmanager_secret_version" "password" { - for_each = toset(var.users) - secret_id = aws_secretsmanager_secret.password[each.key].id + secret_id = aws_secretsmanager_secret.password.id secret_string = jsonencode({ - user = each.key - password = random_password.password[each.key].result + user = var.user + password = random_password.password.result }) } - -# locals { -# subnet_ids = var.environment == "cicd" ? 
data.aws_subnets.public_subnets.ids : data.aws_subnets.private_subnets.ids -# } - -resource "aws_db_subnet_group" "bss" { - name = "rds_subnet_group" - subnet_ids = var.subnet_ids +resource "aws_db_subnet_group" "private_bss" { + name = "${var.name_prefix}-rds-private-${var.name}" + subnet_ids = var.private_subnet_ids } resource "aws_db_parameter_group" "parameter_group" { - name = var.name + name = "${var.name_prefix}-${var.name}-${var.rds_engine_version}" family = "postgres${var.rds_engine_version}" parameter { @@ -90,9 +82,13 @@ resource "aws_db_parameter_group" "parameter_group" { # Auditing all user insert,update,delete and ddl parameter { name = "log_statement" - value = "mod" # Logs INSERT, UPDATE, DELETE, DDL only + value = "none" apply_method = "pending-reboot" } + parameter { + name = "log_min_duration_statement" + value = "1000" + } parameter { name = "log_destination" @@ -111,43 +107,58 @@ resource "aws_db_parameter_group" "parameter_group" { value = "1" apply_method = "pending-reboot" } + + lifecycle { + create_before_destroy = true + } } resource "aws_db_instance" "rds" { - identifier = "${var.name_prefix}-${var.name}" - instance_class = var.rds_instance_class - engine = var.rds_engine - engine_version = var.rds_engine_version - username = "postgres" - password = random_password.password["postgres"].result - db_subnet_group_name = aws_db_subnet_group.bss.id - allocated_storage = var.storage - iops = var.storage >= 400 ? 
var.iops : null # Sets iops to null if storage is less than 400 - storage_encrypted = var.encryption - storage_type = var.storage_type - multi_az = var.multi_az - parameter_group_name = aws_db_parameter_group.parameter_group.id - skip_final_snapshot = var.skip_final_snapshot - monitoring_interval = var.monitoring_interval - monitoring_role_arn = aws_iam_role.enhanced_monitoring.arn - performance_insights_enabled = var.performance_insights_enabled - port = var.port - maintenance_window = var.maintenance_window - backup_window = var.backup_window - backup_retention_period = var.backup_retention_period - final_snapshot_identifier = "final-${random_id.final_name.hex}" - publicly_accessible = var.publicly_accessible - auto_minor_version_upgrade = var.auto_minor_version_upgrade - copy_tags_to_snapshot = var.copy_tags_to_snapshot - apply_immediately = var.apply_immediately - deletion_protection = var.deletion_protection - allow_major_version_upgrade = var.allow_major_version_upgrade - enabled_cloudwatch_logs_exports = var.enabled_cloudwatch_logs_exports - vpc_security_group_ids = [aws_security_group.bss.id] + identifier = "${var.name_prefix}-${var.name}" + instance_class = var.rds_instance_class + engine = var.rds_engine + engine_version = var.rds_engine_version + username = "postgres" + password = random_password.password.result + db_subnet_group_name = aws_db_subnet_group.private_bss.id + allocated_storage = var.storage + iops = var.storage >= 400 ? 
var.iops : null # Sets iops to null if storage is less than 400 + storage_encrypted = var.encryption + storage_type = var.storage_type + multi_az = var.multi_az + parameter_group_name = aws_db_parameter_group.parameter_group.id + skip_final_snapshot = var.skip_final_snapshot + monitoring_interval = var.monitoring_interval + monitoring_role_arn = aws_iam_role.enhanced_monitoring.arn + performance_insights_enabled = var.performance_insights_enabled + performance_insights_retention_period = var.performance_insights_retention_period + database_insights_mode = var.database_insights_mode + port = var.port + maintenance_window = var.maintenance_window + backup_window = var.backup_window + backup_retention_period = var.backup_retention_period + final_snapshot_identifier = "final-${random_id.final_name.hex}" + publicly_accessible = var.publicly_accessible + auto_minor_version_upgrade = var.auto_minor_version_upgrade + copy_tags_to_snapshot = var.copy_tags_to_snapshot + apply_immediately = var.apply_immediately + deletion_protection = var.deletion_protection + allow_major_version_upgrade = var.allow_major_version_upgrade + enabled_cloudwatch_logs_exports = var.enabled_cloudwatch_logs_exports # log group is created automatically by aws_db_instance, no need to create separately + vpc_security_group_ids = [aws_security_group.rds.id] + tags = var.tags + + snapshot_identifier = var.snapshot_identifier != "" ? 
var.snapshot_identifier : null + + lifecycle { + ignore_changes = [ + snapshot_identifier + ] + } } resource "aws_iam_role" "enhanced_monitoring" { - name = "rds-enhanced-monitoring-role" + name = "${var.name_prefix}-rds-enhanced-monitoring" description = "Role for RDS Enhanced Monitoring" assume_role_policy = jsonencode({ @@ -165,7 +176,7 @@ resource "aws_iam_role" "enhanced_monitoring" { } resource "aws_iam_policy" "enhanced_monitoring" { - name = "rds-enhanced-monitoring-policy" + name = "${var.name_prefix}-rds-enhanced-monitoring" description = "Policy for RDS Enhanced Monitoring" policy = jsonencode({ @@ -190,20 +201,20 @@ resource "aws_iam_role_policy_attachment" "enhanced_monitoring" { policy_arn = aws_iam_policy.enhanced_monitoring.arn } -resource "aws_security_group" "bss" { - name = "rds-${var.name}" +resource "aws_security_group" "rds" { + name = "${var.name_prefix}-rds-${var.name}" description = "Allow connection by appointed rds postgres clients" vpc_id = var.vpc_id } -resource "aws_security_group_rule" "bss_ingress" { - type = "ingress" - from_port = var.port - to_port = var.port - protocol = "tcp" - security_group_id = aws_security_group.bss.id - cidr_blocks = var.ingress_cidr - description = "Allow access to rds postgres" +resource "aws_security_group_rule" "ecs_ingress" { + type = "ingress" + from_port = var.port + to_port = var.port + protocol = "tcp" + security_group_id = aws_security_group.rds.id + source_security_group_id = var.ecs_sg_id + description = "Allow ecs access to rds postgres" } resource "random_string" "final-name" { @@ -222,71 +233,3 @@ resource "random_string" "final-name" { resource "random_id" "final_name" { byte_length = 1 } - -# ROLES - -resource "postgresql_role" "release_manager_role" { - name = "release_manager" - login = true - password = random_password.password["release_manager"].result - encrypted_password = true - create_database = true - inherit = true - provider = postgresql - search_path = ["$user", "extn_pgtap", 
"extn_dblink", "extn_postgres_fdw", "extn_file_fdw", "audit", "bss_audit", "bss", - "bss_migration", "bss_kc63", "bss_sspi", "bss_cspna", "bss_integrity", "bss_support", "pi_4", "bss_reports"] - depends_on = [aws_db_instance.rds] -} - -resource "postgresql_role" "audit_user_role" { - name = "audit_user" - login = true - password = random_password.password["audit_user"].result - encrypted_password = true - create_database = false - inherit = true - provider = postgresql - search_path = ["audit"] - depends_on = [aws_db_instance.rds] -} - -resource "postgresql_role" "bss_readonly_role" { - name = "bss_readonly" - login = false - create_database = false - inherit = true - provider = postgresql - depends_on = [aws_db_instance.rds] -} -resource "postgresql_role" "bss_readwrite_role" { - name = "bss_readwrite" - login = false - create_database = false - inherit = true - provider = postgresql - depends_on = [aws_db_instance.rds] -} - -resource "postgresql_role" "bss_user_role" { - name = "bss_user" - login = true - password = random_password.password["bss_user"].result - create_database = false - inherit = true - provider = postgresql - search_path = ["$user", "extn_pgtap", "extn_dblink", "extn_postgres_fdw", "extn_file_fdw", "audit", "bss_audit", "bss", - "bss_migration", "bss_kc63", "bss_sspi", "bss_cspna", "bss_integrity", "bss_support"] - depends_on = [aws_db_instance.rds] -} - -resource "postgresql_role" "pi_4_user_role" { - name = "pi_4_user" - login = true - password = random_password.password["pi_4_user"].result - create_database = false - inherit = true - provider = postgresql - search_path = ["pi_4"] - depends_on = [aws_db_instance.rds] -} - diff --git a/infrastructure/modules/rds-instance/outputs.tf b/infrastructure/modules/rds-instance/outputs.tf index 26a21e8..6f111d3 100644 --- a/infrastructure/modules/rds-instance/outputs.tf +++ b/infrastructure/modules/rds-instance/outputs.tf @@ -17,7 +17,12 @@ output "rds_name" { value = aws_db_instance.rds.identifier } 
-output "bss_user_secret_arn" { - value = aws_secretsmanager_secret.password["bss_user"].arn +output "rds_instance_id" { + value = aws_db_instance.rds.id + description = "The ID of the RDS instance" } +output "rds_sg_id" { + value = aws_security_group.rds.id + description = "The security group ID for the RDS instance" +} diff --git a/infrastructure/modules/rds-instance/readme.md b/infrastructure/modules/rds-instance/readme.md new file mode 100644 index 0000000..068e8aa --- /dev/null +++ b/infrastructure/modules/rds-instance/readme.md @@ -0,0 +1,100 @@ +# RDS-Instance + + + +## Requirements + +| Name | Version | +|------|---------| +| [postgresql](#requirement_postgresql) | >= 1.25.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider_aws) | n/a | +| [random](#provider_random) | n/a | + +## Modules + +No modules. + +## Resources + +| Name | Type | +|------|------| +| [aws_db_instance.rds](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/db_instance) | resource | +| [aws_db_parameter_group.parameter_group](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/db_parameter_group) | resource | +| [aws_db_subnet_group.private_bss](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/db_subnet_group) | resource | +| [aws_iam_policy.enhanced_monitoring](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | +| [aws_iam_role.enhanced_monitoring](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [aws_iam_role_policy_attachment.enhanced_monitoring](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_secretsmanager_secret.password](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/secretsmanager_secret) | resource | +| 
[aws_secretsmanager_secret_version.password](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/secretsmanager_secret_version) | resource | +| [aws_security_group.rds](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | +| [aws_security_group_rule.ecs_ingress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource | +| [random_id.final_name](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/id) | resource | +| [random_password.password](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/password) | resource | +| [random_string.final-name](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [allocated_storage](#input_allocated_storage) | The amount of storage to allocate to the database in GB | `number` | `50` | no | +| [allow_major_version_upgrade](#input_allow_major_version_upgrade) | Whether to allow major version upgrades to the database | `bool` | `false` | no | +| [apply_immediately](#input_apply_immediately) | Whether to apply changes to the database immediately | `bool` | `true` | no | +| [auto_minor_version_upgrade](#input_auto_minor_version_upgrade) | Whether to automatically upgrade the database to the latest minor version | `bool` | `true` | no | +| [aws_account_id](#input_aws_account_id) | The AWS account ID | `string` | n/a | yes | +| [aws_secret_id](#input_aws_secret_id) | The name of the secret that holds the postgresql login details | `string` | n/a | yes | +| [backup_retention_period](#input_backup_retention_period) | The number of days to retain automated backups for | `number` | `4` | no | +| [backup_window](#input_backup_window) | The time window to perform automated backups in UTC 
(HH:MM-HH:MM) | `string` | `"01:00-02:00"` | no | +| [cloudwatch_log_retention_days](#input_cloudwatch_log_retention_days) | Number of days to retain CloudWatch logs | `number` | `7` | no | +| [copy_tags_to_snapshot](#input_copy_tags_to_snapshot) | Whether to copy tags to database snapshots | `bool` | `true` | no | +| [database_insights_mode](#input_database_insights_mode) | Whether to set database insights mode to standard or advanced | `string` | n/a | yes | +| [db_max_connections](#input_db_max_connections) | how many connections are allowed | `number` | `5000` | no | +| [db_storage_encryption](#input_db_storage_encryption) | Whether the database storage should be encrypted | `bool` | `true` | no | +| [deletion_protection](#input_deletion_protection) | Whether to enable deletion protection for the database | `bool` | `false` | no | +| [ecs_sg_id](#input_ecs_sg_id) | The security group ID for the ECS service | `string` | n/a | yes | +| [enable_backup](#input_enable_backup) | Whether to enable automated backups for the database | `bool` | `false` | no | +| [enabled_cloudwatch_logs_exports](#input_enabled_cloudwatch_logs_exports) | Which logs should be exported | `list(string)` |
[
"postgresql"
]
| no | +| [encryption](#input_encryption) | If encryption should be enabled | `bool` | `true` | no | +| [environment](#input_environment) | The name of the Environment this is deployed into, for example CICD, NFT, UAT or PROD | `string` | n/a | yes | +| [iops](#input_iops) | specify the provisioned IOPS, cannot be used if gp3 storage allocation is below 400 | `number` | `3000` | no | +| [is_temporary_shutdown](#input_is_temporary_shutdown) | Whether the database is in a temporary shutdown state (not a standard AWS attribute) | `bool` | `false` | no | +| [maintenance_window](#input_maintenance_window) | The time window to perform maintenance on the database in UTC (Day:HH:MM-Day:HH:MM) | `string` | `"Tue:02:30-Tue:03:30"` | no | +| [monitoring_interval](#input_monitoring_interval) | The interval in seconds to monitor the database | `number` | `10` | no | +| [multi_az](#input_multi_az) | Whether to deploy the database in multiple Availability Zones | `bool` | `true` | no | +| [name](#input_name) | The name of the resource | `any` | n/a | yes | +| [name_prefix](#input_name_prefix) | The account, environment etc | `string` | n/a | yes | +| [performance_insights_enabled](#input_performance_insights_enabled) | Whether to enable Performance Insights for the database | `bool` | `false` | no | +| [performance_insights_retention_period](#input_performance_insights_retention_period) | The number of days to retain Performance Insights data for | `number` | `7` | no | +| [port](#input_port) | The port the database will listen on | `number` | `5432` | no | +| [private_subnet_ids](#input_private_subnet_ids) | A list of private subnets to use | `list(string)` | n/a | yes | +| [publicly_accessible](#input_publicly_accessible) | Whether the database is publicly accessible | `bool` | `false` | no | +| [rds_engine](#input_rds_engine) | The engine for the RDS instance | `string` | `"postgres"` | no | +| [rds_engine_version](#input_rds_engine_version) | The engine version for the RDS 
instance | `string` | `"16"` | no | +| [rds_instance_class](#input_rds_instance_class) | The instance class for the RDS instance | `string` | n/a | yes | +| [recovery_window](#input_recovery_window) | The number of days that credentials should be retained for | `number` | n/a | yes | +| [secret_replication_regions](#input_secret_replication_regions) | List of additional regions where created secrets should be replicated | `list(string)` | n/a | yes | +| [skip_final_snapshot](#input_skip_final_snapshot) | Should there be a snapshot taken when instance destroyed | `bool` | `false` | no | +| [snapshot_identifier](#input_snapshot_identifier) | Optional snapshot identifier to restore from (e.g. if on performance environment) | `string` | `""` | no | +| [storage](#input_storage) | The storage size for the instance | `string` | `100` | no | +| [storage_type](#input_storage_type) | The type of storage used, options are 'standard', 'gp2', 'gp3', 'io1' or 'io2' | `string` | `"gp3"` | no | +| [tags](#input_tags) | A map of tags to assign to the RDS instance in addition to the default tags | `map(string)` | `{}` | no | +| [user](#input_user) | username for postgres instance to use | `string` | `"postgres"` | no | +| [vpc_id](#input_vpc_id) | The id for the vpc | `string` | n/a | yes | +| [vpc_name](#input_vpc_name) | vpc name | `string` | `""` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [rds_instance_address](#output_rds_instance_address) | Endpoint of the instance excluding port | +| [rds_instance_arn](#output_rds_instance_arn) | The ARN of the RDS instance | +| [rds_instance_endpoint](#output_rds_instance_endpoint) | The endpoint of the RDS instance including port | +| [rds_instance_id](#output_rds_instance_id) | The ID of the RDS instance | +| [rds_name](#output_rds_name) | n/a | +| [rds_sg_id](#output_rds_sg_id) | The security group ID for the RDS instance | + + diff --git a/infrastructure/modules/rds-instance/variables.tf 
b/infrastructure/modules/rds-instance/variables.tf index c4856e4..b541fd3 100644 --- a/infrastructure/modules/rds-instance/variables.tf +++ b/infrastructure/modules/rds-instance/variables.tf @@ -16,7 +16,7 @@ variable "rds_engine" { variable "rds_engine_version" { type = string description = "The engine version for the RDS instance" - default = "12.5" + default = "16" } variable "aws_secret_id" { @@ -102,6 +102,12 @@ variable "performance_insights_enabled" { default = false } +variable "performance_insights_retention_period" { + description = "The number of days to retain Performance Insights data for" + type = number + default = 7 +} + variable "enable_backup" { description = "Whether to enable automated backups for the database" type = bool @@ -141,7 +147,7 @@ variable "apply_immediately" { variable "allow_major_version_upgrade" { description = "Whether to allow major version upgrades to the database" type = bool - default = true + default = false } variable "multi_az" { @@ -168,9 +174,10 @@ variable "enabled_cloudwatch_logs_exports" { default = ["postgresql"] } -variable "ingress_cidr" { - description = "a list of the cidr's that can access the postgresql instance" - type = list(string) +variable "cloudwatch_log_retention_days" { + description = "Number of days to retain CloudWatch logs" + type = number + default = 7 } variable "name_prefix" { @@ -195,14 +202,14 @@ variable "vpc_name" { default = "" } -variable "users" { - description = "List of usernames to generate passwords and secrets for" - type = list(string) - default = ["pi_4_user", "bss_user", "bss_readwrite", "bss_readonly", "audit_user", "release_manager", "postgres"] +variable "user" { + description = "username for postgres instance to use" + type = string + default = "postgres" } -variable "subnet_ids" { - description = "A list of subnets to use" +variable "private_subnet_ids" { + description = "A list of private subnets to use" type = list(string) } @@ -210,3 +217,35 @@ variable "vpc_id" { 
description = "The id for the vpc" type = string } + +variable "ecs_sg_id" { + description = "The security group ID for the ECS service" + type = string +} + +variable "recovery_window" { + description = "The number of days that credentials should be retained for" + type = number +} + +variable "secret_replication_regions" { + description = "List of additional regions where created secrets should be replicated" + type = list(string) +} + +variable "snapshot_identifier" { + description = "Optional snapshot identifier to restore from (e.g. if on performance environment)" + type = string + default = "" +} + +variable "database_insights_mode" { + description = "Whether to set database insights mode to standard or advanced" + type = string +} + +variable "tags" { + description = "A map of tags to assign to the RDS instance in addition to the default tags" + type = map(string) + default = {} +} diff --git a/infrastructure/modules/rds-users/main.tf b/infrastructure/modules/rds-users/main.tf new file mode 100644 index 0000000..ad69f61 --- /dev/null +++ b/infrastructure/modules/rds-users/main.tf @@ -0,0 +1,106 @@ +terraform { + required_providers { + postgresql = { + source = "cyrilgdn/postgresql" + version = ">= 1.25.0" + } + } +} + +provider "postgresql" { + host = var.rds_endpoint + username = "postgres" + password = var.rds_password + superuser = false + expected_version = var.rds_engine_version +} + +resource "random_password" "password" { + for_each = toset(var.users) + length = 20 + special = true + override_special = "!%^*-_+=" +} + +resource "aws_secretsmanager_secret" "password" { + for_each = toset(var.users) + name = "${var.name_prefix}-${each.key}" + recovery_window_in_days = var.recovery_window + + dynamic "replica" { + for_each = var.secret_replication_regions + content { + region = replica.value + } + } +} + +resource "aws_secretsmanager_secret_version" "password" { + for_each = toset(var.users) + secret_id = aws_secretsmanager_secret.password[each.key].id + 
secret_string = jsonencode({ + user = each.key + password = random_password.password[each.key].result + }) +} + +# ROLES + +resource "postgresql_role" "release_manager_role" { + name = "release_manager" + login = true + password = random_password.password["release_manager"].result + encrypted_password = true + create_database = true + inherit = true + provider = postgresql + search_path = ["$user", "extn_pgtap", "extn_dblink", "extn_postgres_fdw", "extn_file_fdw", "audit", "bss_audit", "bss", + "bss_migration", "bss_kc63", "bss_sspi", "bss_cspna", "bss_integrity", "bss_support", "pi_4", "bss_reports"] +} + +resource "postgresql_role" "audit_user_role" { + name = "audit_user" + login = true + password = random_password.password["audit_user"].result + encrypted_password = true + create_database = false + inherit = true + provider = postgresql + search_path = ["audit"] +} + +resource "postgresql_role" "bss_readonly_role" { + name = "bss_readonly" + login = false + create_database = false + inherit = true + provider = postgresql +} +resource "postgresql_role" "bss_readwrite_role" { + name = "bss_readwrite" + login = false + create_database = false + inherit = true + provider = postgresql +} + +resource "postgresql_role" "bss_user_role" { + name = "bss_user" + login = true + password = random_password.password["bss_user"].result + create_database = false + inherit = true + provider = postgresql + search_path = ["$user", "extn_pgtap", "extn_dblink", "extn_postgres_fdw", "extn_file_fdw", "audit", "bss_audit", "bss", "bss_migration", "bss_kc63", "bss_sspi", "bss_cspna", "bss_integrity", "bss_support"] +} + +resource "postgresql_role" "pi_4_user_role" { + name = "pi_4_user" + login = true + password = random_password.password["pi_4_user"].result + create_database = false + inherit = true + provider = postgresql + search_path = ["pi_4"] +} + diff --git a/infrastructure/modules/rds-users/outputs.tf b/infrastructure/modules/rds-users/outputs.tf new file mode 100644 index 
0000000..0d6bbf1 --- /dev/null +++ b/infrastructure/modules/rds-users/outputs.tf @@ -0,0 +1,3 @@ +output "bss_user_secret_arn" { + value = aws_secretsmanager_secret.password["bss_user"].arn +} diff --git a/infrastructure/modules/rds-users/readme.md b/infrastructure/modules/rds-users/readme.md new file mode 100644 index 0000000..92f43ba --- /dev/null +++ b/infrastructure/modules/rds-users/readme.md @@ -0,0 +1,55 @@ +# RDS-Users + + + +## Requirements + +| Name | Version | +|------|---------| +| [postgresql](#requirement_postgresql) | >= 1.25.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider_aws) | n/a | +| [postgresql](#provider_postgresql) | >= 1.25.0 | +| [random](#provider_random) | n/a | + +## Modules + +No modules. + +## Resources + +| Name | Type | +|------|------| +| [aws_secretsmanager_secret.password](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/secretsmanager_secret) | resource | +| [aws_secretsmanager_secret_version.password](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/secretsmanager_secret_version) | resource | +| [postgresql_role.audit_user_role](https://registry.terraform.io/providers/cyrilgdn/postgresql/latest/docs/resources/role) | resource | +| [postgresql_role.bss_readonly_role](https://registry.terraform.io/providers/cyrilgdn/postgresql/latest/docs/resources/role) | resource | +| [postgresql_role.bss_readwrite_role](https://registry.terraform.io/providers/cyrilgdn/postgresql/latest/docs/resources/role) | resource | +| [postgresql_role.bss_user_role](https://registry.terraform.io/providers/cyrilgdn/postgresql/latest/docs/resources/role) | resource | +| [postgresql_role.pi_4_user_role](https://registry.terraform.io/providers/cyrilgdn/postgresql/latest/docs/resources/role) | resource | +| [postgresql_role.release_manager_role](https://registry.terraform.io/providers/cyrilgdn/postgresql/latest/docs/resources/role) | resource | +| 
[random_password.password](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/password) | resource | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [name_prefix](#input_name_prefix) | The account, environment etc | `string` | n/a | yes | +| [rds_endpoint](#input_rds_endpoint) | The endpoint to connect to the rds instance | `string` | n/a | yes | +| [rds_engine_version](#input_rds_engine_version) | The engine version for the RDS instance | `string` | `"12.5"` | no | +| [rds_password](#input_rds_password) | the password to login to rds with | `string` | n/a | yes | +| [recovery_window](#input_recovery_window) | The number of days that credentials should be retained for | `number` | n/a | yes | +| [secret_replication_regions](#input_secret_replication_regions) | List of additional regions where created secrets should be replicated | `list(string)` | `[]` | no | +| [users](#input_users) | List of usernames to generate passwords and secrets for | `list(string)` |
[
"pi_4_user",
"bss_user",
"bss_readwrite",
"bss_readonly",
"audit_user",
"release_manager"
]
| no | + +## Outputs + +| Name | Description | +|------|-------------| +| [bss_user_secret_arn](#output_bss_user_secret_arn) | n/a | + + diff --git a/infrastructure/modules/rds-users/variables.tf b/infrastructure/modules/rds-users/variables.tf new file mode 100644 index 0000000..4dffa83 --- /dev/null +++ b/infrastructure/modules/rds-users/variables.tf @@ -0,0 +1,37 @@ +variable "rds_engine_version" { + type = string + description = "The engine version for the RDS instance" + default = "12.5" +} + +variable "name_prefix" { + description = "The account, environment etc" + type = string +} +variable "users" { + description = "List of usernames to generate passwords and secrets for" + type = list(string) + default = ["pi_4_user", "bss_user", "bss_readwrite", "bss_readonly", "audit_user", "release_manager"] +} + +variable "recovery_window" { + description = "The number of days that credentials should be retained for" + type = number +} + +variable "secret_replication_regions" { + description = "List of additional regions where created secrets should be replicated" + type = list(string) + default = [] +} + +variable "rds_endpoint" { + description = "The endpoint to connect to the rds instance" + type = string +} + +variable "rds_password" { + description = "the password to login to rds with" + type = string + sensitive = true +} diff --git a/infrastructure/modules/s3/main.tf b/infrastructure/modules/s3/main.tf index 97f9790..6b2f5ce 100644 --- a/infrastructure/modules/s3/main.tf +++ b/infrastructure/modules/s3/main.tf @@ -1,7 +1,4 @@ -resource "aws_kms_key" "key" { - description = "The key used to encrypt the data_bucket" - deletion_window_in_days = 10 -} +data "aws_caller_identity" "current" {} resource "aws_s3_bucket" "bucket" { bucket = "${var.name_prefix}-${var.bucket_name}" @@ -11,17 +8,6 @@ resource "aws_s3_bucket" "bucket" { } } -resource "aws_s3_bucket_server_side_encryption_configuration" "encryption" { - bucket = aws_s3_bucket.bucket.id - - rule { - 
apply_server_side_encryption_by_default { - kms_master_key_id = aws_kms_key.key.arn - sse_algorithm = "aws:kms" - } - } -} - resource "aws_s3_bucket_ownership_controls" "ownership" { bucket = aws_s3_bucket.bucket.id rule { @@ -29,14 +15,6 @@ resource "aws_s3_bucket_ownership_controls" "ownership" { } } -# AWS recommended ACLs to be disabled and use bucket policy instead to control access -# resource "aws_s3_bucket_acl" "acl" { -# bucket = aws_s3_bucket.bucket.id -# acl = "private" - -# depends_on = [aws_s3_bucket_ownership_controls.ownership] -# } - resource "aws_s3_bucket_versioning" "versioning" { bucket = aws_s3_bucket.bucket.id versioning_configuration { diff --git a/infrastructure/modules/s3/readme.md b/infrastructure/modules/s3/readme.md new file mode 100644 index 0000000..92d2c07 --- /dev/null +++ b/infrastructure/modules/s3/readme.md @@ -0,0 +1,46 @@ +# S3 + + + +## Requirements + +No requirements. + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider_aws) | n/a | + +## Modules + +No modules. 
+ +## Resources + +| Name | Type | +|------|------| +| [aws_s3_bucket.bucket](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket) | resource | +| [aws_s3_bucket_logging.bucket](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket_logging) | resource | +| [aws_s3_bucket_ownership_controls.ownership](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket_ownership_controls) | resource | +| [aws_s3_bucket_policy.access_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket_policy) | resource | +| [aws_s3_bucket_public_access_block.public](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket_public_access_block) | resource | +| [aws_s3_bucket_versioning.versioning](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket_versioning) | resource | +| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [additional_kms_key_policy_statements](#input_additional_kms_key_policy_statements) | Additional statements to add to the kms key policy | `list(any)` | `[]` | no | +| [bucket_name](#input_bucket_name) | The name of the bucket | `string` | n/a | yes | +| [bucket_policy](#input_bucket_policy) | The access policy for the bucket | `string` | n/a | yes | +| [environment](#input_environment) | The name of the Environment this is deployed into, for example CICD, NFT, UAT or PROD | `string` | n/a | yes | +| [logging_bucket](#input_logging_bucket) | The bucket where logs are stored for s3 events | `string` | `"logging"` | no | +| [name_prefix](#input_name_prefix) | provides the prefix to keep consistancy | `string` | n/a | yes | + +## Outputs + +No outputs. 
+ + diff --git a/infrastructure/modules/s3/variables.tf b/infrastructure/modules/s3/variables.tf index 872fc32..62a4c3e 100644 --- a/infrastructure/modules/s3/variables.tf +++ b/infrastructure/modules/s3/variables.tf @@ -24,3 +24,8 @@ variable "bucket_policy" { type = string } +variable "additional_kms_key_policy_statements" { + description = "Additional statements to add to the kms key policy" + type = list(any) + default = [] +} diff --git a/infrastructure/modules/sns/main.tf b/infrastructure/modules/sns/main.tf index a6a3c84..f561744 100644 --- a/infrastructure/modules/sns/main.tf +++ b/infrastructure/modules/sns/main.tf @@ -73,6 +73,21 @@ data "aws_iam_policy_document" "sns_topic_policy" { resources = ["arn:aws:sns:eu-west-2:${var.aws_account_id}:${aws_sns_topic.sns_topic.name}"] } + # Allows AWS Backup to publish to our topic + statement { + sid = "allow event from backup" + actions = [ + "sns:Publish", + ] + principals { + type = "Service" + identifiers = ["backup.amazonaws.com"] + } + effect = "Allow" + + resources = ["arn:aws:sns:eu-west-2:${var.aws_account_id}:${aws_sns_topic.sns_topic.name}"] + } + # Allows our S3 to publish to our topic # statement { # sid = "allow event from alb-logs s3 bucket" diff --git a/infrastructure/modules/sns/outputs.tf b/infrastructure/modules/sns/outputs.tf new file mode 100644 index 0000000..1001e9e --- /dev/null +++ b/infrastructure/modules/sns/outputs.tf @@ -0,0 +1,7 @@ +output "sns_topic_arn" { + value = aws_sns_topic.sns_topic.arn +} + +output "sns_topic_name" { + value = aws_sns_topic.sns_topic.name +} diff --git a/infrastructure/modules/sns/readme.md b/infrastructure/modules/sns/readme.md new file mode 100644 index 0000000..e05a855 --- /dev/null +++ b/infrastructure/modules/sns/readme.md @@ -0,0 +1,42 @@ +# SNS + + + +## Requirements + +No requirements. + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider_aws) | n/a | + +## Modules + +No modules. 
+ +## Resources + +| Name | Type | +|------|------| +| [aws_sns_topic.sns_topic](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/sns_topic) | resource | +| [aws_sns_topic_policy.sns_topic_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/sns_topic_policy) | resource | +| [aws_iam_policy_document.sns_topic_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [aws_account_id](#input_aws_account_id) | The AWS account ID | `string` | n/a | yes | +| [environment](#input_environment) | The name of the Environment this is deployed into, for example CICD, NFT, UAT or PROD | `string` | n/a | yes | +| [name_prefix](#input_name_prefix) | The account, environment etc | `string` | n/a | yes | + +## Outputs + +| Name | Description | +|------|-------------| +| [sns_topic_arn](#output_sns_topic_arn) | n/a | +| [sns_topic_name](#output_sns_topic_name) | n/a | + + diff --git a/infrastructure/modules/sqs/main.tf b/infrastructure/modules/sqs/main.tf new file mode 100644 index 0000000..9e0ae4a --- /dev/null +++ b/infrastructure/modules/sqs/main.tf @@ -0,0 +1,53 @@ +########################### +# SQS # +########################### + +resource "aws_sqs_queue" "sqs_queue" { + name = "${var.name_prefix}-${var.stack_name}" + delay_seconds = 0 + max_message_size = 2048 + receive_wait_time_seconds = 0 + visibility_timeout_seconds = 120 + fifo_queue = false + redrive_policy = "{\"deadLetterTargetArn\":\"${aws_sqs_queue.queue.arn}\",\"maxReceiveCount\":4}" + depends_on = [aws_sqs_queue.queue] +} + +# Deadletter queue for messages that can't be delivered +resource "aws_sqs_queue" "queue" { + name = "${var.name_prefix}-${var.stack_name}-deadletter-queue" + delay_seconds = 90 + max_message_size = 2048 + message_retention_seconds = 86400 + 
receive_wait_time_seconds = 10 + visibility_timeout_seconds = 120 + fifo_queue = false + content_based_deduplication = false +} + +resource "aws_sqs_queue_policy" "allow_sns_publish" { + queue_url = aws_sqs_queue.sqs_queue.id + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Sid = "Allow-SNS-SendMessage" + Effect = "Allow" + + Principal = { + Service = "sns.amazonaws.com" + } + + Action = "sqs:SendMessage" + Resource = aws_sqs_queue.sqs_queue.arn + + Condition = { + ArnLike = { + "aws:SourceArn" = var.topic_arn + } + } + } + ] + }) +} diff --git a/infrastructure/modules/sqs/outputs.tf b/infrastructure/modules/sqs/outputs.tf new file mode 100644 index 0000000..77cbab0 --- /dev/null +++ b/infrastructure/modules/sqs/outputs.tf @@ -0,0 +1,3 @@ +output "arn" { + value = aws_sqs_queue.sqs_queue.arn +} diff --git a/infrastructure/modules/sqs/readme.md b/infrastructure/modules/sqs/readme.md new file mode 100644 index 0000000..cc08c01 --- /dev/null +++ b/infrastructure/modules/sqs/readme.md @@ -0,0 +1,41 @@ +# SQS + + + +## Requirements + +No requirements. + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider_aws) | n/a | + +## Modules + +No modules. 
+ +## Resources + +| Name | Type | +|------|------| +| [aws_sqs_queue.queue](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/sqs_queue) | resource | +| [aws_sqs_queue.sqs_queue](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/sqs_queue) | resource | +| [aws_sqs_queue_policy.allow_sns_publish](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/sqs_queue_policy) | resource | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [name_prefix](#input_name_prefix) | The account, environment etc | `string` | n/a | yes | +| [stack_name](#input_stack_name) | Name of stack calling the module to use in resource naming | `string` | n/a | yes | +| [topic_arn](#input_topic_arn) | Source SNS topic arn | `any` | n/a | yes | + +## Outputs + +| Name | Description | +|------|-------------| +| [arn](#output_arn) | n/a | + + diff --git a/infrastructure/modules/sqs/variables.tf b/infrastructure/modules/sqs/variables.tf new file mode 100644 index 0000000..a4c4ab8 --- /dev/null +++ b/infrastructure/modules/sqs/variables.tf @@ -0,0 +1,21 @@ +###################### +# Common +###################### + +variable "name_prefix" { + description = "The account, environment etc" + type = string +} + +###################### +# Module +###################### + +variable "stack_name" { + description = "Name of stack calling the module to use in resource naming" + type = string +} + +variable "topic_arn" { + description = "Source SNS topic arn" +} diff --git a/infrastructure/modules/vpc/main.tf b/infrastructure/modules/vpc/main.tf index c7cc15d..9be66bd 100644 --- a/infrastructure/modules/vpc/main.tf +++ b/infrastructure/modules/vpc/main.tf @@ -6,7 +6,7 @@ # Create the VPC resource "aws_vpc" "vpc" { - cidr_block = "10.0.0.0/16" + cidr_block = "${var.vpc_cidr_prefix}.0.0/16" instance_tenancy = "default" enable_dns_support = true enable_dns_hostnames = 
true @@ -17,67 +17,69 @@ resource "aws_vpc" "vpc" { # attach public subnets to vpc resource "aws_subnet" "public_subnet_a" { - cidr_block = "10.0.0.0/24" + cidr_block = "${var.vpc_cidr_prefix}.0.0/24" availability_zone = "eu-west-2a" vpc_id = aws_vpc.vpc.id map_public_ip_on_launch = true tags = { "Name" = "${var.name_prefix}-public-a" "Type" = "public" - # "kubernetes.io/role/elb" = "1" - # "mapPublicIpOnLaunch" = "TRUE" - # "kubernetes.io/role/internal-elb" = "1", - # "karpenter.sh/discovery" = "${var.name_prefix}-eks" } } resource "aws_subnet" "public_subnet_b" { - cidr_block = "10.0.1.0/24" + cidr_block = "${var.vpc_cidr_prefix}.1.0/24" availability_zone = "eu-west-2b" vpc_id = aws_vpc.vpc.id map_public_ip_on_launch = true tags = { "Name" = "${var.name_prefix}-public-b" "Type" = "public" - # "kubernetes.io/role/elb" = "1" - # "mapPublicIpOnLaunch" = "TRUE" - # "kubernetes.io/role/internal-elb" = "1", - # "karpenter.sh/discovery" = "${var.name_prefix}-eks" + } +} + +resource "aws_subnet" "public_subnet_c" { + cidr_block = "${var.vpc_cidr_prefix}.4.0/24" + availability_zone = "eu-west-2c" + vpc_id = aws_vpc.vpc.id + map_public_ip_on_launch = true + tags = { + "Name" = "${var.name_prefix}-public-c" + "Type" = "public" } } # attach private subnets to vpc resource "aws_subnet" "private_subnet_a" { - cidr_block = "10.0.2.0/24" + cidr_block = "${var.vpc_cidr_prefix}.2.0/24" availability_zone = "eu-west-2a" vpc_id = aws_vpc.vpc.id map_public_ip_on_launch = false tags = { "Name" = "${var.name_prefix}-private-a" "Type" = "private" - # "kubernetes.io/cluster/${var.name_prefix}-eks" = "shared" - # "kubernetes.io/role/internal-elb" = "1", - # "mapPublicIpOnLaunch" = "FALSE" - # "karpenter.sh/discovery" = "${var.name_prefix}" - # "kubernetes.io/role/cni" = "1" - # "mapPublicIpOnLaunch" = "FALSE" } } resource "aws_subnet" "private_subnet_b" { - cidr_block = "10.0.3.0/24" + cidr_block = "${var.vpc_cidr_prefix}.3.0/24" availability_zone = "eu-west-2b" vpc_id = aws_vpc.vpc.id 
map_public_ip_on_launch = false tags = { "Name" = "${var.name_prefix}-private-b" "Type" = "private" - # "kubernetes.io/cluster/${var.name_prefix}-eks" = "shared" - # "kubernetes.io/role/internal-elb" = "1", - # "mapPublicIpOnLaunch" = "FALSE" - # "karpenter.sh/discovery" = "${var.name_prefix}" - # "kubernetes.io/role/cni" = "1" - # "mapPublicIpOnLaunch" = "FALSE" + } +} + +resource "aws_subnet" "private_subnet_c" { + cidr_block = "${var.vpc_cidr_prefix}.5.0/24" + availability_zone = "eu-west-2c" + vpc_id = aws_vpc.vpc.id + map_public_ip_on_launch = false + tags = { + "Name" = "${var.name_prefix}-private-c" + "Type" = "private" } } @@ -134,6 +136,20 @@ resource "aws_eip" "eip_b" { } } +resource "aws_nat_gateway" "nat_gw_c" { + allocation_id = aws_eip.eip_c.id + subnet_id = aws_subnet.public_subnet_c.id + tags = { + Name = "${var.name_prefix}" + } +} + +resource "aws_eip" "eip_c" { + tags = { + Name = "${var.name_prefix}" + } +} + # create a route table so traffic in the private subnets # can use the nat gateways @@ -160,7 +176,17 @@ resource "aws_route_table" "private_rt_b" { Name = "${var.name_prefix}" } } +resource "aws_route_table" "private_rt_c" { + vpc_id = aws_vpc.vpc.id + route { + cidr_block = "0.0.0.0/0" + nat_gateway_id = aws_nat_gateway.nat_gw_c.id + } + tags = { + Name = "${var.name_prefix}" + } +} # associate the route tables with the subnets resource "aws_route_table_association" "private_rta_a" { @@ -173,6 +199,11 @@ resource "aws_route_table_association" "private_rta_b" { route_table_id = aws_route_table.private_rt_b.id } +resource "aws_route_table_association" "private_rta_c" { + subnet_id = aws_subnet.private_subnet_c.id + route_table_id = aws_route_table.private_rt_c.id +} + resource "aws_route_table_association" "public_rta_a" { subnet_id = aws_subnet.public_subnet_a.id route_table_id = aws_route_table.public_rt.id @@ -182,3 +213,8 @@ resource "aws_route_table_association" "public_rta_b" { subnet_id = aws_subnet.public_subnet_b.id route_table_id 
= aws_route_table.public_rt.id } + +resource "aws_route_table_association" "public_rta_c" { + subnet_id = aws_subnet.public_subnet_c.id + route_table_id = aws_route_table.public_rt.id +} diff --git a/infrastructure/modules/vpc/outputs.tf b/infrastructure/modules/vpc/outputs.tf index 160d825..86e1412 100644 --- a/infrastructure/modules/vpc/outputs.tf +++ b/infrastructure/modules/vpc/outputs.tf @@ -5,10 +5,15 @@ output "vpc_id" { output "private_subnet_ids" { description = "IDs of the public subnets" - value = [aws_subnet.private_subnet_a.id, aws_subnet.private_subnet_b.id] + value = [aws_subnet.private_subnet_a.id, aws_subnet.private_subnet_b.id, aws_subnet.private_subnet_c.id] } output "public_subnet_ids" { description = "IDs of the public subnets" - value = [aws_subnet.public_subnet_a.id, aws_subnet.public_subnet_b.id] + value = [aws_subnet.public_subnet_a.id, aws_subnet.public_subnet_b.id, aws_subnet.public_subnet_c.id] +} + +output "vpc_cidr_block" { + description = "CIDR range of the VPC" + value = aws_vpc.vpc.cidr_block } diff --git a/infrastructure/modules/vpc/readme.md b/infrastructure/modules/vpc/readme.md index 8c63e73..e1482bb 100644 --- a/infrastructure/modules/vpc/readme.md +++ b/infrastructure/modules/vpc/readme.md @@ -1,6 +1,6 @@ # VPC -This module will create an RDS Instance, This instance can then have multiple databases created within it. In the BSS environment we have a single RDS instance and all the developers have databases created within it which are created by Github pipelines. +This module will create a VPC with public and private subnets spread across three Availability Zones, together with an internet gateway, NAT gateways and the route tables that give the private subnets outbound access. 
## Preprequisites @@ -20,7 +20,6 @@ terraform { use_lockfile = true } } - provider "aws" { region = "eu-west-2" default_tags { @@ -31,7 +30,6 @@ provider "aws" { } } } - module "vpc" { source = "./modules/" environment = var.environment @@ -46,7 +44,7 @@ There are a few key values that need to be passed in: ### prefix -The `name_prefix` is the consistant part of the name which will be applied to all resources. In BSS that is `bss-cicd-en` for england and `bss-cicd-ni` for northern ireland. These would usually be passed in via either a `tfvar` file or via the command line interface from a pipeline, we use Github actions in the BSS team. +The `name_prefix` is the consistant part of the name which will be applied to all resources. In BSS that is `bss-cicd-en` for England and `bss-cicd-ni` for Northern Ireland. These would usually be passed in via either a `tfvar` file or via the command line interface from a pipeline, we use GitHub actions in the BSS team. ### name @@ -59,3 +57,68 @@ This is the name of the environment it is deployed into, this might be `CICD`, ` ### Optional variables There are many other variables which have default values which can be overwritten if desired, you can look in the variables.tf file for the full list which should all have descriptions explaining what they do. + + + +## Requirements + +No requirements. + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider_aws) | n/a | + +## Modules + +No modules. 
+ +## Resources + +| Name | Type | +|------|------| +| [aws_eip.eip_a](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eip) | resource | +| [aws_eip.eip_b](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eip) | resource | +| [aws_eip.eip_c](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eip) | resource | +| [aws_internet_gateway.igw](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/internet_gateway) | resource | +| [aws_nat_gateway.nat_gw_a](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/nat_gateway) | resource | +| [aws_nat_gateway.nat_gw_b](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/nat_gateway) | resource | +| [aws_nat_gateway.nat_gw_c](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/nat_gateway) | resource | +| [aws_route_table.private_rt_a](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route_table) | resource | +| [aws_route_table.private_rt_b](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route_table) | resource | +| [aws_route_table.private_rt_c](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route_table) | resource | +| [aws_route_table.public_rt](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route_table) | resource | +| [aws_route_table_association.private_rta_a](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route_table_association) | resource | +| [aws_route_table_association.private_rta_b](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route_table_association) | resource | +| [aws_route_table_association.private_rta_c](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route_table_association) | resource | +| 
[aws_route_table_association.public_rta_a](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route_table_association) | resource | +| [aws_route_table_association.public_rta_b](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route_table_association) | resource | +| [aws_route_table_association.public_rta_c](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route_table_association) | resource | +| [aws_subnet.private_subnet_a](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/subnet) | resource | +| [aws_subnet.private_subnet_b](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/subnet) | resource | +| [aws_subnet.private_subnet_c](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/subnet) | resource | +| [aws_subnet.public_subnet_a](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/subnet) | resource | +| [aws_subnet.public_subnet_b](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/subnet) | resource | +| [aws_subnet.public_subnet_c](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/subnet) | resource | +| [aws_vpc.vpc](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/vpc) | resource | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [environment](#input_environment) | The name of the Environment this is deployed into, for example CICD, NFT, UAT or PROD | `any` | n/a | yes | +| [name](#input_name) | The name of the resource | `string` | `""` | no | +| [name_prefix](#input_name_prefix) | the environment and project | `any` | n/a | yes | +| [vpc_cidr_prefix](#input_vpc_cidr_prefix) | The CIDR block prefix for the VPC | `any` | n/a | yes | + +## Outputs + +| Name | Description | +|------|-------------| +| 
[private_subnet_ids](#output_private_subnet_ids) | IDs of the private subnets |
1 : 0 + type = "ingress" + from_port = var.inbound_port + to_port = var.outbound_port + protocol = "tcp" + security_group_id = aws_security_group.vpce.id + cidr_blocks = [var.ingress_cidr_range] + description = "Allow ingress to VPCE on port ${var.inbound_port} from ${var.ingress_cidr_range}" +} + +resource "aws_security_group_rule" "vpce_egress" { + type = "egress" + from_port = var.outbound_port + to_port = var.outbound_port + protocol = "tcp" + security_group_id = aws_security_group.vpce.id + cidr_blocks = ["0.0.0.0/0"] + description = "Allow outbound traffic from VPCE on port ${var.outbound_port}" +} + +resource "aws_vpc_endpoint" "endpoint" { + vpc_id = var.vpc_id + service_name = var.service_name + vpc_endpoint_type = "Interface" + security_group_ids = [ + aws_security_group.vpce.id + ] + private_dns_enabled = false + + subnet_ids = var.subnet_ids + + tags = { + # Used for naming resource in AWS console + Name = "${var.name_prefix}-${var.vpce_name}" + } +} + +resource "aws_route53_record" "vpc_endpoint" { + count = var.hosted_zone_name != "" ? 
1 : 0 + zone_id = var.hosted_zone_id + name = "${var.vpce_name}.${var.hosted_zone_name}" + type = "A" + + alias { + evaluate_target_health = true + name = aws_vpc_endpoint.endpoint.dns_entry[0]["dns_name"] + zone_id = aws_vpc_endpoint.endpoint.dns_entry[0]["hosted_zone_id"] + } +} + diff --git a/infrastructure/modules/vpce/outputs.tf b/infrastructure/modules/vpce/outputs.tf new file mode 100644 index 0000000..3637b02 --- /dev/null +++ b/infrastructure/modules/vpce/outputs.tf @@ -0,0 +1,11 @@ +output "vpce_arn" { + value = aws_vpc_endpoint.endpoint.arn +} + +output "vpce_dns_name" { + value = aws_vpc_endpoint.endpoint.dns_entry[0]["dns_name"] +} + +output "vpce_hosted_zone_id" { + value = aws_vpc_endpoint.endpoint.dns_entry[0]["hosted_zone_id"] +} diff --git a/infrastructure/modules/vpce/readme.md b/infrastructure/modules/vpce/readme.md new file mode 100644 index 0000000..e2fdac4 --- /dev/null +++ b/infrastructure/modules/vpce/readme.md @@ -0,0 +1,55 @@ +# VPC Endpoints + + + +## Requirements + +No requirements. + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider_aws) | n/a | + +## Modules + +No modules. 
+ +## Resources + +| Name | Type | +|------|------| +| [aws_route53_record.vpc_endpoint](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route53_record) | resource | +| [aws_security_group.vpce](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | +| [aws_security_group_rule.vpce_egress](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource | +| [aws_security_group_rule.vpce_ingress_from_cidr_range](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource | +| [aws_security_group_rule.vpce_ingress_from_sg](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource | +| [aws_vpc_endpoint.endpoint](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/vpc_endpoint) | resource | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [hosted_zone_id](#input_hosted_zone_id) | Set the hosted zone id if you would like a R53 alias record set up for this VPCE | `any` | n/a | yes | +| [hosted_zone_name](#input_hosted_zone_name) | Set the hosted zone name if you would like a R53 alias record set up for this VPCE | `any` | n/a | yes | +| [inbound_port](#input_inbound_port) | TCP port for which ingress will be allowed to VPCE | `any` | n/a | yes | +| [ingress_cidr_range](#input_ingress_cidr_range) | Optional CIDR range that will be allowed to send traffic to VPCE e.g. 
the VPC cidr range | `string` | `""` | no | +| [name_prefix](#input_name_prefix) | the environment and project | `any` | n/a | yes | +| [outbound_port](#input_outbound_port) | TCP port for which egress will be allowed to VPCE | `any` | n/a | yes | +| [service_name](#input_service_name) | VPC endpoint service name to connect to | `any` | n/a | yes | +| [source_sg_id](#input_source_sg_id) | Optional id of source SG that will be allowed to send traffic to VPCE e.g. RDS SG | `string` | `""` | no | +| [subnet_azs](#input_subnet_azs) | AZs of subnets to associate - this must match the subnets of the remote VPC endpoint service e.g. euw2-az2, euw2-az3 | `list(string)` | `[]` | no | +| [subnet_ids](#input_subnet_ids) | Subnet ids | `any` | n/a | yes | +| [vpc_id](#input_vpc_id) | VPC id | `any` | n/a | yes | +| [vpce_name](#input_vpce_name) | The name of the VPCE | `any` | n/a | yes | + +## Outputs + +| Name | Description | +|------|-------------| +| [vpce_arn](#output_vpce_arn) | n/a | +| [vpce_dns_name](#output_vpce_dns_name) | n/a | +| [vpce_hosted_zone_id](#output_vpce_hosted_zone_id) | n/a | + + diff --git a/infrastructure/modules/vpce/variables.tf b/infrastructure/modules/vpce/variables.tf new file mode 100644 index 0000000..5c0db05 --- /dev/null +++ b/infrastructure/modules/vpce/variables.tf @@ -0,0 +1,51 @@ +variable "name_prefix" { + description = "the environment and project" +} + +variable "vpce_name" { + description = "The name of the VPCE" +} + +variable "hosted_zone_name" { + description = "Set the hosted zone name if you would like a R53 alias record set up for this VPCE" +} + +variable "hosted_zone_id" { + description = "Set the hosted zone id if you would like a R53 alias record set up for this VPCE" +} + +variable "inbound_port" { + description = "TCP port for which ingress will be allowed to VPCE" +} + +variable "outbound_port" { + description = "TCP port for which egress will be allowed to VPCE" +} + +variable "service_name" { + description = "VPC 
endpoint service name to connect to" +} + +variable "source_sg_id" { + description = "Optional id of source SG that will be allowed to send traffic to VPCE e.g. RDS SG" + default = "" +} + +variable "ingress_cidr_range" { + description = "Optional CIDR range that will be allowed to send traffic to VPCE e.g. the VPC cidr range" + default = "" +} + +variable "subnet_ids" { + description = "Subnet ids" +} + +variable "subnet_azs" { + description = "AZs of subnets to associate - this must match the subnets of the remote VPC endpoint service e.g. euw2-az2, euw2-az3" + type = list(string) + default = [] +} + +variable "vpc_id" { + description = "VPC id" +} diff --git a/infrastructure/modules/vpces/main.tf b/infrastructure/modules/vpces/main.tf new file mode 100644 index 0000000..8c73261 --- /dev/null +++ b/infrastructure/modules/vpces/main.tf @@ -0,0 +1,137 @@ + +# VPC Endpoint Service +resource "aws_vpc_endpoint_service" "service" { + acceptance_required = true + network_load_balancer_arns = [aws_lb.nlb.arn] + tags = { + Name = "${var.vpces_name}" + } +} + +# Get Subnet CIDRs from Subnet IDs +data "aws_subnet" "selected" { + for_each = toset(var.subnet_ids) + + id = each.value +} + +# NLB that forwards to ALB IPs +resource "aws_lb" "nlb" { + name = var.nlb_name + internal = true + load_balancer_type = "network" + subnets = var.subnet_ids + security_groups = [aws_security_group.nlb_sg.id] + + access_logs { + bucket = var.access_logs_bucket + prefix = var.access_logs_prefix + enabled = true + } +} +resource "aws_security_group" "nlb_sg" { + name = var.nlb_name + description = "Security group for NLB" + vpc_id = var.vpc_id +} + + +resource "aws_lb_target_group" "nlb_tg" { + name = var.tg_name + port = 443 + protocol = "TCP" + vpc_id = var.vpc_id + target_type = "alb" + + health_check { + protocol = "HTTPS" + path = "/bss/info" + matcher = "200-404" # if ALB returns 404 its enough for now to confirm its healthy + port = "traffic-port" + interval = 30 + timeout = 10 + 
healthy_threshold = 2 + unhealthy_threshold = 2 + } +} + + +resource "aws_lb_target_group_attachment" "alb_ip_targets" { + + target_group_arn = aws_lb_target_group.nlb_tg.arn + target_id = var.alb_arn + port = 443 +} + + +# NLB Listener +resource "aws_lb_listener" "listener" { + load_balancer_arn = aws_lb.nlb.arn + port = 443 + protocol = "TCP" + + default_action { + type = "forward" + target_group_arn = aws_lb_target_group.nlb_tg.arn + } +} + +#Allow other accounts to use this VPCE service +data "aws_secretsmanager_secret" "pi-account-id" { + name = var.allowed_principal_secret_name +} + +data "aws_secretsmanager_secret_version" "pi-account-id-version" { + secret_id = data.aws_secretsmanager_secret.pi-account-id.id +} + +locals { + pi_account_id = jsondecode(data.aws_secretsmanager_secret_version.pi-account-id-version.secret_string)["aws_account_id"] +} + +resource "aws_vpc_endpoint_service_allowed_principal" "allowed_principal" { + vpc_endpoint_service_id = aws_vpc_endpoint_service.service.id + principal_arn = "arn:aws:iam::${local.pi_account_id}:root" +} + + +resource "aws_security_group_rule" "allow_https_from_nlb" { + type = "ingress" + from_port = 443 + to_port = 443 + protocol = "tcp" + security_group_id = var.target_alb_sg_id + source_security_group_id = aws_security_group.nlb_sg.id + description = "Allow HTTPS traffic from IUVO from IUVO NLB TF" +} + + + +data "aws_ssm_parameter" "allowed_vmc_ips" { + name = var.ssm_parameter_name +} +locals { + vmc_ips = [for ip in split(",", data.aws_ssm_parameter.allowed_vmc_ips.value) : trimspace(ip)] +} + +resource "aws_security_group_rule" "allowed_https" { + for_each = { for ip in nonsensitive(local.vmc_ips) : ip => ip } + + type = "ingress" + from_port = 443 + to_port = 443 + protocol = "tcp" + cidr_blocks = [each.value] + security_group_id = aws_security_group.nlb_sg.id + description = "ingress from IUVO VMC ${each.value}" +} + +resource "aws_security_group_rule" "allowed_egress_to_alb" { + type = "egress" + 
from_port = 443 + to_port = 443 + protocol = "tcp" + source_security_group_id = var.target_alb_sg_id + security_group_id = aws_security_group.nlb_sg.id + description = "egress to app load balancer" +} diff --git a/infrastructure/modules/vpces/outputs.tf b/infrastructure/modules/vpces/outputs.tf new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/infrastructure/modules/vpces/outputs.tf @@ -0,0 +1 @@ + diff --git a/infrastructure/modules/vpces/readme.md b/infrastructure/modules/vpces/readme.md new file mode 100644 index 0000000..dd2b191 --- /dev/null +++ b/infrastructure/modules/vpces/readme.md @@ -0,0 +1,62 @@ +# VPCE-Service + + + +## Requirements + +No requirements. + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider_aws) | n/a | + +## Modules + +No modules. + +## Resources + +| Name | Type | +|------|------| +| [aws_lb.nlb](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lb) | resource | +| [aws_lb_listener.listener](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lb_listener) | resource | +| [aws_lb_target_group.nlb_tg](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lb_target_group) | resource | +| [aws_lb_target_group_attachment.alb_ip_targets](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lb_target_group_attachment) | resource | +| [aws_security_group.nlb_sg](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | +| [aws_security_group_rule.allow_https_from_nlb](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource | +| [aws_security_group_rule.allowed_egress_to_alb](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource | +| 
[aws_security_group_rule.allowed_https](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource | +| [aws_vpc_endpoint_service.service](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/vpc_endpoint_service) | resource | +| [aws_vpc_endpoint_service_allowed_principal.allowed_principal](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/vpc_endpoint_service_allowed_principal) | resource | +| [aws_secretsmanager_secret.pi-account-id](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/secretsmanager_secret) | data source | +| [aws_secretsmanager_secret_version.pi-account-id-version](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/secretsmanager_secret_version) | data source | +| [aws_ssm_parameter.allowed_vmc_ips](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | +| [aws_subnet.selected](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/subnet) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [access_logs_bucket](#input_access_logs_bucket) | The S3 bucket to store access logs | `string` | n/a | yes | +| [access_logs_prefix](#input_access_logs_prefix) | The S3 prefix for access logs | `string` | n/a | yes | +| [alb_arn](#input_alb_arn) | The ARN of the ALB to target | `string` | n/a | yes | +| [alb_listener](#input_alb_listener) | The ARN of the ALB listener to target | `string` | n/a | yes | +| [allowed_principal_secret_name](#input_allowed_principal_secret_name) | The name of the Secrets Manager secret containing the AWS account ID allowed to use this VPCE service | `string` | n/a | yes | +| [environment](#input_environment) | The name of the Environment this is deployed into, for example CICD, NFT, UAT or PROD | `string` | n/a | yes 
| +| [nation](#input_nation) | en for england or ni for northern ireland | `string` | `"en"` | no | +| [nlb_name](#input_nlb_name) | The name of the Network Load Balancer | `string` | n/a | yes | +| [prefix](#input_prefix) | The prefix to use for naming resources | `string` | n/a | yes | +| [ssm_parameter_name](#input_ssm_parameter_name) | The name of the SSM parameter to store the allowed IPs | `string` | n/a | yes | +| [subnet_ids](#input_subnet_ids) | The Subnet IDs where the Network Load Balancer will be created | `list(string)` | n/a | yes | +| [target_alb_sg_id](#input_target_alb_sg_id) | The security group ID of the target ALB to allow inbound from the NLB | `string` | n/a | yes | +| [tg_name](#input_tg_name) | The name of the Target Group | `string` | n/a | yes | +| [vpc_id](#input_vpc_id) | The VPC ID where the VPC Endpoint Service will be created | `string` | n/a | yes | +| [vpces_name](#input_vpces_name) | The name of the VPC Endpoint Service | `string` | n/a | yes | + +## Outputs + +No outputs. 
+ + diff --git a/infrastructure/modules/vpces/variables.tf b/infrastructure/modules/vpces/variables.tf new file mode 100644 index 0000000..41497a9 --- /dev/null +++ b/infrastructure/modules/vpces/variables.tf @@ -0,0 +1,79 @@ +variable "environment" { + description = "The name of the Environment this is deployed into, for example CICD, NFT, UAT or PROD" + type = string +} + +variable "nation" { + description = "en for england or ni for northern ireland" + type = string + default = "en" +} + +variable "prefix" { + description = "The prefix to use for naming resources" + type = string +} + + +variable "vpces_name" { + description = "The name of the VPC Endpoint Service" + type = string +} + +variable "nlb_name" { + description = "The name of the Network Load Balancer" + type = string +} + +variable "tg_name" { + description = "The name of the Target Group" + type = string +} + + +variable "vpc_id" { + description = "The VPC ID where the VPC Endpoint Service will be created" + type = string + +} + +variable "subnet_ids" { + description = "The Subnet IDs where the Network Load Balancer will be created" + type = list(string) +} + +variable "alb_arn" { + description = "The ARN of the ALB to target" + type = string + +} + +variable "alb_listener" { + description = "The ARN of the ALB listener to target" + type = string +} + +variable "allowed_principal_secret_name" { + description = "The name of the Secrets Manager secret containing the AWS account ID allowed to use this VPCE service" + type = string +} + +variable "target_alb_sg_id" { + description = "The security group ID of the target ALB to allow inbound from the NLB" + type = string +} + +variable "ssm_parameter_name" { + description = "The name of the SSM parameter to store the allowed IPs" + type = string +} + +variable "access_logs_bucket" { + description = "The S3 bucket to store access logs" + type = string +} + +variable "access_logs_prefix" { + description = "The S3 prefix for access logs" + type = string +} 
diff --git a/infrastructure/modules/waf/main.tf b/infrastructure/modules/waf/main.tf new file mode 100644 index 0000000..173a77a --- /dev/null +++ b/infrastructure/modules/waf/main.tf @@ -0,0 +1,551 @@ + +data "aws_secretsmanager_secret_version" "waf_ips" { + secret_id = "${var.name_prefix}-waf-ip-set" +} +data "aws_secretsmanager_secret_version" "waf_bsis_ip_range" { + secret_id = "${var.name_prefix}-waf-bsis-ip" +} + +data "aws_sns_topic" "alert" { + name = var.name_prefix +} + +locals { + ip_list = jsondecode(data.aws_secretsmanager_secret_version.waf_ips.secret_string).ips + bsis_ips = jsondecode(data.aws_secretsmanager_secret_version.waf_bsis_ip_range.secret_string).bsis_ip +} + +####################### +# IP Sets ToDo: Check if the is relevant to our environment +####################### +#### Please note this resource creation might fail on the first run with error stating resource already exists (eventhough Terraform logs shows it is destroyrd) +# whenever there is change ticket raised to investigate this https://nhsd-jira.digital.nhs.uk/browse/SCM-726 +##### +resource "aws_wafv2_ip_set" "bs-select-exclude-ip-set" { + name = var.exclude_ip_set_name + description = "This set of IPs are excluded from Anonymous and linux rule" + scope = "REGIONAL" + ip_address_version = "IPV4" + addresses = local.ip_list +} + +#########For web Services add/remove on tfvars######### +resource "aws_wafv2_ip_set" "bs-select-webservices-ip-set" { + name = var.web_services_ip_set_name + description = "This set of IPs are excluded from Anonymous and linux rule" + scope = "REGIONAL" + ip_address_version = "IPV4" + addresses = local.bsis_ips +} + +###################### +# WAF +###################### + + +resource "aws_wafv2_web_acl" "bss-waf-acl" { + name = var.waf_name + scope = "REGIONAL" + #checkov:skip=CKV_AWS_192:Even after adding required code to manage log4j still checkov failing ,New ticket- https://nhsd-jira.digital.nhs.uk/browse/SCM-695 raised to check this + + 
default_action { + allow {} + } + + # Primary Web ACL metric + visibility_config { + cloudwatch_metrics_enabled = true + metric_name = "${var.name_prefix}-waf-acl-metric" + sampled_requests_enabled = true + } + + # Custom rule for paths and IP set exclusion + rule { + name = "bss-webservices-rule" + priority = 80 + + action { + block {} + } + # web service rules + statement { + and_statement { + + statement { + or_statement { + statement { + byte_match_statement { + search_string = "/bss/dashboardExtracts" + field_to_match { + uri_path {} + } + text_transformation { + priority = 0 + type = "NONE" + } + positional_constraint = "CONTAINS" + } + } + # Statements not currently in live, ticket SCM-1826 created to investigate + # statement { + # byte_match_statement { + # search_string = "/bss/screeningbatchresults" + # field_to_match { + # uri_path {} + # } + # text_transformation { + # priority = 0 + # type = "NONE" + # } + # positional_constraint = "CONTAINS" + # } + # } + # statement { + # byte_match_statement { + # search_string = "/bss/nonbatchreferrals" + # field_to_match { + # uri_path {} + # } + # text_transformation { + # priority = 0 + # type = "NONE" + # } + # positional_constraint = "CONTAINS" + # } + # } + statement { + byte_match_statement { + search_string = "/bss/rawdatamigration" + field_to_match { + uri_path {} + } + text_transformation { + priority = 0 + type = "NONE" + } + positional_constraint = "CONTAINS" + } + } + } + } + + # Not statement to block requests that are not from the allowed IP set + statement { + not_statement { + statement { + ip_set_reference_statement { + arn = aws_wafv2_ip_set.bs-select-webservices-ip-set.arn + } + } + } + } + } + } + + visibility_config { + cloudwatch_metrics_enabled = true + metric_name = "bss-webservices-rule" + sampled_requests_enabled = true + } + } + + # Base rules for all service teams + rule { + name = "${var.name_prefix}-aws-common-rule-set" + priority = 10 + + override_action { + count {} + } + + 
statement { + managed_rule_group_statement { + name = "AWSManagedRulesCommonRuleSet" + vendor_name = "AWS" + } + } + + visibility_config { + cloudwatch_metrics_enabled = true + metric_name = "${var.name_prefix}-waf-aws-common-rule-set-metric" + sampled_requests_enabled = true + } + } + + rule { + name = "${var.name_prefix}-aws-bad-inputs-rule-set" + priority = 20 + + override_action { + count {} + } + + statement { + managed_rule_group_statement { + name = "AWSManagedRulesKnownBadInputsRuleSet" + vendor_name = "AWS" + } + } + + visibility_config { + cloudwatch_metrics_enabled = true + metric_name = "${var.name_prefix}-waf-aws-bad-inputs-rule-set-metric" + sampled_requests_enabled = true + } + } + + rule { + name = "${var.name_prefix}-aws-ip-reputation-list" + priority = 30 + + override_action { + count {} + } + + statement { + managed_rule_group_statement { + name = "AWSManagedRulesAmazonIpReputationList" + vendor_name = "AWS" + } + } + + visibility_config { + cloudwatch_metrics_enabled = true + metric_name = "${var.name_prefix}-waf-aws-ip-reputation-list-metric" + sampled_requests_enabled = true + } + } + + rule { + name = "${var.name_prefix}-aws-sql-injection-rules" + priority = 40 + + override_action { + count {} + } + + statement { + managed_rule_group_statement { + name = "AWSManagedRulesSQLiRuleSet" + vendor_name = "AWS" + } + } + + visibility_config { + cloudwatch_metrics_enabled = true + metric_name = "${var.name_prefix}-waf-aws-sql-injection-rules-metric" + sampled_requests_enabled = true + } + } + + # Service-team specfic rules + rule { + name = "${var.name_prefix}-waf-non-GB-geo-match" + priority = 100 + action { + count {} + } + statement { + not_statement { + statement { + geo_match_statement { + country_codes = ["GB"] + } + } + } + } + visibility_config { + cloudwatch_metrics_enabled = true + metric_name = "${var.name_prefix}-waf-non-GB-geo-match-metric" + sampled_requests_enabled = true + } + } + + rule { + name = 
"${var.name_prefix}-waf-aws-anonymous-ip-list-set" + priority = 50 + + override_action { + none {} + } + + statement { + managed_rule_group_statement { + name = "AWSManagedRulesAnonymousIpList" + vendor_name = "AWS" + scope_down_statement { + not_statement { + statement { + ip_set_reference_statement { + arn = aws_wafv2_ip_set.bs-select-exclude-ip-set.arn + } + } + + } + } + } + } + + visibility_config { + cloudwatch_metrics_enabled = true + metric_name = "${var.name_prefix}-waf-aws-anonymous-ip-list-set-metric" + sampled_requests_enabled = true + } + } + + + rule { + name = "${var.name_prefix}-waf-aws-linux-rule-set" + priority = 60 + + override_action { + none {} + } + + statement { + managed_rule_group_statement { + name = "AWSManagedRulesLinuxRuleSet" + vendor_name = "AWS" + + scope_down_statement { + not_statement { + statement { + ip_set_reference_statement { + arn = aws_wafv2_ip_set.bs-select-exclude-ip-set.arn + } + } + + } + } + } + } + + + visibility_config { + cloudwatch_metrics_enabled = true + metric_name = "${var.name_prefix}-waf-aws-linux-rule-set-metric" + sampled_requests_enabled = true + } + } + rule { + name = "AWS-AWSManagedRulesKnownBadInputsRuleSet" + priority = 70 + + override_action { + none {} + } + + statement { + managed_rule_group_statement { + name = "AWSManagedRulesKnownBadInputsRuleSet" + vendor_name = "AWS" + } + } + visibility_config { + cloudwatch_metrics_enabled = true + metric_name = "${var.name_prefix}-waf-known-bad-inputs-rules" + sampled_requests_enabled = true + } + + } + +} + +resource "aws_cloudwatch_log_group" "waf_logs" { + // Note CW log group name should begin aws-waf-logs + name = var.waf_log_group_name + retention_in_days = 365 +} + +resource "aws_wafv2_web_acl_logging_configuration" "waf_acl_lc" { + log_destination_configs = [aws_cloudwatch_log_group.waf_logs.arn] + resource_arn = aws_wafv2_web_acl.bss-waf-acl.arn +} +# Create a CloudWatch Log Group with KMS Encryption + +################################## +####### 
Forward logs to CSOC ##### +################################## + +# Create IAM role necessary for cross-account log subscriptions +resource "aws_iam_role" "cw_to_subscription_filter_role" { + name = "${var.name_prefix}_CWLtoSubscriptionFilterRole" + assume_role_policy = data.aws_iam_policy_document.central_logs_assume_role.json +} + +data "aws_iam_policy_document" "central_logs_assume_role" { + statement { + sid = "centralLogsAssumeRole" + effect = "Allow" + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["logs.${var.aws_region}.amazonaws.com"] + } + } +} + + + +# Permissions policy to define actions cloudwatch logs can perform +resource "aws_iam_policy" "central_cw_subscription_iam_policy" { + name = "${var.name_prefix}_central_cw_subscription" + policy = data.aws_iam_policy_document.central_cw_subscription_doc_policy.json +} + +data "aws_iam_policy_document" "central_cw_subscription_doc_policy" { + statement { + actions = [ + "logs:PutLogEvents" + ] + resources = [ + "arn:aws:logs:${var.aws_region}:${var.aws_account_id}:log-group:aws-waf-logs-${var.name_prefix}:*" + ] + } +} + +resource "aws_iam_role_policy_attachment" "central_logging_att" { + policy_arn = aws_iam_policy.central_cw_subscription_iam_policy.arn + role = aws_iam_role.cw_to_subscription_filter_role.id +} + +data "aws_secretsmanager_secret" "cloudwatch-cross-accounts" { + name = "${var.name_prefix}-cloudwatch-cross-account-logging" +} + +data "aws_secretsmanager_secret_version" "cloudwatch-cross-accounts" { + secret_id = data.aws_secretsmanager_secret.cloudwatch-cross-accounts.id +} + +locals { + cross_account_id = jsondecode(data.aws_secretsmanager_secret_version.cloudwatch-cross-accounts.secret_string)["central-logging"] +} + +resource "time_sleep" "wait_30_seconds" { + depends_on = [aws_iam_role.cw_to_subscription_filter_role] + create_duration = "30s" +} +# The subscription filter to send to the central logging +resource "aws_cloudwatch_log_subscription_filter" 
"central_logging" { + name = "${var.name_prefix}_central_logging" + role_arn = aws_iam_role.cw_to_subscription_filter_role.arn + log_group_name = var.waf_log_group_name + filter_pattern = "" + destination_arn = "arn:aws:logs:${var.aws_region}:${local.cross_account_id}:destination:waf_log_destination" + distribution = "ByLogStream" + + depends_on = [ + aws_iam_role.cw_to_subscription_filter_role, + aws_cloudwatch_log_group.waf_logs, + time_sleep.wait_30_seconds + ] +} + +# Send to splunk as well for our own logging/troubleshooting +resource "aws_cloudwatch_log_subscription_filter" "splunk_subscr_filter" { + name = "${var.name_prefix}_splunk_subscr_filter" + role_arn = "arn:aws:iam::${var.aws_account_id}:role/${var.name_prefix}-CloudWatchToFirehoseRole" + log_group_name = var.waf_log_group_name + filter_pattern = "" + destination_arn = "arn:aws:firehose:${var.aws_region}:${var.aws_account_id}:deliverystream/${var.name_prefix}-cw-logs-firehose" + distribution = "ByLogStream" + + depends_on = [ + aws_cloudwatch_log_group.waf_logs + ] +} + +############################## +# DDoS Alarm logs forwarding to CSOC +############################## +resource "aws_iam_role" "eventbridge_role" { + count = contains(["prod"], var.environment) ? 1 : 0 + name = "${var.name_prefix}-eventbridge-trust-role" + + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Sid = "TrustEventBridgeService" + Effect = "Allow" + Principal = { + Service = "events.amazonaws.com" + } + Action = "sts:AssumeRole" + Condition = { + StringEquals = { + "aws:SourceAccount" = "${var.aws_account_id}" + } + } + } + ] + }) +} +resource "aws_iam_role_policy" "eventbridge_put_events" { + count = contains(["prod"], var.environment) ? 
1 : 0 + + name = "${var.name_prefix}-eventbridge-put-events" + role = aws_iam_role.eventbridge_role[0].id + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Sid = "ActionsForResource" + Effect = "Allow" + Action = [ + "events:PutEvents" + ] + Resource = [ + "arn:aws:events:eu-west-2:${local.cross_account_id}:event-bus/shield-eventbus" + ] + } + ] + }) +} + +resource "aws_cloudwatch_metric_alarm" "shield_ddos_alarm" { + count = contains(["prod"], var.environment) ? 1 : 0 + alarm_name = "${var.name_prefix}_shield_ddos_WAF" + comparison_operator = "GreaterThanThreshold" + evaluation_periods = 20 + datapoints_to_alarm = 1 + metric_name = "DDoSDetected" + namespace = "AWS/DDoSProtection" + period = 60 + statistic = "Maximum" + threshold = 0 + treat_missing_data = "notBreaching" + + dimensions = { + ResourceArn = aws_wafv2_web_acl.bss-waf-acl.arn + } + + alarm_actions = [data.aws_sns_topic.alert.arn] + ok_actions = [data.aws_sns_topic.alert.arn] + insufficient_data_actions = [] + + alarm_description = "Alarm triggers when Shield Advanced detects a DDoS attack on production WAF" +} + +resource "aws_cloudwatch_event_rule" "shield_ddos_rule" { + count = contains(["prod"], var.environment) ? 1 : 0 + name = "${var.name_prefix}_shield_ddos_rules" + description = "Forward DDoS alarm state change events to cross-account EventBridge bus" + + event_pattern = jsonencode({ + source = ["aws.cloudwatch"] + "detail-type" = ["CloudWatch Alarm State Change"] + resources = [aws_cloudwatch_metric_alarm.shield_ddos_alarm[0].arn] + }) +} + +resource "aws_cloudwatch_event_target" "shield_ddos_target" { + count = contains(["prod"], var.environment) ? 
1 : 0 + + rule = aws_cloudwatch_event_rule.shield_ddos_rule[count.index].name + target_id = "${var.name_prefix}-shield-ddos-target" + arn = "arn:aws:events:eu-west-2:${local.cross_account_id}:event-bus/shield-eventbus" + role_arn = aws_iam_role.eventbridge_role[count.index].arn +} diff --git a/infrastructure/modules/waf/outputs.tf b/infrastructure/modules/waf/outputs.tf new file mode 100644 index 0000000..3d5cc37 --- /dev/null +++ b/infrastructure/modules/waf/outputs.tf @@ -0,0 +1,4 @@ +output "web_acl_arn" { + value = aws_wafv2_web_acl.bss-waf-acl.arn +} + diff --git a/infrastructure/modules/waf/readme.md b/infrastructure/modules/waf/readme.md new file mode 100644 index 0000000..b1b1051 --- /dev/null +++ b/infrastructure/modules/waf/readme.md @@ -0,0 +1,68 @@ +# WAF + + + +## Requirements + +No requirements. + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider_aws) | n/a | +| [time](#provider_time) | n/a | + +## Modules + +No modules. + +## Resources + +| Name | Type | +|------|------| +| [aws_cloudwatch_event_rule.shield_ddos_rule](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_rule) | resource | +| [aws_cloudwatch_event_target.shield_ddos_target](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_target) | resource | +| [aws_cloudwatch_log_group.waf_logs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group) | resource | +| [aws_cloudwatch_log_subscription_filter.central_logging](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_subscription_filter) | resource | +| [aws_cloudwatch_log_subscription_filter.splunk_subscr_filter](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_subscription_filter) | resource | +| 
[aws_cloudwatch_metric_alarm.shield_ddos_alarm](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_metric_alarm) | resource | +| [aws_iam_policy.central_cw_subscription_iam_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | +| [aws_iam_role.cw_to_subscription_filter_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [aws_iam_role.eventbridge_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [aws_iam_role_policy.eventbridge_put_events](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy) | resource | +| [aws_iam_role_policy_attachment.central_logging_att](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_wafv2_ip_set.bs-select-exclude-ip-set](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/wafv2_ip_set) | resource | +| [aws_wafv2_ip_set.bs-select-webservices-ip-set](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/wafv2_ip_set) | resource | +| [aws_wafv2_web_acl.bss-waf-acl](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/wafv2_web_acl) | resource | +| [aws_wafv2_web_acl_logging_configuration.waf_acl_lc](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/wafv2_web_acl_logging_configuration) | resource | +| [time_sleep.wait_30_seconds](https://registry.terraform.io/providers/hashicorp/time/latest/docs/resources/sleep) | resource | +| [aws_iam_policy_document.central_cw_subscription_doc_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| 
[aws_iam_policy_document.central_logs_assume_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_secretsmanager_secret.cloudwatch-cross-accounts](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/secretsmanager_secret) | data source | +| [aws_secretsmanager_secret_version.cloudwatch-cross-accounts](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/secretsmanager_secret_version) | data source | +| [aws_secretsmanager_secret_version.waf_bsis_ip_range](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/secretsmanager_secret_version) | data source | +| [aws_secretsmanager_secret_version.waf_ips](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/secretsmanager_secret_version) | data source | +| [aws_sns_topic.alert](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/sns_topic) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [aws_account_id](#input_aws_account_id) | n/a | `any` | n/a | yes | +| [aws_region](#input_aws_region) | n/a | `any` | n/a | yes | +| [environment](#input_environment) | Environment i.e prod, nonprod | `any` | n/a | yes | +| [exclude_ip_set_name](#input_exclude_ip_set_name) | Service | `any` | n/a | yes | +| [name_prefix](#input_name_prefix) | n/a | `any` | n/a | yes | +| [waf_log_group_name](#input_waf_log_group_name) | waf log group | `any` | n/a | yes | +| [waf_name](#input_waf_name) | waf name | `any` | n/a | yes | +| [web_services_ip_set_name](#input_web_services_ip_set_name) | n/a | `any` | n/a | yes | +| [webservices_ip_set_addresses](#input_webservices_ip_set_addresses) | List of IP addresses for web services | `list(string)` | n/a | yes | + +## Outputs + +| Name | Description | +|------|-------------| +| 
[web_acl_arn](#output_web_acl_arn) | n/a | + + diff --git a/infrastructure/modules/waf/variables.tf b/infrastructure/modules/waf/variables.tf new file mode 100644 index 0000000..f06e2dc --- /dev/null +++ b/infrastructure/modules/waf/variables.tf @@ -0,0 +1,36 @@ +variable "waf_log_group_name" { + description = "waf log group" +} + +variable "waf_name" { + description = "waf name" +} + + +variable "exclude_ip_set_name" { + description = "Service" +} +variable "web_services_ip_set_name" { + +} + +variable "aws_account_id" { + +} + +variable "name_prefix" { + +} + +variable "aws_region" { + +} + +variable "webservices_ip_set_addresses" { + description = "List of IP addresses for web services" + type = list(string) +} +variable "environment" { + description = "Environment i.e prod, nonprod" +} + diff --git a/scripts/config/vale/styles/config/vocabularies/words/accept.txt b/scripts/config/vale/styles/config/vocabularies/words/accept.txt index e3ed868..0d6cc19 100644 --- a/scripts/config/vale/styles/config/vocabularies/words/accept.txt +++ b/scripts/config/vale/styles/config/vocabularies/words/accept.txt @@ -1,24 +1,93 @@ [A-Z]+s +[Aa]gex +[Aa][Pp][Ii] +[Aa]utotest +Adhoc +account_id +api_gateway_name +api_path_part +arn +backup_copy_vault_account_id +backup_vault_name +batch_id Bitwarden bot +branchType +buildDir +build_type +callrecall +[Cc][Ii][Cc][Dd] +[Cc]ognito +[Cc]ron +checkdigit +Concat +config Cyber -Cognito +[Dd]ebounced Dependabot -england +Dockerfile +domain_name_prefix +en +full[Bb]uild +function_name +[Gg][Pp] +GitHub Gitleaks -Github Grype +handler_prefix +healthcheck +http_method idempotence -ireland +iam +itterate +Jira +jQuery +lambda_function +lambda_layer +lambda_log_group_name +lambda_source +[Mm]akefile +Medifact +mgmt +name_prefix +ni +nonfemale OAuth Octokit onboarding +[Oo]pen[Ii][Dd] +[Oo]utcode Podman +preprod +prewritten +project_name +printFooter +printHeader Python -repo rbac_role +readonly +recovery_window +repo +role_name +[Ss]martcard 
+[Ss]onarcloud +[Ss]ql sed +shortcode +source_account_name +stage_name +start_time +subnet Syft -Terraform +[Tt]eardown +[Tt]erraform +[Tt]utum +terraform-aws-modules toolchain +tracing_mode Trufflehog +URL +url +vault_lock_type +vault_name +[Uu][Uu][Ii][Dd]