diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile deleted file mode 100644 index 2b1266c3f4..0000000000 --- a/.devcontainer/Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -FROM mcr.microsoft.com/devcontainers/java:25 - -# Install essential tools -RUN apt-get update && \ - apt-get install -y vim curl wget git jq && \ - apt-get clean && \ - rm -rf /var/lib/apt/lists/* - -# Install sbt -RUN curl -fL "https://github.com/sbt/sbt/releases/download/v1.12.0/sbt-1.12.0.tgz" | tar xz -C /tmp && \ - mv /tmp/sbt/bin/sbt /usr/local/bin/ && \ - mv /tmp/sbt/bin/sbt-launch.jar /usr/local/bin/ && \ - chmod +x /usr/local/bin/sbt && \ - rm -rf /tmp/sbt \ No newline at end of file diff --git a/.devcontainer/Dockerfile.app b/.devcontainer/Dockerfile.app new file mode 100644 index 0000000000..888de2b479 --- /dev/null +++ b/.devcontainer/Dockerfile.app @@ -0,0 +1,63 @@ +FROM mcr.microsoft.com/devcontainers/base:debian + +# ca-certificates, curl, git are already in the devcontainers base image. +# fd-find: fast file finder (aliased to fd below) +# fzf: fuzzy finder for files and command history +# gh: GitHub CLI +# gosu: drops privileges in the entrypoint +# jq: JSON processor +# ripgrep: fast recursive grep (rg) +# tmux: terminal multiplexer +# vim: text editor +RUN apt-get update \ + && apt-get install -y --no-install-recommends fd-find fzf gh gosu jq ripgrep tmux vim \ + && ln -s $(which fdfind) /usr/local/bin/fd \ + && rm -rf /var/lib/apt/lists/* + +COPY --chmod=755 sandcat/scripts/app-init.sh /usr/local/bin/app-init.sh +COPY --chmod=755 sandcat/scripts/app-user-init.sh /usr/local/bin/app-user-init.sh +COPY --chown=vscode:vscode sandcat/tmux.conf /home/vscode/.tmux.conf + +USER vscode + +# Install Claude Code (native binary — no Node.js required). +RUN curl -fsSL https://claude.ai/install.sh | bash + +# Install mise (SDK manager) for language toolchains. +RUN curl https://mise.run | sh +# Make mise available in login shells (su - vscode) and Docker CMD/RUN. 
+RUN echo 'export PATH="/home/vscode/.local/bin:/home/vscode/.local/share/mise/shims:$PATH"' >> /home/vscode/.profile +ENV PATH="/home/vscode/.local/bin:/home/vscode/.local/share/mise/shims:$PATH" + +# Development stacks (managed by sandcat init --stacks): +RUN mise use -g java@lts +RUN mise use -g scala@latest && mise use -g sbt@latest +# END STACKS + +# If Java was installed above, bake JAVA_HOME and JAVA_TOOL_OPTIONS into +# .bashrc so VS Code's env probe picks them up before the entrypoint runs. +# Without JAVA_HOME, JVM tooling like Metals fails to find the JDK. +# JAVA_TOOL_OPTIONS points to a trust store copy that the entrypoint will +# populate with the mitmproxy CA at runtime; until then it holds the default +# Java CAs (harmless — equivalent to not setting it at all). +# A version-independent symlink is used so .bashrc doesn't need updating +# when the Java version changes — only the symlink target is updated. +RUN if MISE_JAVA=$(mise where java 2>/dev/null); then \ + dir="$HOME/.local/share/sandcat"; mkdir -p "$dir"; \ + ln -sfn "$MISE_JAVA" "$dir/java-home"; \ + cp "$MISE_JAVA/lib/security/cacerts" "$dir/cacerts" 2>/dev/null || true; \ + { echo ''; \ + echo '# sandcat-java-env'; \ + echo '[ -L "$HOME/.local/share/sandcat/java-home" ] && export JAVA_HOME="$HOME/.local/share/sandcat/java-home"'; \ + echo '[ -f "$HOME/.local/share/sandcat/cacerts" ] && export JAVA_TOOL_OPTIONS="-Djavax.net.ssl.trustStore=$HOME/.local/share/sandcat/cacerts -Djavax.net.ssl.trustStorePassword=changeit"'; \ + } >> "$HOME/.bashrc"; \ + fi + +# Pre-create ~/.claude so Docker bind-mounts (CLAUDE.md, agents/, commands/) +# don't cause it to be created as root-owned. 
+RUN mkdir -p /home/vscode/.claude + +RUN echo 'alias claude-yolo="claude --dangerously-skip-permissions"' >> /home/vscode/.bashrc + +USER root +ENTRYPOINT ["/usr/local/bin/app-init.sh"] diff --git a/.devcontainer/compose-all.yml b/.devcontainer/compose-all.yml new file mode 100644 index 0000000000..268259db60 --- /dev/null +++ b/.devcontainer/compose-all.yml @@ -0,0 +1,49 @@ +name: tapir-sandbox +include: + - path: sandcat/compose-proxy.yml +services: + agent: + build: + context: . + dockerfile: Dockerfile.app + # Share wg-client's network namespace so all traffic goes through its + # WireGuard tunnel. The app container has no NET_ADMIN capability, + # so processes inside cannot modify routing, iptables, or the tunnel. + network_mode: "service:wg-client" + volumes: + # Named volume for the home directory so Claude Code auth state, + # shell history, and other user-level config persist across rebuilds. + - app-home:/home/vscode + # Shared volume from mitmproxy containing the CA cert and + # sandcat.env (env vars + secret placeholders). Read-only — app + # containers should never write to this. 
+ - mitmproxy-config:/mitmproxy-config:ro + # Mount the project's code + - ..:/workspaces/tapir-sandbox + # Read-only devcontainer directory + - ../.devcontainer:/workspaces/tapir-sandbox/.devcontainer:ro + # Read-only settings directory + - ../.sandcat:/workspaces/tapir-sandbox/.sandcat:ro + # Host Claude config (optional) + - ${HOME}/.claude/CLAUDE.md:/home/vscode/.claude/CLAUDE.md:ro + - ${HOME}/.claude/agents:/home/vscode/.claude/agents:ro + - ${HOME}/.claude/commands:/home/vscode/.claude/commands:ro + # Read-only Git directory + # - ../.git:/workspace/.git:ro + # Read-only IntelliJ IDEA project directory + # - ../.idea:/workspace/.idea:ro + command: sleep infinity + environment: + - CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC=1 + depends_on: + wg-client: + condition: service_healthy + working_dir: /workspaces/tapir-sandbox + mitmproxy: + volumes: + - ../.sandcat:/config/project:ro + # Project-level settings (.sandcat/ directory). If the directory does + # not exist on the host, Docker creates an empty one and the addon + # simply finds no files — no error. 
+volumes: + app-home: diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 62e2c389ed..df8c932bf7 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -1,46 +1,53 @@ { - "name": "tapir", - "build": { - "dockerfile": "Dockerfile" - }, - "features": { - "ghcr.io/devcontainers/features/node:1": { - "version": "24" - }, - "ghcr.io/anthropics/devcontainer-features/claude-code:1.0": {}, - "ghcr.io/devcontainers/features/github-cli:1": { - "installDirectlyFromGitHubRelease": true, - "version": "latest" - } - }, - "customizations": { - "vscode": { - "extensions": [ - "anthropic.claude-code", - "scalameta.metals", - "scala-lang.scala", - "github.vscode-pull-request-github" - ], - "settings": { - "terminal.integrated.defaultProfile.linux": "bash", - "claudeCode.allowDangerouslySkipPermissions": true, - "claudeCode.initialPermissionMode": "bypassPermissions", - "claudeCode.selectedModel": "opus" - } - } - }, - "postCreateCommand": "bash .devcontainer/post-create.sh", - "postStartCommand": "bash .devcontainer/post-start.sh", - "mounts": [ - "type=volume,source=tapir-vscode-home,target=/home/vscode", - "type=bind,source=${localEnv:HOME}/.claude/CLAUDE.md,target=/home/vscode/.claude/CLAUDE.md,readonly", - "type=bind,source=${localEnv:HOME}/.claude/commands,target=/home/vscode/.claude/commands,readonly", - "type=bind,source=${localEnv:HOME}/.claude/agents,target=/home/vscode/.claude/agents,readonly" - ], - "containerEnv": { - "SSH_AUTH_SOCK": "" - }, - "runArgs": [ - "--env-file=${localWorkspaceFolder}/../dev-container-oss.env" - ] -} \ No newline at end of file + "name": "tapir-sandbox", + "dockerComposeFile": "compose-all.yml", + "service": "agent", + "workspaceFolder": "/workspaces/tapir-sandbox", + // Remove credential sockets that VS Code forwards into the container + // (SSH agent, git credential helper). 
Clearing env vars alone only + // hides the paths — the socket files in /tmp can still be discovered + // by scanning. The post-start script deletes them as best-effort + // hardening. + "postStartCommand": "bash /workspaces/tapir-sandbox/.devcontainer/sandcat/scripts/app-post-start.sh", + // VS Code forwards host credential sockets into containers by default. + // Clear them so container code cannot use host SSH keys, GPG signing, + // or VS Code's git credential helpers to authenticate as the host user. + "remoteEnv": { + "SSH_AUTH_SOCK": "", + "GPG_AGENT_INFO": "", + "GIT_ASKPASS": "" + }, + "customizations": { + "vscode": { + "extensions": [ + "anthropic.claude-code", + "github.vscode-pull-request-github", + "redhat.java", + "scalameta.metals", + ], + "settings": { + // Prevent VS Code from copying host .gitconfig into the + // container, which can leak credential helpers and signing + // key references. + "dev.containers.copyGitConfig": false, + // Prompt before trusting workspace settings, which container + // code could modify via the bind-mounted project folder. + "security.workspace.trust.enabled": true, + // Block container extensions from opening a host-side + // terminal, which would bypass the WireGuard tunnel entirely. + // For maximum protection, also set this in your host user + // settings (workspace settings could theoretically override it). + "terminal.integrated.allowLocalTerminal": false, + // Sandcat provides the security boundary (network isolation, + // secret substitution, iptables kill-switch), so permission + // prompts inside the container add friction without meaningful + // security benefit. Remove these if you prefer interactive + // permission approval. + "claudeCode.allowDangerouslySkipPermissions": true, + "claudeCode.initialPermissionMode": "bypassPermissions", + // Optional: override the default Claude model. 
+ "claudeCode.selectedModel": "opus" + } + } + } +} diff --git a/.devcontainer/post-create.sh b/.devcontainer/post-create.sh deleted file mode 100644 index e99542ec06..0000000000 --- a/.devcontainer/post-create.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -set -e - -echo "Running post-create setup..." - -# Override git config with environment variables if provided -if [ -n "$GIT_USER_NAME" ]; then - git config --global user.name "$GIT_USER_NAME" -fi -if [ -n "$GIT_USER_EMAIL" ]; then - git config --global user.email "$GIT_USER_EMAIL" -fi - -# Add claude-yolo alias -echo 'alias claude-yolo="claude --dangerously-skip-permissions"' >> ~/.bashrc - -# Ensure mounted directories exist & fix ownership (Docker volumes are created as root) -mkdir -p /home/vscode/.claude -sudo chown -R vscode:vscode /home/vscode/.claude 2>/dev/null || true - -echo "Post-create setup complete." diff --git a/.devcontainer/post-start.sh b/.devcontainer/post-start.sh deleted file mode 100644 index 0d2ccc77f5..0000000000 --- a/.devcontainer/post-start.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash -set -e - -sudo env PATH="$PATH" claude update diff --git a/.devcontainer/sandcat/Dockerfile.wg-client b/.devcontainer/sandcat/Dockerfile.wg-client new file mode 100644 index 0000000000..81434bc85b --- /dev/null +++ b/.devcontainer/sandcat/Dockerfile.wg-client @@ -0,0 +1,17 @@ +FROM debian:trixie-slim + +# Dependencies for the WireGuard tunnel and iptables kill switch: +# wireguard-tools - `wg` command to configure WireGuard interfaces +# iproute2 - `ip` command for interface, address, route, and rule management +# iptables - firewall rules used as a kill switch (blocks traffic if tunnel drops) +# jq - parse mitmproxy's wireguard.conf JSON to extract key pairs +# openresolv - `resolvconf` command to configure DNS through the tunnel +RUN apt-get update \ + && apt-get install -y --no-install-recommends \ + wireguard-tools iproute2 iptables jq openresolv \ + && rm -rf /var/lib/apt/lists/* + +COPY 
scripts/wg-client-init.sh /usr/local/bin/wg-client-init.sh +RUN chmod +x /usr/local/bin/wg-client-init.sh + +ENTRYPOINT ["/usr/local/bin/wg-client-init.sh"] diff --git a/.devcontainer/sandcat/compose-proxy.yml b/.devcontainer/sandcat/compose-proxy.yml new file mode 100644 index 0000000000..c497232291 --- /dev/null +++ b/.devcontainer/sandcat/compose-proxy.yml @@ -0,0 +1,40 @@ +services: + # Dedicated networking container that manages the WireGuard tunnel to + # mitmproxy. Only this container has NET_ADMIN — no user code runs here. + wg-client: + build: + context: . + dockerfile: Dockerfile.wg-client + volumes: + - mitmproxy-config:/mitmproxy-config:ro + cap_add: + - NET_ADMIN # required for WireGuard interface and iptables setup + sysctls: + - net.ipv4.conf.all.src_valid_mark=1 # required by WireGuard fwmark routing; can't be set inside the container (/proc/sys is read-only) + command: sleep infinity + depends_on: + mitmproxy: + condition: service_healthy + healthcheck: + test: ["CMD", "test", "-f", "/tmp/wg-ready"] + interval: 2s + timeout: 2s + retries: 15 + mitmproxy: + image: ghcr.io/virtuslab/sandcat-mitmproxy-op:latest + command: mitmweb --mode wireguard --web-host 0.0.0.0 --set web_password=mitmproxy -s /scripts/mitmproxy_addon.py + ports: + - "8081" # mitmweb UI; host port assigned dynamically to avoid conflicts + volumes: + - mitmproxy-config:/home/mitmproxy/.mitmproxy + - ./scripts/mitmproxy_addon.py:/scripts/mitmproxy_addon.py:ro + - ~/.config/sandcat/settings.json:/config/settings.json:ro + healthcheck: + test: ["CMD", "test", "-f", "/home/mitmproxy/.mitmproxy/wireguard.conf"] + interval: 2s + timeout: 2s + retries: 15 + environment: + - OP_SERVICE_ACCOUNT_TOKEN +volumes: + mitmproxy-config: diff --git a/.devcontainer/sandcat/scripts/__pycache__/mitmproxy_addon.cpython-314.pyc b/.devcontainer/sandcat/scripts/__pycache__/mitmproxy_addon.cpython-314.pyc new file mode 100644 index 0000000000..0c920dbe82 Binary files /dev/null and 
b/.devcontainer/sandcat/scripts/__pycache__/mitmproxy_addon.cpython-314.pyc differ diff --git a/.devcontainer/sandcat/scripts/app-init.sh b/.devcontainer/sandcat/scripts/app-init.sh new file mode 100644 index 0000000000..af52805751 --- /dev/null +++ b/.devcontainer/sandcat/scripts/app-init.sh @@ -0,0 +1,74 @@ +#!/bin/bash +# +# Entrypoint for containers that share the wg-client's network namespace. +# Installs the mitmproxy CA cert, disables commit signing, loads env vars +# and secret placeholders from sandcat.env, runs vscode-user setup (git +# identity, Java trust store, Claude Code update), then drops to vscode +# and exec's the container's main command. +# +set -e + +CA_CERT="/mitmproxy-config/mitmproxy-ca-cert.pem" + +# The CA cert is guaranteed to exist: app depends_on wg-client (healthy), +# which depends_on mitmproxy (healthy), whose healthcheck requires the +# WireGuard config — generated after the CA. +if [ ! -f "$CA_CERT" ]; then + echo "mitmproxy CA cert not found at $CA_CERT" >&2 + exit 1 +fi + +cp "$CA_CERT" /usr/local/share/ca-certificates/mitmproxy.crt +update-ca-certificates + +# Node.js ignores the system trust store and bundles its own CA certs. +# Point it at the mitmproxy CA so TLS verification works for Node-based +# tools (e.g. Anthropic SDK). +export NODE_EXTRA_CA_CERTS="$CA_CERT" +echo "export NODE_EXTRA_CA_CERTS=\"$CA_CERT\"" > /etc/profile.d/sandcat-node-ca.sh + +# GPG keys are not forwarded into the container (credential isolation), +# so commit signing would always fail. Git env vars have the highest +# precedence, overriding system/global/local/worktree config files. 
+export GIT_CONFIG_COUNT=1 +export GIT_CONFIG_KEY_0="commit.gpgsign" +export GIT_CONFIG_VALUE_0="false" +cat > /etc/profile.d/sandcat-git.sh << 'GITEOF' +export GIT_CONFIG_COUNT=1 +export GIT_CONFIG_KEY_0="commit.gpgsign" +export GIT_CONFIG_VALUE_0="false" +GITEOF + +# Source env vars and secret placeholders (if available) +SANDCAT_ENV="/mitmproxy-config/sandcat.env" +if [ -f "$SANDCAT_ENV" ]; then + . "$SANDCAT_ENV" + # Make vars available to new shells (e.g. VS Code terminals in dev + # containers) that won't inherit the entrypoint's environment. + cp "$SANDCAT_ENV" /etc/profile.d/sandcat-env.sh + count=$(grep -c '^export ' "$SANDCAT_ENV" 2>/dev/null || echo 0) + echo "Loaded $count env var(s) from $SANDCAT_ENV" + grep '^export ' "$SANDCAT_ENV" | sed 's/=.*//' | sed 's/^export / /' +else + echo "No $SANDCAT_ENV found — env vars and secret substitution disabled" +fi + +# Run vscode-user tasks: git identity, Java trust store, Claude Code update. +su - vscode -c /usr/local/bin/app-user-init.sh + +# Source all sandcat profile.d scripts from /etc/bash.bashrc so env vars +# are available in non-login shells (e.g. VS Code integrated terminals). +# Guard with a marker to avoid duplicating on container restart. +BASHRC_MARKER="# sandcat-profile-source" +if ! grep -q "$BASHRC_MARKER" /etc/bash.bashrc 2>/dev/null; then + cat >> /etc/bash.bashrc << 'BASHRC_EOF' + +# sandcat-profile-source +for _f in /etc/profile.d/sandcat-*.sh; do + [ -r "$_f" ] && . "$_f" +done +unset _f +BASHRC_EOF +fi + +exec gosu vscode "$@" diff --git a/.devcontainer/sandcat/scripts/app-post-start.sh b/.devcontainer/sandcat/scripts/app-post-start.sh new file mode 100755 index 0000000000..e5a6098b75 --- /dev/null +++ b/.devcontainer/sandcat/scripts/app-post-start.sh @@ -0,0 +1,38 @@ +#!/bin/bash +# +# Post-start hook for VS Code dev containers. +# Runs after VS Code connects and sets up its remote server. 
+# +# VS Code forwards host credential sockets into the container +# (SSH agent, git credential helper). Clearing the env vars via +# remoteEnv hides the paths, but the socket files still exist in +# /tmp and can be discovered by scanning. Remove them as a +# best-effort hardening measure. +# +# This is not bulletproof — VS Code could recreate sockets on +# reconnect, or change the naming pattern in future versions. +# +set -e + +remove_sockets() { + local pattern="$1" + local label="$2" + local found=0 + + for sock in $pattern; do + [ -e "$sock" ] || continue + if rm -f "$sock" 2>/dev/null; then + echo "sandcat: removed forwarded $label socket: $sock" + else + echo "sandcat: warning: could not remove $sock (owned by root?)" >&2 + fi + found=1 + done + + if [ "$found" -eq 0 ]; then + echo "sandcat: no $label socket found in /tmp (pattern: $pattern)" >&2 + fi +} + +remove_sockets "/tmp/vscode-ssh-auth-*.sock" "SSH agent" +remove_sockets "/tmp/vscode-git-*.sock" "git credential" diff --git a/.devcontainer/sandcat/scripts/app-user-init.sh b/.devcontainer/sandcat/scripts/app-user-init.sh new file mode 100644 index 0000000000..5aea7bba8c --- /dev/null +++ b/.devcontainer/sandcat/scripts/app-user-init.sh @@ -0,0 +1,92 @@ +#!/bin/bash +# +# vscode-user tasks run via su from app-init.sh. +# /etc/profile.d/ is sourced by the login shell, providing +# GIT_USER_NAME, GIT_USER_EMAIL, and NODE_EXTRA_CA_CERTS. +# +set -e + +if [ -n "${GIT_USER_NAME:-}" ]; then + git config --global user.name "$GIT_USER_NAME" +fi +if [ -n "${GIT_USER_EMAIL:-}" ]; then + git config --global user.email "$GIT_USER_EMAIL" +fi + +# GPG keys are not forwarded into the container (credential isolation), +# so commit signing would always fail. Disable it via global config as a +# baseline; app-init.sh also sets GIT_CONFIG env vars which override even +# repo-level config. 
+git config --global commit.gpgsign false + +# SSH keys are not available in the container (SSH_AUTH_SOCK is cleared +# and credential sockets are removed), so rewrite git SSH URLs to HTTPS. +# Sandcat's secret substitution handles GitHub token authentication over +# HTTPS transparently. +git config --global --replace-all url."https://github.com/".insteadOf "git@github.com:" "git@github.com:" +git config --global --replace-all url."https://github.com/".insteadOf "ssh://git@github.com/" "ssh://git@github.com/" + +# If Java is installed (via mise), import the mitmproxy CA into Java's trust +# store. Java uses its own cacerts and ignores the system CA store. +CA_CERT="/mitmproxy-config/mitmproxy-ca-cert.pem" + +# Ensure mise is on PATH. `su - vscode` resets the environment and sources +# only the first of ~/.bash_profile, ~/.bash_login, ~/.profile. If +# ~/.bash_profile exists (e.g. created by VS Code on a persistent volume), +# ~/.profile — where the Dockerfile adds mise — is never read. +if ! command -v mise >/dev/null 2>&1; then + export PATH="/home/vscode/.local/bin:/home/vscode/.local/share/mise/shims:$PATH" +fi + +MISE_JAVA_HOME="$(mise where java 2>/dev/null || true)" +if [ -n "$MISE_JAVA_HOME" ] && [ -f "$CA_CERT" ]; then + # Create a version-independent symlink so JAVA_HOME doesn't depend + # on the mise Java version. + SANDCAT_DIR="$HOME/.local/share/sandcat" + mkdir -p "$SANDCAT_DIR" + ln -sfn "$MISE_JAVA_HOME" "$SANDCAT_DIR/java-home" + + JAVA_CACERTS="$MISE_JAVA_HOME/lib/security/cacerts" + SANDCAT_CACERTS="$SANDCAT_DIR/cacerts" + if [ -f "$JAVA_CACERTS" ]; then + # Import on first start; on restart the alias already exists (harmless failure). 
+ if keytool -importcert -trustcacerts -noprompt \ + -alias mitmproxy \ + -file "$CA_CERT" \ + -keystore "$JAVA_CACERTS" \ + -storepass changeit >/dev/null 2>&1; then + echo "Imported mitmproxy CA into Java trust store" + fi + + # Create/update a standalone copy of the trust store (with the mitmproxy + # CA) so JAVA_TOOL_OPTIONS can point all JVMs to it — including + # ones downloaded later by tools like Coursier (Scala Metals). + cp "$JAVA_CACERTS" "$SANDCAT_CACERTS" + + # scala-cli is a GraalVM native binary that ignores JAVA_TOOL_OPTIONS + # and JAVA_HOME for trust store resolution. Pre-create its config + # so the trust store is used even if scala-cli isn't installed yet + # (e.g. when Metals downloads it later). + SCALACLI_CONFIG="$HOME/.local/share/scalacli/secrets/config.json" + mkdir -p "$(dirname "$SCALACLI_CONFIG")" + cat > "$SCALACLI_CONFIG" << EOFJSON +{ + "java": { + "properties": ["javax.net.ssl.trustStore=$SANDCAT_CACERTS","javax.net.ssl.trustStorePassword=changeit"] + } +} +EOFJSON + + fi +fi + +# Seed the onboarding flag so Claude Code uses the API key without interactive +# setup. Only written when the user configured an ANTHROPIC_API_KEY secret. +if [ -n "${ANTHROPIC_API_KEY:-}" ]; then + echo '{"hasCompletedOnboarding":true}' > "$HOME/.claude.json" +fi + +# Claude Code is installed at build time (Dockerfile.app). +# Background update so it doesn't block startup. +(claude install >/dev/null 2>&1 &) + diff --git a/.devcontainer/sandcat/scripts/mitmproxy_addon.py b/.devcontainer/sandcat/scripts/mitmproxy_addon.py new file mode 100644 index 0000000000..0e401b9cd3 --- /dev/null +++ b/.devcontainer/sandcat/scripts/mitmproxy_addon.py @@ -0,0 +1,271 @@ +""" +mitmproxy addon: network access rules and secret substitution. 
+ +Loaded via: mitmweb -s /scripts/mitmproxy_addon.py + +On startup, reads settings from up to three layers (lowest to highest +precedence): user (~/.config/sandcat/settings.json), project +(.sandcat/settings.json), and local (.sandcat/settings.local.json). +Env vars and secrets are merged (higher precedence wins on conflict). +Network rules are concatenated (highest precedence first). + +Network rules are evaluated top-to-bottom, first match wins, default deny. +Secret placeholders are replaced with real values only for allowed hosts. +""" + +import json +import logging +import os +import re +import subprocess +import sys +from fnmatch import fnmatch + +from mitmproxy import ctx, http, dns + +_VALID_ENV_NAME = re.compile(r"^[A-Za-z_][A-Za-z0-9_]*$") + +# Settings layers, lowest to highest precedence. +SETTINGS_PATHS = [ + "/config/settings.json", # user: ~/.config/sandcat/settings.json + "/config/project/settings.json", # project: .sandcat/settings.json + "/config/project/settings.local.json", # local: .sandcat/settings.local.json +] +SANDCAT_ENV_PATH = "/home/mitmproxy/.mitmproxy/sandcat.env" + +logger = logging.getLogger(__name__) + +class SandcatAddon: + def __init__(self): + self.secrets: dict[str, dict] = {} # name -> {value, hosts, placeholder} + self.network_rules: list[dict] = [] + self.env: dict[str, str] = {} # non-secret env vars + + def load(self, loader): + layers = [] + for path in SETTINGS_PATHS: + if os.path.isfile(path): + with open(path) as f: + layers.append(json.load(f)) + + if not layers: + logger.info("No settings files found — addon disabled") + return + + merged = self._merge_settings(layers) + + self._configure_op_token(merged.get("op_service_account_token")) + self.env = merged["env"] + self._load_secrets(merged["secrets"]) + self._load_network_rules(merged["network"]) + self._write_placeholders_env() + + ctx.log.info( + f"Loaded {len(self.env)} env var(s) and {len(self.secrets)} secret(s), wrote {SANDCAT_ENV_PATH}" + ) + + @staticmethod 
+ def _configure_op_token(token: str | None): + """Set OP_SERVICE_ACCOUNT_TOKEN from settings if not already in the environment.""" + if token and "OP_SERVICE_ACCOUNT_TOKEN" not in os.environ: + os.environ["OP_SERVICE_ACCOUNT_TOKEN"] = token + + @staticmethod + def _merge_settings(layers: list[dict]) -> dict: + """Merge settings from multiple layers (lowest to highest precedence). + + - env: dict merge, higher precedence overwrites. + - secrets: dict merge, higher precedence overwrites. + - network: concatenated, highest precedence first. + - op_service_account_token: highest precedence non-empty value wins. + """ + env: dict[str, str] = {} + secrets: dict[str, dict] = {} + network: list[dict] = [] + op_token: str | None = None + + for layer in layers: + env.update(layer.get("env", {})) + secrets.update(layer.get("secrets", {})) + layer_token = layer.get("op_service_account_token") + if layer_token: + op_token = layer_token + + # Network rules: highest-precedence layer's rules come first. 
+ for layer in reversed(layers): + network.extend(layer.get("network", [])) + + return {"env": env, "secrets": secrets, "network": network, + "op_service_account_token": op_token} + + def _load_secrets(self, raw_secrets: dict): + for name, entry in raw_secrets.items(): + placeholder = f"SANDCAT_PLACEHOLDER_{name}" + try: + value = self._resolve_secret_value(name, entry) + except (RuntimeError, ValueError) as e: + ctx.log.warn(str(e)) + print(f"WARNING: {e}", file=sys.stderr) + value = "" + self.secrets[name] = { + "value": value, + "hosts": entry.get("hosts", []), + "placeholder": placeholder, + } + + @staticmethod + def _resolve_secret_value(name: str, entry: dict) -> str: + """Resolve a secret value from either a plain 'value' or a 1Password 'op' reference.""" + has_value = "value" in entry + has_op = "op" in entry + + if has_value and has_op: + raise ValueError( + f"Secret {name!r}: specify either 'value' or 'op', not both" + ) + if not has_value and not has_op: + raise ValueError( + f"Secret {name!r}: must specify either 'value' or 'op'" + ) + + if has_value: + return entry["value"] + + op_ref = entry["op"] + if not op_ref.startswith("op://"): + raise ValueError( + f"Secret {name!r}: 'op' value must start with 'op://', got {op_ref!r}" + ) + + try: + result = subprocess.run( + ["op", "read", op_ref], + capture_output=True, text=True, timeout=30, + ) + except FileNotFoundError: + raise RuntimeError( + f"Secret {name!r}: 'op' CLI not found. " + "Install 1Password CLI to use op:// references." 
+ ) from None + + if result.returncode != 0: + stderr = result.stderr.strip() + raise RuntimeError( + f"Secret {name!r}: 'op read' failed: {stderr}" + ) + + return result.stdout.strip() + + def _load_network_rules(self, raw_rules: list): + self.network_rules = raw_rules + ctx.log.info(f"Loaded {len(self.network_rules)} network rule(s)") + + @staticmethod + def _shell_escape(value: str) -> str: + """Escape a string for safe inclusion inside double quotes in shell.""" + return (value + .replace("\\", "\\\\") + .replace('"', '\\"') + .replace("$", "\\$") + .replace("`", "\\`") + .replace("\n", "\\n")) + + @staticmethod + def _validate_env_name(name: str): + """Raise ValueError if name is not a valid shell variable name.""" + if not _VALID_ENV_NAME.match(name): + raise ValueError(f"Invalid env var name: {name!r}") + + def _write_placeholders_env(self): + lines = [] + # Non-secret env vars (e.g. git identity) — passed through as-is. + for name, value in self.env.items(): + self._validate_env_name(name) + lines.append(f'export {name}="{self._shell_escape(value)}"') + for name, entry in self.secrets.items(): + self._validate_env_name(name) + lines.append(f'export {name}="{self._shell_escape(entry["placeholder"])}"') + with open(SANDCAT_ENV_PATH, "w") as f: + f.write("\n".join(lines) + "\n") + + def _is_request_allowed(self, method: str | None, host: str) -> bool: + host = host.lower().rstrip(".") + for rule in self.network_rules: + if not fnmatch(host, rule["host"].lower()): + continue + rule_method = rule.get("method") + if rule_method is not None and method is not None and rule_method.upper() != method.upper(): + continue + return rule["action"] == "allow" + return False # default deny + + def _substitute_secrets(self, flow: http.HTTPFlow): + host = flow.request.pretty_host.lower() + + for name, entry in self.secrets.items(): + placeholder = entry["placeholder"] + value = entry["value"] + allowed_hosts = entry["hosts"] + + present = ( + placeholder in flow.request.url + 
or placeholder in str(flow.request.headers) + or ( + flow.request.content + and placeholder.encode() in flow.request.content + ) + ) + + if not present: + continue + + # Leak detection: block if secret going to disallowed host + if not any(fnmatch(host, pattern.lower()) for pattern in allowed_hosts): + flow.response = http.Response.make( + 403, + f"Blocked: secret {name!r} not allowed for host {host!r}\n".encode(), + {"Content-Type": "text/plain"}, + ) + ctx.log.warn( + f"Blocked secret {name!r} leak to disallowed host {host!r}" + ) + return + + if placeholder in flow.request.url: + flow.request.url = flow.request.url.replace(placeholder, value) + for k, v in flow.request.headers.items(): + if placeholder in v: + flow.request.headers[k] = v.replace(placeholder, value) + if flow.request.content and placeholder.encode() in flow.request.content: + flow.request.content = flow.request.content.replace( + placeholder.encode(), value.encode() + ) + + def request(self, flow: http.HTTPFlow): + method = flow.request.method + host = flow.request.pretty_host + + if not self._is_request_allowed(method, host): + flow.response = http.Response.make( + 403, + f"Blocked by network policy: {method} {host}\n".encode(), + {"Content-Type": "text/plain"}, + ) + ctx.log.warn(f"Network deny: {method} {host}") + return + + self._substitute_secrets(flow) + + def dns_request(self, flow: dns.DNSFlow): + question = flow.request.question + if question is None: + flow.response = flow.request.fail(dns.response_codes.REFUSED) + return + + host = question.name + if not self._is_request_allowed(None, host): + flow.response = flow.request.fail(dns.response_codes.REFUSED) + ctx.log.warn(f"DNS deny: {host}") + + +addons = [SandcatAddon()] diff --git a/.devcontainer/sandcat/scripts/wg-client-init.sh b/.devcontainer/sandcat/scripts/wg-client-init.sh new file mode 100644 index 0000000000..2361eabce7 --- /dev/null +++ b/.devcontainer/sandcat/scripts/wg-client-init.sh @@ -0,0 +1,133 @@ +#!/bin/bash +# +# 
Entrypoint for the wg-client container. Sets up a WireGuard tunnel to the +# mitmproxy container and configures iptables to enforce it. Other containers +# share this network namespace via network_mode: "service:wg-client" and +# inherit the tunnel without needing NET_ADMIN themselves. +# +# Expects: +# - /mitmproxy-config volume mounted (shared with the mitmproxy container) +# - NET_ADMIN capability (for creating the WireGuard interface and iptables) +# - net.ipv4.conf.all.src_valid_mark=1 sysctl (set via docker-compose) +# +set -e + +WG_JSON="/mitmproxy-config/wireguard.conf" + +# ── Wait for mitmproxy to be ready ────────────────────────────────────────── +# The mitmproxy container generates wireguard.conf (key pairs) on startup. +# The docker-compose healthcheck gates us, but wait just in case. +elapsed=0 +while [ ! -f "$WG_JSON" ]; do + if [ "$elapsed" -ge 60 ]; then + echo "Timed out waiting for mitmproxy wireguard.conf" >&2 + exit 1 + fi + sleep 1 + elapsed=$((elapsed + 1)) +done + +# ── Derive WireGuard keys ─────────────────────────────────────────────────── +# mitmproxy stores its WireGuard key pairs as JSON: +# {"server_key": "", "client_key": ""} +# We need the client's private key and the server's public key (derived from +# the server's private key). +client_private_key=$(jq -r .client_key "$WG_JSON") +server_private_key=$(jq -r .server_key "$WG_JSON") +server_public_key=$(echo "$server_private_key" | wg pubkey) + +# Resolve the mitmproxy endpoint IP before setting up the tunnel, since DNS +# won't be available through the normal path after routing is configured. +mitmproxy_ip=$(getent hosts mitmproxy | awk '{print $1}') + +# ── Create WireGuard interface ────────────────────────────────────────────── +# We set up WireGuard manually instead of using wg-quick because wg-quick +# calls `sysctl -w net.ipv4.conf.all.src_valid_mark=1` which fails in Docker +# (/proc/sys is read-only). 
The equivalent sysctl is pre-set via the +# docker-compose `sysctls` option. +ip link add wg0 type wireguard + +wg set wg0 \ + private-key <(echo "$client_private_key") \ + fwmark 51820 \ + peer "$server_public_key" \ + endpoint mitmproxy:51820 \ + allowed-ips 0.0.0.0/0,::/0 + +ip address add 10.0.0.1/32 dev wg0 +ip link set mtu 1420 up dev wg0 + +# ── Policy routing ────────────────────────────────────────────────────────── +# All traffic is routed through wg0 via a custom routing table (51820). +# WireGuard marks its own encapsulated UDP packets with fwmark 51820, which +# exempts them from the custom table — so they use the normal default route to +# reach the mitmproxy endpoint. The suppress_prefixlength rule ensures that +# local/link-local routes in the main table still work (e.g. Docker DNS, +# container-to-container traffic). +ip -4 route add 0.0.0.0/0 dev wg0 table 51820 +ip -4 rule add not fwmark 51820 table 51820 +ip -4 rule add table main suppress_prefixlength 0 +ip -6 route add ::/0 dev wg0 table 51820 +ip -6 rule add not fwmark 51820 table 51820 +ip -6 rule add table main suppress_prefixlength 0 + +# ── Override DNS ────────────────────────────────────────────────────────── +# Docker's embedded DNS at 127.0.0.11 resolves queries on the host, +# bypassing the WireGuard tunnel. Point resolv.conf at an external +# nameserver so DNS goes through wg0 and is intercepted by mitmproxy. +printf "nameserver 1.1.1.1\nnameserver 8.8.8.8\n" > /etc/resolv.conf + +# ── Firewall kill switch ──────────────────────────────────────────────────── +# iptables rules that enforce the tunnel. Without these, traffic could leak +# via eth0 if the WireGuard interface goes down, or a process could reach +# arbitrary IPs on the Docker network subnet directly (bypassing mitmproxy). 
+# +# Insert rules (-I) are evaluated first (in reverse insertion order): +# - All traffic on the WireGuard interface (wg0) and loopback +# - WireGuard's own UDP encapsulation to the mitmproxy endpoint (fwmark 51820) +# - Established/related return traffic on eth0 (for the WG handshake) +# +# Append rules (-A) handle remaining eth0 traffic in order: +# 1. DROP direct access to the mitmproxy container (prevent proxy bypass) +# 2. DROP access to the Docker gateway (host machine) +# 3. ACCEPT traffic to other containers on the Docker network +# 4. DROP everything else (external IPs) +# +# These rules cannot be modified by containers sharing this network namespace +# via network_mode, because those containers don't have NET_ADMIN. +docker_network=$(ip -4 route show dev eth0 proto kernel | awk '{print $1}') +docker_gateway=$(ip -4 route show default dev eth0 | awk '{print $3}') + +if [ -z "$docker_network" ] || [ -z "$docker_gateway" ]; then + echo "Failed to determine Docker IPv4 network/gateway for eth0; refusing to configure iptables." 
>&2 + exit 1 +fi + +iptables -I OUTPUT -o wg0 -j ACCEPT +iptables -I OUTPUT -o lo -j ACCEPT +iptables -I OUTPUT -d "$mitmproxy_ip" -p udp --dport 51820 -m mark --mark 51820 -j ACCEPT +iptables -I OUTPUT -o eth0 -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT + +iptables -A OUTPUT -o eth0 -d "$mitmproxy_ip" -j DROP +iptables -A OUTPUT -o eth0 -d "$docker_gateway" -j DROP +iptables -A OUTPUT -o eth0 -d "$docker_network" -j ACCEPT +iptables -A OUTPUT -o eth0 -j DROP + +ip6tables -I OUTPUT -o wg0 -j ACCEPT +ip6tables -I OUTPUT -o lo -j ACCEPT +ip6tables -I OUTPUT -o eth0 -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT + +docker_network_v6=$(ip -6 route show dev eth0 proto kernel 2>/dev/null | awk '{print $1}') +if [ -n "$docker_network_v6" ]; then + docker_gateway_v6=$(ip -6 route show default dev eth0 2>/dev/null | awk '{print $3}') + ip6tables -A OUTPUT -o eth0 -d "$mitmproxy_ip" -j DROP + [ -n "$docker_gateway_v6" ] && ip6tables -A OUTPUT -o eth0 -d "$docker_gateway_v6" -j DROP + ip6tables -A OUTPUT -o eth0 -d "$docker_network_v6" -j ACCEPT +fi +ip6tables -A OUTPUT -o eth0 -j DROP + +# Signal readiness to containers waiting on the healthcheck. +touch /tmp/wg-ready + +# Hand off to the container's main command (e.g. "sleep infinity"). 
+exec "$@" diff --git a/.devcontainer/sandcat/tmux.conf b/.devcontainer/sandcat/tmux.conf new file mode 100644 index 0000000000..f42a23fcfa --- /dev/null +++ b/.devcontainer/sandcat/tmux.conf @@ -0,0 +1,19 @@ +# True color support +set -g default-terminal "tmux-256color" +set -ag terminal-overrides ",xterm-256color:RGB" + +# Mouse support (scroll, click panes, resize) +set -g mouse on + +# Reduce Esc delay (default 500ms makes vim/neovim feel laggy) +set -sg escape-time 10 + +# Scrollback buffer +set -g history-limit 50000 + +# Start numbering at 1 (0 is awkward to reach) +set -g base-index 1 +setw -g pane-base-index 1 + +# Renumber windows when one is closed +set -g renumber-windows on diff --git a/build.sbt b/build.sbt index b19fc3881f..5c5605cabf 100644 --- a/build.sbt +++ b/build.sbt @@ -1702,18 +1702,18 @@ lazy val awsLambdaCore: ProjectMatrix = (projectMatrix in file("serverless/aws/l .settings( name := "tapir-aws-lambda-core" ) - .jvmPlatform(scalaVersions = scala2And3Versions, settings = commonJvmSettings) + .jvmPlatform( + scalaVersions = scala2And3Versions, + settings = commonJvmSettings ++ Seq( + libraryDependencies += "com.amazonaws" % "aws-lambda-java-runtime-interface-client" % Versions.awsLambdaInterface + ) + ) .jsPlatform(scalaVersions = scala2Versions, settings = commonJsSettings) .dependsOn(serverCore, circeJson, tests % "test") lazy val awsLambdaZio: ProjectMatrix = (projectMatrix in file("serverless/aws/lambda-zio")) .settings(commonSettings) - .settings( - name := "tapir-aws-lambda-zio", - libraryDependencies ++= Seq( - "com.amazonaws" % "aws-lambda-java-runtime-interface-client" % Versions.awsLambdaInterface - ) - ) + .settings(name := "tapir-aws-lambda-zio") .jvmPlatform(scalaVersions = scala2And3Versions, settings = commonJvmSettings) .dependsOn(serverCore, awsLambdaCore, zio, zioHttpServer, circeJson, tests % "test") @@ -1940,8 +1940,7 @@ lazy val awsCdk: ProjectMatrix = (projectMatrix in file("serverless/aws/cdk")) "io.circe" %% 
"circe-yaml" % Versions.circeYaml, "io.circe" %% "circe-generic" % Versions.circe, "io.circe" %%% "circe-parser" % Versions.circe, - "org.typelevel" %%% "cats-effect" % Versions.catsEffect, - "com.amazonaws" % "aws-lambda-java-runtime-interface-client" % Versions.awsLambdaInterface + "org.typelevel" %%% "cats-effect" % Versions.catsEffect ) ) .jvmPlatform(scalaVersions = scala2And3Versions, settings = commonJvmSettings) @@ -1980,8 +1979,7 @@ lazy val awsExamples: ProjectMatrix = (projectMatrix in file("serverless/aws/exa case _ @("scala/annotation/nowarn.class" | "scala/annotation/nowarn$.class") => MergeStrategy.first case PathList(ps @ _*) if ps.last == "module-info.class" => MergeStrategy.first case x => (assembly / assemblyMergeStrategy).value(x) - }, - libraryDependencies += "com.amazonaws" % "aws-lambda-java-runtime-interface-client" % Versions.awsLambdaInterface + } ) ) .jsPlatform( diff --git a/doc/server/aws.md b/doc/server/aws.md index 4b3444c7d0..39677b36e1 100644 --- a/doc/server/aws.md +++ b/doc/server/aws.md @@ -1,44 +1,153 @@ # Running using the AWS serverless stack -Tapir server endpoints can be packaged and deployed as -an [AWS Lambda](https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-develop-integrations-lambda.html) function. -This approach, known as the Fat Lambda function, utilizes a single lambda function for deploying multiple endpoints. To invoke the -function, HTTP requests can be proxied through [AWS API Gateway](https://docs.aws.amazon.com/apigateway/latest/developerguide/welcome.html). +Tapir server endpoints can be packaged and deployed as an [AWS +Lambda](https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-develop-integrations-lambda.html) +function. You can utilize a single lambda function for multiple endpoints ("Fat +Lambda"), or deploy the same jar multiple times, so that each handles its own +endpoint or subset of endpoints. 
To invoke the function, HTTP requests can be +proxied through [AWS API +Gateway](https://docs.aws.amazon.com/apigateway/latest/developerguide/welcome.html). -To configure API Gateway routes, and the Lambda function, tools like [AWS SAM](https://aws.amazon.com/serverless/sam/), [AWS CDK](https://aws.amazon.com/cdk/) or [Terraform](https://www.terraform.io/) can be used, to automate cloud deployments. +To configure API Gateway routes, and the Lambda function, tools like [AWS +SAM](https://aws.amazon.com/serverless/sam/), [AWS +CDK](https://aws.amazon.com/cdk/) or [Terraform](https://www.terraform.io/) can +be used, to automate cloud deployments. -For an overview of how this works in more detail, see [this blog post](https://blog.softwaremill.com/tapir-serverless-a-proof-of-concept-6b8c9de4d396). +For an overview of how this works in more detail, see [this blog +post](https://blog.softwaremill.com/tapir-serverless-a-proof-of-concept-6b8c9de4d396). -## Runtime & Server interpreters +## Runtimes & interpreters -Tapir supports three of the AWS Lambda runtimes: custom runtime, Java, and NodeJS. Below you have a list of classes that can be used as an entry point -to your Lambda application depending on runtime of your choice. Each one of them uses server interpreter, which responsibility is to transform Tapir -endpoints with associated server logic to function like `AwsRequest => F[AwsResponse]` in case of custom and Java runtime, -or `AwsJsRequest => Future[AwsJsResponse]` in case of NodeJS runtime. Currently, two server interpreters are available, the first one is using -cats-effect (`AwsCatsEffectServerInterpreter`), and the other one is using Scala Future (`AwsFutureServerInterpreter`). Custom runtime, and Java -runtime are using only cats-effect interpreter, where NodeJS runtime can be used with both interpreters. 
-These are corresponding classes for each of the supported runtime: +AWS Lambda supports several +[runtimes](https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html) — +the language-specific environment in which your function code executes. Tapir +supports three of them, each described in its own section below. -* The `AwsLambdaIORuntime` for custom runtime. Implement the Lambda loop of reading the next request, computing and sending the response - through [Lambda runtime API](https://docs.aws.amazon.com/lambda/latest/dg/runtimes-api.html). -* The `LambdaHandler` for Java runtime, which - utilizes [RequestStreamHandler](https://github.com/aws/aws-lambda-java-libs/blob/master/aws-lambda-java-core/src/main/java/com/amazonaws/services/lambda/runtime/RequestStreamHandler.java) - interface for handling requests, response flow inside Java runtime. -* The `AwsJsRouteHandler` for NodeJS runtime. The main benefit is the reduced deployment time. Initialization of JVM-based application ( - with `sam local`) took ~11 seconds on average, while Node.js based one only ~2 seconds. +Note: the handler type parameter (`AwsRequest` or `AwsRequestV1`) determines the +expected API Gateway request format. Use `AwsRequest` for API Gateway V2 (HTTP +API) and `AwsRequestV1` for API Gateway V1 (REST API). V1 requests are +automatically normalized to V2 internally. -To start using any of the above add the following dependency: +### Java runtime + +With the Java runtime, you package your code as a fat JAR (via `assembly`) and +upload it to AWS, which manages the JVM. The Lambda entry point implements the +AWS +[RequestStreamHandler](https://github.com/aws/aws-lambda-java-libs/blob/master/aws-lambda-java-core/src/main/java/com/amazonaws/services/lambda/runtime/RequestStreamHandler.java) +interface, which is provided by the `aws-lambda-java-runtime-interface-client` +dependency (pulled in transitively by all tapir AWS Lambda modules). 
You can use +this with direct-style, cats-effect, or ZIO: + +#### Direct-style + +No effect library needed. Extend `SyncLambdaHandler`, provide your endpoints via +`getAllEndpoints`, and you're done — the class directly implements +`RequestStreamHandler`. + +Uses `AwsSyncServerInterpreter` (`AwsRequest => AwsResponse`). + +```scala +"com.softwaremill.sttp.tapir" %% "tapir-aws-lambda-core" % "@VERSION@" +``` + +Example: +[SyncLambdaApiExample](https://github.com/softwaremill/tapir/blob/master/serverless/aws/examples/src/main/scalajvm/sttp/tapir/serverless/aws/examples/SyncLambdaApiExample.scala) + +#### cats-effect + +Extend `LambdaHandler[F, R]`, provide your endpoints via `getAllEndpoints`, and +implement `handleRequest` by calling `process(input, output).unsafeRunSync()`. + +Uses `AwsCatsEffectServerInterpreter` (`AwsRequest => F[AwsResponse]`). + +```scala +"com.softwaremill.sttp.tapir" %% "tapir-aws-lambda" % "@VERSION@" +``` + +Examples: +[LambdaApiExample](https://github.com/softwaremill/tapir/blob/master/serverless/aws/examples/src/main/scalajvm/sttp/tapir/serverless/aws/examples/LambdaApiExample.scala), +[V1 +variant](https://github.com/softwaremill/tapir/blob/master/serverless/aws/examples/src/main/scalajvm/sttp/tapir/serverless/aws/examples/LambdaApiV1Example.scala) + +#### ZIO + +Create a handler instance via `ZioLambdaHandler.default(endpoints)`, then call +`handler.process[AwsRequest](input, output)` from a `RequestStreamHandler` +implementation, running the ZIO effect with `Runtime.default.unsafe.run(...)`. + +Uses `AwsZioServerInterpreter` (`AwsRequest => RIO[Env, AwsResponse]`). 
+
+```scala
+"com.softwaremill.sttp.tapir" %% "tapir-aws-lambda-zio" % "@VERSION@"
+```
+
+Example:
+[ZioLambdaHandlerImpl](https://github.com/softwaremill/tapir/blob/master/serverless/aws/lambda-zio-tests/src/main/scala/sttp/tapir/serverless/aws/ziolambda/tests/ZioLambdaHandlerImpl.scala)
+
+### Custom runtime
+
+As an alternative to the AWS-provided Java runtime, you can use a [custom
+runtime](https://docs.aws.amazon.com/lambda/latest/dg/runtimes-api.html) where
+your application includes its own runtime loop that polls the Lambda Runtime API
+for invocations via HTTP. This can be useful when running in custom containers
+or with GraalVM native images. The tradeoff is an additional dependency on an
+HTTP client (sttp client4 with the fs2 backend).
+
+#### cats-effect
+
+Extend `AwsLambdaIORuntime` and provide your `endpoints`. The base class
+implements `main` and runs the polling loop via `AwsLambdaRuntime`.
+
+Uses `AwsCatsEffectServerInterpreter` (`AwsRequest => F[AwsResponse]`). ```scala "com.softwaremill.sttp.tapir" %% "tapir-aws-lambda" % "@VERSION@" ``` +### NodeJS runtime
+
+You can also compile your Scala code to JavaScript via Scala.js and run it on
+Node.js. The main benefits are faster cold starts, though with AWS's SnapStart,
+this might or might not be a significant advantage.
+
+Handler functions are exported to JavaScript using `@JSExportTopLevel` and
+return a `js.Promise[AwsJsResponse]`. Use `AwsJsRouteHandler` to bridge between
+the JS request/response types and tapir's `Route[F]`. You can use this with
+either Future or cats-effect:
+
+#### Future
+
+Uses `AwsFutureServerInterpreter` (`AwsRequest => Future[AwsResponse]`). 
+
+```scala
+"com.softwaremill.sttp.tapir" %%% "tapir-aws-lambda-core" % "@VERSION@"
+```
+
+Example:
+[LambdaApiJsExample](https://github.com/softwaremill/tapir/blob/master/serverless/aws/examples/src/main/scalajs/sttp/tapir/serverless/aws/examples/LambdaApiJsExample.scala)
+
+#### cats-effect
+
+Uses `AwsCatsEffectServerInterpreter` (`AwsRequest => IO[AwsResponse]`).
+Supports both plain `Route[IO]` (via `catsIOHandler`) and `Resource[IO,
+Route[IO]]` (via `catsResourceHandler`).
+
+```scala
+"com.softwaremill.sttp.tapir" %%% "tapir-aws-lambda" % "@VERSION@"
+```
+
+Example:
+[LambdaApiJsResourceExample](https://github.com/softwaremill/tapir/blob/master/serverless/aws/examples/src/main/scalajs/sttp/tapir/serverless/aws/examples/LambdaApiJsResourceExample.scala)
+ ## Deployment -To make it possible, to call your endpoints, you will need to deploy your application to Lambda, and configure Amazon API Gateway. -Tapir leverages ways of doing it provided by AWS, you can choose from: AWS SAM template file, terraform configuration, and AWS CDK. +To make it possible to call your endpoints, you will need to deploy your
+application to Lambda, and configure Amazon API Gateway. Tapir leverages ways of
+doing it provided by AWS, you can choose from: AWS SAM template file, terraform
+configuration, and AWS CDK.
 -You can start by adding one of the following dependencies to your project, and then follow examples: +You can start by adding one of the following dependencies to your project, and
+then follow examples:
 ```scala "com.softwaremill.sttp.tapir" %% "tapir-aws-sam" % "@VERSION@" @@ -48,36 +157,61 @@ You can start by adding one of the following dependencies to your project, and t ### Examples -Go ahead and clone tapir project. To deploy you application to AWS you will need to have an AWS account -and [AWS command line tools installed](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html). +Go ahead and clone the tapir project. 
To deploy your application to AWS you will need
+to have an AWS account and [AWS command line tools
+installed](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html). #### SAM -SAM can be deployed using Java runtime or NodeJS runtime. For each of these cases first you will have to install [AWS SAM command line tool](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-cli-command-reference.html), and create a S3 bucket, that will be used during deployment. Before going further, open sbt shell, as it will be needed for both runtimes. +SAM can be deployed using Java runtime or NodeJS runtime. For each of these
+cases first you will have to install [AWS SAM command line
+tool](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-cli-command-reference.html),
+and create an S3 bucket that will be used during deployment. Before going
+further, open sbt shell, as it will be needed for both runtimes. 
-For Java runtime, use sbt to run `assembly` task, and then `runMain sttp.tapir.serverless.aws.examples.SamTemplateExample`, this will generate `template.yaml` sam file in main directory +For Java runtime, use sbt to run `assembly` task, and then `runMain +sttp.tapir.serverless.aws.examples.SamTemplateExample`, this will generate +`template.yaml` sam file in main directory -For NodeJS runtime, first generate AWS Lambda yaml file by execution inside sbt shell command `awsExamples/runMain sttp.tapir.serverless.aws.examples.SamJsTemplateExample`, and then build Node.js module with `awsExamplesJS/fastLinkJS`, it will create all-in-one JS file under `tapir/serverless/aws/examples/target/js-2.13/tapir-aws-examples-fastopt/main.js` +For NodeJS runtime, first generate AWS Lambda yaml file by execution inside sbt +shell command `awsExamples/runMain +sttp.tapir.serverless.aws.examples.SamJsTemplateExample`, and then build Node.js +module with `awsExamplesJS/fastLinkJS`, it will create all-in-one JS file under +`tapir/serverless/aws/examples/target/js-2.13/tapir-aws-examples-fastopt/main.js` From now the steps for both runtimes are the same: -1. Before deploying, if you want to test your application locally, you will need Docker. Execute `sam local start-api --warm-containers EAGER`, there will be a link displayed at the console output -2. To deploy it to AWS, run `sam deploy --template-file template.yaml --stack-name sam-app --capabilities CAPABILITY_IAM --s3-bucket [name of your bucket]`. The console output should print url of the application, just add `/api/hello` to the end of it, and you should see `Hello!` message. Be aware in case of Java runtime, the first call can take a little longer as the application takes some time to start, but consecutive calls will be much faster. -3. When you want to rollback changes made on AWS, run `sam delete --stack-name sam-app` +1. Before deploying, if you want to test your application locally, you will need + Docker. 
Execute `sam local start-api --warm-containers EAGER`, there will be + a link displayed at the console output +2. To deploy it to AWS, run `sam deploy --template-file template.yaml + --stack-name sam-app --capabilities CAPABILITY_IAM --s3-bucket [name of your + bucket]`. The console output should print url of the application, just add + `/api/hello` to the end of it, and you should see `Hello!` message. Be aware + in case of Java runtime, the first call can take a little longer as the + application takes some time to start, but consecutive calls will be much + faster. +3. When you want to rollback changes made on AWS, run `sam delete --stack-name + sam-app` #### Terraform Terraform deployment requires you to have a S3 bucket. -1. Install [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) +1. Install + [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) 2. Run `assembly` task inside sbt shell -3. Open a terminal in `tapir/serverless/aws/examples/target/jvm-2.13` directory. That's where the fat jar is saved. You - need to upload it into your s3 bucket. Using command line - tools: `aws s3 cp tapir-aws-examples.jar s3://{your-bucket}/{your-key}`. -4. Run `runMain sttp.tapir.serverless.aws.examples.TerraformConfigExample {your-aws-region} {your-bucket} {your-key}` inside sbt shell -5. Open terminal in tapir root directory, run `terraform init` and `terraform apply` - -That will create `api_gateway.tf.json` configuration and deploy Api Gateway and lambda function to AWS. Terraform will -output the url of the created API Gateway which you can call followed by `/api/hello` path. +3. Open a terminal in `tapir/serverless/aws/examples/target/jvm-2.13` directory. + That's where the fat jar is saved. You need to upload it into your s3 bucket. + Using command line tools: `aws s3 cp tapir-aws-examples.jar + s3://{your-bucket}/{your-key}`. +4. 
Run `runMain sttp.tapir.serverless.aws.examples.TerraformConfigExample + {your-aws-region} {your-bucket} {your-key}` inside sbt shell +5. Open terminal in tapir root directory, run `terraform init` and `terraform + apply` + +That will create `api_gateway.tf.json` configuration and deploy Api Gateway and +lambda function to AWS. Terraform will output the url of the created API Gateway +which you can call followed by `/api/hello` path. To destroy all the created resources run `terraform destroy`. @@ -86,11 +220,15 @@ To destroy all the created resources run `terraform destroy`. 1. First you need to install: * [NPM](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm) * [AWS CDK Toolkit](https://docs.aws.amazon.com/cdk/v2/guide/cli.html) -2. Open sbt shell, then run `assembly` task, and execute `runMain sttp.tapir.serverless.aws.examples.CdkAppExample` to generate CDK application template under `cdk` - directory -3. Go to `cdk` and run `npm install`, it will create all files needed for the deployment -4. Before deploying, if you want to test your application locally, you will need Docker - and [AWS SAM command line tool](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-cli-command-reference.html) - , then execute `cdk synth`, and `sam local start-api -t cdk.out/TapirCdkStack.template.json --warm-containers EAGER` +2. Open sbt shell, then run `assembly` task, and execute `runMain + sttp.tapir.serverless.aws.examples.CdkAppExample` to generate CDK application + template under `cdk` directory +3. Go to `cdk` and run `npm install`, it will create all files needed for the + deployment +4. 
Before deploying, if you want to test your application locally, you will need + Docker and [AWS SAM command line + tool](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-cli-command-reference.html) + , then execute `cdk synth`, and `sam local start-api -t + cdk.out/TapirCdkStack.template.json --warm-containers EAGER` 5. To deploy it to AWS simply run `cdk deploy` 6. When you want to rollback changes made on AWS, run `cdk destroy` diff --git a/serverless/aws/examples/src/main/scalajvm/sttp/tapir/serverless/aws/examples/SyncLambdaApiExample.scala b/serverless/aws/examples/src/main/scalajvm/sttp/tapir/serverless/aws/examples/SyncLambdaApiExample.scala new file mode 100644 index 0000000000..3f4629350e --- /dev/null +++ b/serverless/aws/examples/src/main/scalajvm/sttp/tapir/serverless/aws/examples/SyncLambdaApiExample.scala @@ -0,0 +1,16 @@ +package sttp.tapir.serverless.aws.examples + +import io.circe.generic.auto._ +import sttp.tapir._ +import sttp.tapir.server.ServerEndpoint +import sttp.tapir.serverless.aws.lambda._ + +object SyncLambdaApiExample extends SyncLambdaHandler[AwsRequest](AwsSyncServerOptions.default) { + + val helloEndpoint: ServerEndpoint[Any, sttp.shared.Identity] = endpoint.get + .in("api" / "hello") + .out(stringBody) + .handleSuccess { _ => s"Hello!" 
} + + override protected def getAllEndpoints: List[ServerEndpoint[Any, sttp.shared.Identity]] = List(helloEndpoint) +} diff --git a/serverless/aws/lambda-cats-effect/src/main/scala/sttp/tapir/serverless/aws/lambda/LambdaHandler.scala b/serverless/aws/lambda-cats-effect/src/main/scala/sttp/tapir/serverless/aws/lambda/LambdaHandler.scala index cae13bbc9a..5427c1f4d6 100644 --- a/serverless/aws/lambda-cats-effect/src/main/scala/sttp/tapir/serverless/aws/lambda/LambdaHandler.scala +++ b/serverless/aws/lambda-cats-effect/src/main/scala/sttp/tapir/serverless/aws/lambda/LambdaHandler.scala @@ -1,14 +1,11 @@ package sttp.tapir.serverless.aws.lambda -import cats.effect.{Resource, Sync} +import cats.effect.Sync import cats.implicits._ import com.amazonaws.services.lambda.runtime.RequestStreamHandler import io.circe._ -import io.circe.generic.auto._ -import io.circe.parser.decode -import io.circe.syntax.EncoderOps import sttp.tapir.server.ServerEndpoint -import java.io.{BufferedWriter, InputStream, OutputStream, OutputStreamWriter} +import java.io.{InputStream, OutputStream} import java.nio.charset.StandardCharsets /** [[LambdaHandler]] is an entry point for handling requests sent to AWS Lambda application which exposes Tapir endpoints. 
@@ -25,38 +22,13 @@ abstract class LambdaHandler[F[_]: Sync, R: Decoder](options: AwsServerOptions[F protected def getAllEndpoints: List[ServerEndpoint[Any, F]] - protected def process(input: InputStream, output: OutputStream): F[Unit] = { - val server: AwsCatsEffectServerInterpreter[F] = AwsCatsEffectServerInterpreter(options) + private lazy val route: Route[F] = AwsCatsEffectServerInterpreter(options).toRoute(getAllEndpoints) + protected def process(input: InputStream, output: OutputStream): F[Unit] = for { allBytes <- Sync[F].blocking(input.readAllBytes()) - decoded <- Sync[F].delay(decode[R](new String(allBytes, StandardCharsets.UTF_8))) - response <- decoded match { - case Left(e) => Sync[F].pure(AwsResponse.badRequest(s"Invalid AWS request: ${e.getMessage}")) - case Right(awsRequest) => - awsRequest match { - case r: AwsRequestV1 => server.toRoute(getAllEndpoints)(r.toV2) - case r: AwsRequest => server.toRoute(getAllEndpoints)(r) - case r => - Sync[F].raiseError[AwsResponse]( - new IllegalArgumentException(s"Request of type ${r.getClass.getCanonicalName} is not suppoerted") - ) - } - } - _ <- writerResource(Sync[F].delay(output)).use { writer => - Sync[F].blocking(writer.write(Printer.noSpaces.print(response.asJson))) - } + decoded <- Sync[F].delay(AwsLambdaCodec.decodeRequest[R](new String(allBytes, StandardCharsets.UTF_8))) + response <- decoded.fold(Sync[F].pure(_), route) + _ <- Sync[F].blocking(AwsLambdaCodec.writeResponse(response, output)) } yield () - } - - private val writerResource: F[OutputStream] => Resource[F, BufferedWriter] = output => { - Resource.make { - output.map(i => new BufferedWriter(new OutputStreamWriter(i, StandardCharsets.UTF_8))) - } { writer => - Sync[F].delay { - writer.flush() - writer.close() - } - } - } } diff --git a/serverless/aws/lambda-core/src/main/scala/sttp/tapir/serverless/aws/lambda/AwsLambdaCodec.scala b/serverless/aws/lambda-core/src/main/scala/sttp/tapir/serverless/aws/lambda/AwsLambdaCodec.scala new file mode 
100644 index 0000000000..9fc13b500e --- /dev/null +++ b/serverless/aws/lambda-core/src/main/scala/sttp/tapir/serverless/aws/lambda/AwsLambdaCodec.scala @@ -0,0 +1,42 @@ +package sttp.tapir.serverless.aws.lambda + +import io.circe._ +import io.circe.generic.auto._ +import io.circe.parser.{decode => jsonDecode} +import io.circe.syntax._ + +import java.io.{BufferedWriter, OutputStream, OutputStreamWriter} +import java.nio.charset.StandardCharsets + +/** Shared JSON decode/encode and V1-to-V2 request normalization used by the various Lambda handler implementations. */ +private[aws] object AwsLambdaCodec { + + /** Decodes a JSON string into an [[AwsRequest]], normalizing V1 format to V2 if needed. Returns `Left` with an error response for + * malformed input, or `Right` with the normalized request. + * + * @throws IllegalArgumentException + * if the decoded type is neither [[AwsRequestV1]] nor [[AwsRequest]] + */ + def decodeRequest[R: Decoder](json: String): Either[AwsResponse, AwsRequest] = + jsonDecode[R](json) match { + case Left(e) => Left(AwsResponse.badRequest(s"Invalid AWS request: ${e.getMessage}")) + case Right(r: AwsRequestV1) => Right(r.toV2) + case Right(r: AwsRequest) => Right(r) + case Right(r) => + throw new IllegalArgumentException(s"Request of type ${r.getClass.getCanonicalName} is not supported") + } + + /** Encodes an [[AwsResponse]] as a JSON string. */ + def encodeResponse(response: AwsResponse): String = + Printer.noSpaces.print(response.asJson) + + /** Writes an [[AwsResponse]] as JSON to the given output stream. 
*/ + def writeResponse(response: AwsResponse, output: OutputStream): Unit = { + val writer = new BufferedWriter(new OutputStreamWriter(output, StandardCharsets.UTF_8)) + try writer.write(encodeResponse(response)) + finally { + writer.flush() + writer.close() + } + } +} diff --git a/serverless/aws/lambda-core/src/main/scala/sttp/tapir/serverless/aws/lambda/AwsSyncServerInterpreter.scala b/serverless/aws/lambda-core/src/main/scala/sttp/tapir/serverless/aws/lambda/AwsSyncServerInterpreter.scala new file mode 100644 index 0000000000..b57a40b807 --- /dev/null +++ b/serverless/aws/lambda-core/src/main/scala/sttp/tapir/serverless/aws/lambda/AwsSyncServerInterpreter.scala @@ -0,0 +1,24 @@ +package sttp.tapir.serverless.aws.lambda + +import sttp.monad.MonadError +import sttp.monad.IdentityMonad +import AwsSyncServerInterpreter._ + +abstract class AwsSyncServerInterpreter extends AwsServerInterpreter[sttp.shared.Identity] + +object AwsSyncServerInterpreter { + + implicit val idMonadError: MonadError[sttp.shared.Identity] = IdentityMonad + + def apply(serverOptions: AwsServerOptions[sttp.shared.Identity]): AwsSyncServerInterpreter = { + new AwsSyncServerInterpreter { + override def awsServerOptions: AwsServerOptions[sttp.shared.Identity] = serverOptions + } + } + + def apply(): AwsSyncServerInterpreter = { + new AwsSyncServerInterpreter { + override def awsServerOptions: AwsServerOptions[sttp.shared.Identity] = AwsSyncServerOptions.default + } + } +} diff --git a/serverless/aws/lambda-core/src/main/scala/sttp/tapir/serverless/aws/lambda/AwsSyncServerOptions.scala b/serverless/aws/lambda-core/src/main/scala/sttp/tapir/serverless/aws/lambda/AwsSyncServerOptions.scala new file mode 100644 index 0000000000..f5a6042090 --- /dev/null +++ b/serverless/aws/lambda-core/src/main/scala/sttp/tapir/serverless/aws/lambda/AwsSyncServerOptions.scala @@ -0,0 +1,18 @@ +package sttp.tapir.serverless.aws.lambda + +import sttp.tapir.server.interceptor.CustomiseInterceptors + +object 
AwsSyncServerOptions { + + /** Allows customising the interceptors used by the server interpreter. */ + def customiseInterceptors: CustomiseInterceptors[sttp.shared.Identity, AwsServerOptions[sttp.shared.Identity]] = + CustomiseInterceptors( + createOptions = (ci: CustomiseInterceptors[sttp.shared.Identity, AwsServerOptions[sttp.shared.Identity]]) => + AwsServerOptions(encodeResponseBody = true, ci.interceptors) + ) + + def default: AwsServerOptions[sttp.shared.Identity] = customiseInterceptors.options + + def noEncoding: AwsServerOptions[sttp.shared.Identity] = + default.copy(encodeResponseBody = false) +} diff --git a/serverless/aws/lambda-core/src/main/scalajvm/sttp/tapir/serverless/aws/lambda/SyncLambdaHandler.scala b/serverless/aws/lambda-core/src/main/scalajvm/sttp/tapir/serverless/aws/lambda/SyncLambdaHandler.scala new file mode 100644 index 0000000000..26c47a366d --- /dev/null +++ b/serverless/aws/lambda-core/src/main/scalajvm/sttp/tapir/serverless/aws/lambda/SyncLambdaHandler.scala @@ -0,0 +1,42 @@ +package sttp.tapir.serverless.aws.lambda + +import com.amazonaws.services.lambda.runtime.{Context, RequestStreamHandler} +import io.circe._ +import sttp.tapir.server.ServerEndpoint + +import java.io.{InputStream, OutputStream} +import java.nio.charset.StandardCharsets + +/** [[SyncLambdaHandler]] is a direct-style entry point for handling requests sent to AWS Lambda application which exposes Tapir endpoints. + * + * @tparam R + * AWS API Gateway request type [[AwsRequestV1]] or [[AwsRequest]]. + * @param options + * Server options of type AwsServerOptions. 
+ */ +abstract class SyncLambdaHandler[R: Decoder](options: AwsServerOptions[sttp.shared.Identity]) extends RequestStreamHandler { + + protected def getAllEndpoints: List[ServerEndpoint[Any, sttp.shared.Identity]] + + private lazy val route: AwsRequest => AwsResponse = AwsSyncServerInterpreter(options).toRoute(getAllEndpoints) + + override def handleRequest(input: InputStream, output: OutputStream, context: Context): Unit = { + val json = new String(input.readAllBytes(), StandardCharsets.UTF_8) + val response = AwsLambdaCodec.decodeRequest[R](json).fold(identity, route) + AwsLambdaCodec.writeResponse(response, output) + } +} + +object SyncLambdaHandler { + + def apply[R: Decoder]( + endpoints: List[ServerEndpoint[Any, sttp.shared.Identity]], + serverOptions: AwsServerOptions[sttp.shared.Identity] + ): SyncLambdaHandler[R] = + new SyncLambdaHandler[R](serverOptions) { + override protected def getAllEndpoints: List[ServerEndpoint[Any, sttp.shared.Identity]] = endpoints + } + + def default[R: Decoder](endpoints: List[ServerEndpoint[Any, sttp.shared.Identity]]): SyncLambdaHandler[R] = + apply(endpoints, AwsSyncServerOptions.default) +} diff --git a/serverless/aws/lambda-zio/src/main/scala/sttp/tapir/serverless/aws/ziolambda/ZioLambdaHandler.scala b/serverless/aws/lambda-zio/src/main/scala/sttp/tapir/serverless/aws/ziolambda/ZioLambdaHandler.scala index 35d9493de7..52a5efcf85 100644 --- a/serverless/aws/lambda-zio/src/main/scala/sttp/tapir/serverless/aws/ziolambda/ZioLambdaHandler.scala +++ b/serverless/aws/lambda-zio/src/main/scala/sttp/tapir/serverless/aws/ziolambda/ZioLambdaHandler.scala @@ -1,15 +1,12 @@ package sttp.tapir.serverless.aws.ziolambda import io.circe._ -import io.circe.generic.auto._ -import io.circe.parser.decode -import io.circe.syntax.EncoderOps import sttp.tapir.server.ziohttp.ZioHttpServerOptions -import sttp.tapir.serverless.aws.lambda.{AwsRequest, AwsRequestV1, AwsResponse, AwsServerOptions} +import 
sttp.tapir.serverless.aws.lambda.{AwsLambdaCodec, AwsRequest, AwsResponse, AwsServerOptions} import sttp.tapir.ztapir._ -import zio.{RIO, Task, ZIO} +import zio.{RIO, ZIO} -import java.io.{BufferedWriter, InputStream, OutputStream, OutputStreamWriter} +import java.io.{InputStream, OutputStream} import java.nio.charset.StandardCharsets /** [[ZioLambdaHandler]] is an entry point for handling requests sent to AWS Lambda application which exposes Tapir endpoints. @@ -23,37 +20,15 @@ abstract class ZioLambdaHandler[Env: RIOMonadError](options: AwsServerOptions[RI protected def getAllEndpoints: List[ZServerEndpoint[Env, Any]] - def process[R: Decoder](input: InputStream, output: OutputStream): RIO[Env, Unit] = { - - val server: AwsZioServerInterpreter[Env] = - AwsZioServerInterpreter[Env](options) + private lazy val route: AwsRequest => RIO[Env, AwsResponse] = AwsZioServerInterpreter[Env](options).toRoute(getAllEndpoints) + def process[R: Decoder](input: InputStream, output: OutputStream): RIO[Env, Unit] = for { allBytes <- ZIO.attempt(input.readAllBytes()) - str = new String(allBytes, StandardCharsets.UTF_8) - decoded = decode[R](str) - response <- decoded match { - case Left(e) => ZIO.succeed(AwsResponse.badRequest(s"Invalid AWS request: ${e.getMessage}")) - case Right(r: AwsRequestV1) => server.toRoute(getAllEndpoints)(r.toV2) - case Right(r: AwsRequest) => server.toRoute(getAllEndpoints)(r) - case Right(r) => - val message = s"Request of type ${r.getClass.getCanonicalName} is not supported" - ZIO.fail(new IllegalArgumentException(message)) - } - _ <- writerResource(response, output) + decoded <- ZIO.attempt(AwsLambdaCodec.decodeRequest[R](new String(allBytes, StandardCharsets.UTF_8))) + response <- decoded.fold(ZIO.succeed(_), route) + _ <- ZIO.attempt(AwsLambdaCodec.writeResponse(response, output)) } yield () - } - - private def writerResource(response: AwsResponse, output: OutputStream): Task[Unit] = { - val acquire = ZIO.attempt(new BufferedWriter(new 
OutputStreamWriter(output, StandardCharsets.UTF_8))) - val release = (writer: BufferedWriter) => - ZIO.attempt { - writer.flush() - writer.close() - }.orDie - val use = (writer: BufferedWriter) => ZIO.attempt(writer.write(Printer.noSpaces.print(response.asJson))) - ZIO.acquireReleaseWith(acquire)(release)(use) - } } object ZioLambdaHandler {