diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000..941325b
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,6 @@
+.git
+__pycache__
+*.py[cod]
+*.egg-info
+.pytest_cache
+.env
diff --git a/.env.example b/.env.example
new file mode 100644
index 0000000..d213320
--- /dev/null
+++ b/.env.example
@@ -0,0 +1,10 @@
+# Database Configuration
+POSTGRES_USER=admin
+POSTGRES_PASSWORD=password
+POSTGRES_DB=inventory
+POSTGRES_HOST=db
+POSTGRES_PORT=5432
+
+# Application Configuration
+# Constructed automatically in docker-compose, but if running locally:
+# DATABASE_URL=postgresql://admin:password@localhost:5432/inventory
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 0000000..e9aa539
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,69 @@
+name: CI
+
+on:
+  push:
+    branches: [main, master]
+  pull_request:
+    branches: [main, master]
+
+jobs:
+  test:
+    runs-on: ubuntu-latest
+    env:
+      POSTGRES_USER: ${{ secrets.POSTGRES_USER || 'testuser' }}
+      POSTGRES_PASSWORD: ${{ secrets.POSTGRES_PASSWORD || 'testpass' }}
+      POSTGRES_DB: ${{ secrets.POSTGRES_DB || 'testdb' }}
+
+    services:
+      postgres:
+        image: postgres:15-alpine
+        env:
+          # The env context is not available inside `services`, so repeat the
+          # secrets-with-fallback expressions here instead of ${{ env.* }}.
+          POSTGRES_USER: ${{ secrets.POSTGRES_USER || 'testuser' }}
+          POSTGRES_PASSWORD: ${{ secrets.POSTGRES_PASSWORD || 'testpass' }}
+          POSTGRES_DB: ${{ secrets.POSTGRES_DB || 'testdb' }}
+        ports:
+          - 5432:5432
+        options: >-
+          --health-cmd pg_isready
+          --health-interval 10s
+          --health-timeout 5s
+          --health-retries 5
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Set up Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: "3.10"
+          cache: pip
+
+      - name: Install dependencies
+        run: pip install ".[test]"
+
+      - name: Initialize database
+        run: |
+          PGPASSWORD=${{ env.POSTGRES_PASSWORD }} psql -h localhost -U ${{ env.POSTGRES_USER }} -d ${{ env.POSTGRES_DB }} -f init.sql
+
+      - name: Run tests
+        run: pytest -v
+        env:
+          DATABASE_URL: postgresql://${{ env.POSTGRES_USER }}:${{ env.POSTGRES_PASSWORD }}@localhost:5432/${{ env.POSTGRES_DB }}
+
+  lint:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Set up Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: "3.10"
+
+      - name: Install ruff
+        run: pip install ruff
+
+      - name: Run linter
+        run: ruff check . --output-format=github
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..b34c9ad
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,68 @@
+# Python
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# Virtual Environment
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# pyenv
+.python-version
+
+# Editor / IDE
+.vscode/
+.idea/
+*.swp
+*.swo
+
+# OS
+.DS_Store
+Thumbs.db
+
+# Project specific
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..e835e12
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,15 @@
+repos:
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    rev: v0.2.0
+    hooks:
+      - id: ruff
+        args: [--fix]
+      - id: ruff-format
+
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.5.0
+    hooks:
+      - id: trailing-whitespace
+      - id: end-of-file-fixer
+      - id: check-yaml
+      - id: check-added-large-files
diff --git a/API.md b/API.md
new file mode 100644
index 0000000..2dee0f2
--- /dev/null
+++ b/API.md
@@ -0,0 +1,155 @@
+# Server Inventory API & CLI
+
+## Overview
+REST API and CLI for tracking server inventory. Built with FastAPI, PostgreSQL (raw SQL), and Typer.
+
+## Quick Start
+
+```bash
+# Start with Docker/Podman
+podman compose up -d --build
+
+# Run tests
+podman compose exec api pytest -v
+
+# View API docs
+open http://localhost:8000/docs
+```
+
+## Makefile Commands
+
+```bash
+make help         # Show all commands
+make podman-up    # Start the stack
+make podman-test  # Run tests in container
+make lint         # Run linter
+make format       # Format code
+```
+
+---
+
+## API Endpoints
+
+### CRUD Operations
+
+| Method | Endpoint | Description |
+|--------|----------|-------------|
+| POST | `/servers` | Create a server |
+| GET | `/servers` | List servers (with filtering) |
+| GET | `/servers/{id}` | Get a server |
+| PUT | `/servers/{id}` | Update a server |
+| DELETE | `/servers/{id}` | Delete a server |
+
+### Filtering & Pagination
+
+```bash
+GET /servers?limit=10&offset=0                  # Pagination
+GET /servers?state=active                       # Filter by state
+GET /servers?hostname_contains=web              # Search hostname
+GET /servers?state=active&hostname_contains=web # Combined
+```
+
+### ETag Concurrency Control
+
+All responses include an `ETag` header for optimistic concurrency:
+
+```bash
+# Conditional GET (returns 304 if unchanged)
+curl -H "If-None-Match: \"abc123\"" http://localhost:8000/servers/1
+
+# Conditional PUT (returns 412 if stale)
+curl -X PUT -H "If-Match: \"abc123\"" -d '{"state":"offline"}' http://localhost:8000/servers/1
+```
+
+### Health & Observability
+
+| Endpoint | Description |
+|----------|-------------|
+| `/health` | Liveness probe |
+| `/ready` | Readiness probe (checks DB) |
+| `/metrics` | Prometheus metrics |
+
+### API Versioning
+
+All endpoints are available at both `/servers` and `/v1/servers`.
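+
+### Example: Optimistic Concurrency from Python
+
+The curl flow above, driven from Python with `requests` (a sketch; assumes the Quick Start stack is running and a server with id 1 exists):
+
+```python
+import requests
+
+BASE = "http://localhost:8000/servers"
+
+# Capture the current ETag
+resp = requests.get(f"{BASE}/1")
+etag = resp.headers["ETag"]
+
+# Conditional GET: 304 Not Modified while the resource is unchanged
+assert requests.get(f"{BASE}/1", headers={"If-None-Match": etag}).status_code == 304
+
+# Conditional PUT: applies only if our ETag is still current;
+# otherwise the API answers 412 Precondition Failed
+update = requests.put(f"{BASE}/1", json={"state": "offline"}, headers={"If-Match": etag})
+print(update.status_code, update.headers.get("ETag"))
+```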
+
+---
+
+## CLI Usage
+
+```bash
+# Basic commands
+python cli/main.py create web-01 192.168.1.5 active
+python cli/main.py list
+python cli/main.py get 1
+python cli/main.py update 1 --state offline
+python cli/main.py delete 1
+
+# Output formats
+python cli/main.py list --format json   # JSON output
+python cli/main.py list --format table  # Table output (default)
+
+# Filtering
+python cli/main.py list --state active
+python cli/main.py list --hostname web
+```
+
+### CLI Features
+- **Retry with backoff** - Auto-retries on connection errors
+- **Format options** - `--format json` or `--format table`
+- **Filtering** - `--state` and `--hostname` flags
+
+---
+
+## Request Tracing
+
+All responses include an `X-Request-ID` header for distributed tracing.
+Send your own `X-Request-ID` header and it will be echoed back.
+
+---
+
+## Development
+
+```bash
+# Install dev dependencies
+pip install -e ".[test,dev]"
+
+# Run pre-commit hooks
+pre-commit install
+pre-commit run --all-files
+
+# Run Alembic migrations
+alembic upgrade head
+```
+
+## Environment Variables
+
+| Variable | Default | Description |
+|----------|---------|-------------|
+| `DATABASE_URL` | `postgresql://admin:password@db:5432/inventory` | Database connection |
+| `POSTGRES_USER` | `admin` | DB username |
+| `POSTGRES_PASSWORD` | `password` | DB password |
+| `POSTGRES_DB` | `inventory` | DB name |
+| `OTEL_CONSOLE_EXPORT` | `false` | Enable trace console output |
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..6845073
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,39 @@
+# Build Stage
+FROM python:3.10-slim-bullseye AS builder
+
+WORKDIR /app
+
+ENV PYTHONDONTWRITEBYTECODE=1
+ENV PYTHONUNBUFFERED=1
+
+RUN pip install --upgrade pip
+
+COPY . .
+
+# Build wheels for all dependencies including test extras
+RUN pip wheel --no-cache-dir --wheel-dir /app/wheels ".[test]"
+
+# Runtime Stage
+FROM python:3.10-slim-bullseye
+
+WORKDIR /app
+
+# Create a non-root user
+RUN addgroup --system app && adduser --system --group app
+
+# Install runtime dependencies
+COPY --from=builder /app/wheels /wheels
+COPY --from=builder /app/pyproject.toml .
+
+# Install dependencies from wheels
+RUN pip install --no-cache-dir /wheels/*
+
+COPY . /app
+
+# Change ownership
+RUN chown -R app:app /app
+
+# Switch to non-root user
+USER app
+
+CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"]
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..c536a1d
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,37 @@
+.PHONY: help install dev test lint format run clean podman-up podman-down podman-test
+
+help: ## Show this help
+	@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-15s\033[0m %s\n", $$1, $$2}'
+
+install: ## Install production dependencies
+	pip install .
+
+dev: ## Install development dependencies
+	pip install -e ".[test]"
+
+test: ## Run tests
+	pytest -v
+
+lint: ## Run linter (ruff)
+	ruff check .
+
+format: ## Format code (ruff)
+	ruff format .
+
+run: ## Run the API locally
+	uvicorn app.main:app --reload
+
+clean: ## Clean up cache files
+	find . -type d -name __pycache__ -exec rm -rf {} + 2>/dev/null || true
+	find . -type d -name .pytest_cache -exec rm -rf {} + 2>/dev/null || true
	find . -type d -name "*.egg-info" -exec rm -rf {} + 2>/dev/null || true
+
+# Podman commands
+podman-up: ## Start the stack with Podman
+	podman compose up -d --build
+
+podman-down: ## Stop the stack
+	podman compose down
+
+podman-test: ## Run tests in container
+	podman compose exec api pytest -v
diff --git a/alembic.ini b/alembic.ini
new file mode 100644
index 0000000..5860553
--- /dev/null
+++ b/alembic.ini
@@ -0,0 +1,44 @@
+# Alembic configuration file
+
+[alembic]
+script_location = alembic
+prepend_sys_path = .
+version_path_separator = os
+
+sqlalchemy.url = driver://user:pass@localhost/dbname
+
+[post_write_hooks]
+
+[loggers]
+keys = root,sqlalchemy,alembic
+
+[handlers]
+keys = console
+
+[formatters]
+keys = generic
+
+[logger_root]
+level = WARN
+handlers = console
+qualname =
+
+[logger_sqlalchemy]
+level = WARN
+handlers =
+qualname = sqlalchemy.engine
+
+[logger_alembic]
+level = INFO
+handlers =
+qualname = alembic
+
+[handler_console]
+class = StreamHandler
+args = (sys.stderr,)
+level = NOTSET
+formatter = generic
+
+[formatter_generic]
+format = %(levelname)-5.5s [%(name)s] %(message)s
+datefmt = %H:%M:%S
diff --git a/alembic/env.py b/alembic/env.py
new file mode 100644
index 0000000..e072919
--- /dev/null
+++ b/alembic/env.py
@@ -0,0 +1,41 @@
+"""Alembic migration environment configuration."""
+import os
+from logging.config import fileConfig
+
+from alembic import context
+
+config = context.config
+
+if config.config_file_name is not None:
+    fileConfig(config.config_file_name)
+
+# Use DATABASE_URL from environment
+database_url = os.getenv("DATABASE_URL", "postgresql://admin:password@localhost:5432/inventory")
+
+def run_migrations_offline() -> None:
+    """Run migrations in 'offline' mode."""
+    context.configure(
+        url=database_url,
+        target_metadata=None,
+        literal_binds=True,
+        dialect_opts={"paramstyle": "named"},
+    )
+
+    with context.begin_transaction():
+        context.run_migrations()
+
+def run_migrations_online() -> None:
+    """Run migrations in 'online' mode."""
+    import psycopg
+
+    connectable = psycopg.connect(database_url, autocommit=True)
+
+    with connectable:
+        context.configure(connection=connectable, target_metadata=None)
+        with context.begin_transaction():
+            context.run_migrations()
+
+if context.is_offline_mode():
+    run_migrations_offline()
+else:
+    run_migrations_online()
diff --git a/alembic/script.py.mako b/alembic/script.py.mako
new file mode 100644
index 0000000..fbc4b07
--- /dev/null
+++ b/alembic/script.py.mako
@@ -0,0 +1,26 @@
+"""${message}
+
+Revision ID: ${up_revision}
+Revises: ${down_revision | comma,n}
+Create Date: ${create_date}
+
+"""
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+${imports if imports else ""}
+
+# revision identifiers, used by Alembic.
+revision: str = ${repr(up_revision)}
+down_revision: Union[str, None] = ${repr(down_revision)}
+branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)}
+depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)}
+
+
+def upgrade() -> None:
+    ${upgrades if upgrades else "pass"}
+
+
+def downgrade() -> None:
+    ${downgrades if downgrades else "pass"}
diff --git a/alembic/versions/001_initial.py b/alembic/versions/001_initial.py
new file mode 100644
index 0000000..cc7e4ea
--- /dev/null
+++ b/alembic/versions/001_initial.py
@@ -0,0 +1,43 @@
+"""Initial schema - servers table
+
+Revision ID: 001_initial
+Revises:
+Create Date: 2024-01-01 00:00:00.000000
+
+"""
+from typing import Sequence, Union
+
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision: str = '001_initial'
+down_revision: Union[str, None] = None
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    # Create ENUM type
+    op.execute("""
+        DO $$ BEGIN
+            CREATE TYPE server_state AS ENUM ('active', 'offline', 'retired');
+        EXCEPTION
+            WHEN duplicate_object THEN null;
+        END $$;
+    """)
+
+    # Create servers table
+    op.execute("""
+        CREATE TABLE IF NOT EXISTS servers (
+            id SERIAL PRIMARY KEY,
+            hostname VARCHAR(255) NOT NULL UNIQUE,
+            ip_address INET,
+            state server_state NOT NULL,
+            created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
+        );
+    """)
+
+
+def downgrade() -> None:
+    op.execute("DROP TABLE IF EXISTS servers;")
+    op.execute("DROP TYPE IF EXISTS server_state;")
diff --git a/app/__init__.py b/app/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/app/config.py b/app/config.py
new file mode 100644
index 0000000..7a4be14
--- /dev/null
+++ b/app/config.py
@@ -0,0 +1,8 @@
+from pydantic_settings import BaseSettings, SettingsConfigDict
+
+class Settings(BaseSettings):
+    DATABASE_URL: str = "postgresql://admin:password@localhost:5432/inventory"
+
+    model_config = SettingsConfigDict(env_file=".env", extra="ignore")
+
+settings = Settings()
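`Settings` resolves `DATABASE_URL` from the process environment first, then `.env`, then the class default. A minimal sketch of that precedence (illustrative only; the second assertion assumes no `.env` file sets `DATABASE_URL`):

```python
import os

from app.config import Settings

# An environment variable wins over .env and the class default
os.environ["DATABASE_URL"] = "postgresql://admin:password@db:5432/inventory"
assert Settings().DATABASE_URL == "postgresql://admin:password@db:5432/inventory"

# Without the variable (and with no .env entry), the default applies
del os.environ["DATABASE_URL"]
assert Settings().DATABASE_URL == "postgresql://admin:password@localhost:5432/inventory"
```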
diff --git a/app/database.py b/app/database.py
new file mode 100644
index 0000000..e5770e3
--- /dev/null
+++ b/app/database.py
@@ -0,0 +1,54 @@
+import psycopg
+from psycopg.rows import dict_row
+from psycopg_pool import AsyncConnectionPool
+from contextlib import asynccontextmanager
+
+from app.config import settings
+
+# Connection pool - reuses connections for better performance
+pool: AsyncConnectionPool | None = None
+
+async def init_pool():
+    """Initialize the connection pool. Call on app startup."""
+    global pool
+    pool = AsyncConnectionPool(
+        conninfo=settings.DATABASE_URL,
+        min_size=2,
+        max_size=10,
+        kwargs={"row_factory": dict_row},
+        open=False,  # open explicitly below; opening in the constructor is deprecated
+    )
+    await pool.open()
+
+async def close_pool():
+    """Close the connection pool. Call on app shutdown."""
+    global pool
+    if pool:
+        await pool.close()
+
+class Database:
+    """Legacy class for backward compatibility."""
+    def __init__(self):
+        self.conn_str = settings.DATABASE_URL
+
+    @asynccontextmanager
+    async def get_connection(self):
+        conn = await psycopg.AsyncConnection.connect(self.conn_str, row_factory=dict_row)
+        try:
+            yield conn
+        finally:
+            await conn.close()
+
+db = Database()
+
+async def get_db_connection():
+    """Dependency that provides a pooled database connection."""
+    global pool
+    if pool:
+        async with pool.connection() as conn:
+            yield conn
+    else:
+        # Fallback for tests or when pool isn't initialized
+        async with db.get_connection() as conn:
+            yield conn
diff --git a/app/etag.py b/app/etag.py
new file mode 100644
index 0000000..48f3264
--- /dev/null
+++ b/app/etag.py
@@ -0,0 +1,49 @@
+"""ETag utilities for optimistic concurrency control."""
+import hashlib
+import json
+from typing import Any, Dict
+
+
+def generate_etag(data: Dict[str, Any]) -> str:
+    """Generate an ETag from server data.
+
+    The ETag is a hash of the serializable fields, ensuring it changes
+    when any field is modified.
+    """
+    # Convert datetime to ISO format string for hashing
+    serializable = {}
+    for key, value in data.items():
+        if hasattr(value, 'isoformat'):
+            serializable[key] = value.isoformat()
+        else:
+            serializable[key] = str(value) if value is not None else None
+
+    content = json.dumps(serializable, sort_keys=True)
+    return hashlib.md5(content.encode()).hexdigest()
+
+
+def etag_matches(etag: str, if_match: str | None) -> bool:
+    """Check if the provided If-Match header matches the current ETag."""
+    if if_match is None:
+        return True  # No If-Match header means proceed
+
+    # Handle weak ETags (W/"...")
+    if_match = if_match.strip()
+    if if_match.startswith('W/'):
+        if_match = if_match[2:]
+    if_match = if_match.strip('"')
+
+    return etag == if_match
+
+
+def etag_none_match(etag: str, if_none_match: str | None) -> bool:
+    """Check if the ETag matches If-None-Match (return True if NOT modified)."""
+    if if_none_match is None:
+        return False  # No header means content is considered modified
+
+    if_none_match = if_none_match.strip()
+    if if_none_match.startswith('W/'):
+        if_none_match = if_none_match[2:]
+    if_none_match = if_none_match.strip('"')
+
+    return etag == if_none_match
diff --git a/app/health.py b/app/health.py
new file mode 100644
index 0000000..15c1414
--- /dev/null
+++ b/app/health.py
@@ -0,0 +1,41 @@
+from fastapi import APIRouter
+from pydantic import BaseModel
+from datetime import datetime
+
+router = APIRouter(tags=["health"])
+
+class HealthResponse(BaseModel):
+    status: str
+    timestamp: datetime
+
+class ReadyResponse(BaseModel):
+    status: str
+    database: str
+    timestamp: datetime
+
+@router.get("/health", response_model=HealthResponse)
+async def health_check():
+    """Liveness probe - is the service running?"""
+    return HealthResponse(
+        status="healthy",
+        timestamp=datetime.utcnow()
+    )
+
+@router.get("/ready", response_model=ReadyResponse)
+async def readiness_check():
+    """Readiness probe - is the service ready to accept traffic?"""
+    from app.database import db
+
+    try:
+        async with db.get_connection() as conn:
+            async with conn.cursor() as cur:
+                await cur.execute("SELECT 1")
+        db_status = "connected"
+    except Exception:
+        db_status = "disconnected"
+
+    return ReadyResponse(
+        status="ready" if db_status == "connected" else "not_ready",
+        database=db_status,
+        timestamp=datetime.utcnow()
+    )
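The `app/etag.py` helpers above are easy to exercise in isolation; a quick, self-contained sketch of the round-trip behavior:

```python
from app.etag import generate_etag, etag_matches, etag_none_match

row = {"id": 1, "hostname": "web-01", "ip_address": "192.168.1.10", "state": "active"}
etag = generate_etag(row)

# Same data -> same ETag; any field change -> different ETag
assert generate_etag(dict(row)) == etag
assert generate_etag({**row, "state": "offline"}) != etag

# Both strong and weak header forms are accepted
assert etag_matches(etag, f'"{etag}"')
assert etag_matches(etag, f'W/"{etag}"')

# If-None-Match: True means "not modified" (the handler serves a 304)
assert etag_none_match(etag, f'"{etag}"')
assert not etag_none_match(etag, None)
```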
diff --git a/app/logging.py b/app/logging.py
new file mode 100644
index 0000000..705c31b
--- /dev/null
+++ b/app/logging.py
@@ -0,0 +1,54 @@
+"""Structured logging configuration using structlog."""
+import structlog
+import logging
+import sys
+
+def setup_logging(json_logs: bool = True, log_level: str = "INFO"):
+    """Configure structured logging for the application."""
+
+    shared_processors = [
+        structlog.contextvars.merge_contextvars,
+        structlog.stdlib.add_log_level,
+        structlog.stdlib.add_logger_name,
+        structlog.stdlib.PositionalArgumentsFormatter(),
+        structlog.processors.TimeStamper(fmt="iso"),
+        structlog.processors.StackInfoRenderer(),
+        structlog.processors.UnicodeDecoder(),
+    ]
+
+    if json_logs:
+        # JSON output for production
+        renderer = structlog.processors.JSONRenderer()
+    else:
+        # Pretty output for development
+        renderer = structlog.dev.ConsoleRenderer(colors=True)
+
+    structlog.configure(
+        processors=shared_processors + [
+            structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
+        ],
+        logger_factory=structlog.stdlib.LoggerFactory(),
+        cache_logger_on_first_use=True,
+    )
+
+    formatter = structlog.stdlib.ProcessorFormatter(
+        foreign_pre_chain=shared_processors,
+        processors=[
+            structlog.stdlib.ProcessorFormatter.remove_processors_meta,
+            renderer,
+        ],
+    )
+
+    handler = logging.StreamHandler(sys.stdout)
+    handler.setFormatter(formatter)
+
+    root_logger = logging.getLogger()
+    root_logger.addHandler(handler)
+    root_logger.setLevel(getattr(logging, log_level.upper()))
+
+    # Silence noisy loggers
+    logging.getLogger("uvicorn.access").setLevel(logging.WARNING)
+
+def get_logger(name: str = __name__):
+    """Get a structured logger instance."""
+    return structlog.get_logger(name)
diff --git a/app/main.py b/app/main.py
new file mode 100644
index 0000000..b92cca7
--- /dev/null
+++ b/app/main.py
@@ -0,0 +1,44 @@
+from contextlib import asynccontextmanager
+from fastapi import FastAPI
+from app.routers import router as servers_router
+from app.health import router as health_router
+from app.metrics import router as metrics_router
+from app.database import init_pool, close_pool
+from app.logging import setup_logging
+from app.tracing import setup_tracing
+from app.middleware import RequestIDMiddleware
+
+@asynccontextmanager
+async def lifespan(app: FastAPI):
+    """Startup and shutdown events."""
+    setup_logging(json_logs=False)  # Set to True in production
+    await init_pool()
+    yield
+    await close_pool()
+
+app = FastAPI(
+    title="Server Inventory API",
+    description="CRUD API for managing server inventory",
+    version="1.0.0",
+    lifespan=lifespan
+)
+
+# Add middleware
+app.add_middleware(RequestIDMiddleware)
+
+# Setup observability
+setup_tracing(app)
+
+# API versioning - mount servers under /v1
+app.include_router(servers_router, prefix="/v1")
+
+# Keep backward compatibility with non-versioned endpoints
+app.include_router(servers_router)
+
+# Utility routers (no versioning needed)
+app.include_router(health_router)
+app.include_router(metrics_router)
+
+@app.get("/")
+def read_root():
+    return {"message": "Welcome to the Server Inventory API", "version": "1.0.0"}
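With the app wired up as above, a small smoke check of the mounted routers and middleware (a sketch; assumes the compose stack is running on localhost:8000):

```python
import requests

BASE = "http://localhost:8000"

# Liveness and readiness probes
print(requests.get(f"{BASE}/health").json())  # {'status': 'healthy', ...}
print(requests.get(f"{BASE}/ready").json())   # {'status': 'ready', 'database': 'connected', ...}

# The same servers router is mounted at both the versioned and legacy paths
assert requests.get(f"{BASE}/servers").json() == requests.get(f"{BASE}/v1/servers").json()

# A caller-supplied X-Request-ID is echoed back by the middleware
resp = requests.get(f"{BASE}/servers", headers={"X-Request-ID": "trace-me-123"})
assert resp.headers["X-Request-ID"] == "trace-me-123"
```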
+ "Total HTTP requests", + ["method", "endpoint", "status"] +) + +REQUEST_LATENCY = Histogram( + "http_request_duration_seconds", + "HTTP request latency in seconds", + ["method", "endpoint"] +) + +SERVERS_CREATED = Counter( + "servers_created_total", + "Total number of servers created" +) + +SERVERS_DELETED = Counter( + "servers_deleted_total", + "Total number of servers deleted" +) + +@router.get("/metrics") +async def metrics(): + """Prometheus metrics endpoint.""" + return Response( + content=generate_latest(), + media_type=CONTENT_TYPE_LATEST + ) + +def record_request(method: str, endpoint: str, status: int, duration: float): + """Record metrics for a request.""" + REQUEST_COUNT.labels(method=method, endpoint=endpoint, status=status).inc() + REQUEST_LATENCY.labels(method=method, endpoint=endpoint).observe(duration) diff --git a/app/middleware.py b/app/middleware.py new file mode 100644 index 0000000..a9602bb --- /dev/null +++ b/app/middleware.py @@ -0,0 +1,32 @@ +"""Request ID middleware for request tracing.""" +import uuid +from contextvars import ContextVar +from starlette.middleware.base import BaseHTTPMiddleware +from starlette.requests import Request + + +# Context variable to store request ID across async boundaries +request_id_ctx: ContextVar[str] = ContextVar("request_id", default="") + + +def get_request_id() -> str: + """Get the current request ID from context.""" + return request_id_ctx.get() + + +class RequestIDMiddleware(BaseHTTPMiddleware): + """Middleware that adds X-Request-ID header to all requests/responses.""" + + async def dispatch(self, request: Request, call_next): + # Use existing X-Request-ID or generate new one + request_id = request.headers.get("X-Request-ID", str(uuid.uuid4())) + + # Store in context for logging + token = request_id_ctx.set(request_id) + + try: + response = await call_next(request) + response.headers["X-Request-ID"] = request_id + return response + finally: + request_id_ctx.reset(token) diff --git a/app/models.py b/app/models.py new file mode 100644 index 0000000..edc5597 --- /dev/null +++ b/app/models.py @@ -0,0 +1,29 @@ +from enum import Enum +from datetime import datetime +from ipaddress import IPv4Address +from typing import Optional +from pydantic import BaseModel, ConfigDict, Field + +class ServerState(str, Enum): + active = "active" + offline = "offline" + retired = "retired" + +class ServerBase(BaseModel): + hostname: str = Field(..., min_length=1, max_length=255) + ip_address: IPv4Address + state: ServerState + +class ServerCreate(ServerBase): + pass + +class ServerUpdate(BaseModel): + hostname: Optional[str] = Field(None, min_length=1, max_length=255) + ip_address: Optional[IPv4Address] = None + state: Optional[ServerState] = None + +class Server(ServerBase): + id: int + created_at: datetime + + model_config = ConfigDict(from_attributes=True) diff --git a/app/routers.py b/app/routers.py new file mode 100644 index 0000000..7f53616 --- /dev/null +++ b/app/routers.py @@ -0,0 +1,215 @@ +from fastapi import APIRouter, Depends, HTTPException, status, Header, Response +from psycopg import AsyncConnection +from psycopg.errors import UniqueViolation +from typing import List, Optional + +from app.database import get_db_connection +from app.models import Server, ServerCreate, ServerUpdate +from app.etag import generate_etag, etag_matches, etag_none_match + +router = APIRouter(prefix="/servers", tags=["servers"]) + + +@router.post("/", response_model=Server, status_code=status.HTTP_201_CREATED) +async def create_server( + response: Response, + 
diff --git a/app/routers.py b/app/routers.py
new file mode 100644
index 0000000..7f53616
--- /dev/null
+++ b/app/routers.py
@@ -0,0 +1,215 @@
+from fastapi import APIRouter, Depends, HTTPException, status, Header, Response
+from psycopg import AsyncConnection
+from psycopg.errors import UniqueViolation
+from typing import List, Optional
+
+from app.database import get_db_connection
+from app.models import Server, ServerCreate, ServerUpdate
+from app.etag import generate_etag, etag_matches, etag_none_match
+
+router = APIRouter(prefix="/servers", tags=["servers"])
+
+
+@router.post("/", response_model=Server, status_code=status.HTTP_201_CREATED)
+async def create_server(
+    response: Response,
+    server: ServerCreate,
+    conn: AsyncConnection = Depends(get_db_connection)
+):
+    try:
+        async with conn.cursor() as cur:
+            await cur.execute(
+                """
+                INSERT INTO servers (hostname, ip_address, state)
+                VALUES (%s, %s, %s)
+                RETURNING id, hostname, ip_address, state, created_at
+                """,
+                (server.hostname, str(server.ip_address), server.state.value)
+            )
+            new_server = await cur.fetchone()
+            await conn.commit()
+
+            # Add ETag header to response
+            etag = generate_etag(new_server)
+            response.headers["ETag"] = f'"{etag}"'
+
+            return new_server
+    except UniqueViolation:
+        await conn.rollback()
+        raise HTTPException(status_code=400, detail="Server with this hostname already exists")
+
+
+@router.get("/", response_model=List[Server])
+async def list_servers(
+    limit: int = 100,
+    offset: int = 0,
+    state: Optional[str] = None,
+    hostname_contains: Optional[str] = None,
+    conn: AsyncConnection = Depends(get_db_connection)
+):
+    """List servers with optional filtering.
+
+    Args:
+        limit: Maximum number of results (default 100)
+        offset: Pagination offset (default 0)
+        state: Filter by server state (active, offline, retired)
+        hostname_contains: Filter servers whose hostname contains this string
+    """
+    # Build dynamic WHERE clause
+    conditions = []
+    params = []
+
+    if state:
+        conditions.append("state = %s")
+        params.append(state)
+
+    if hostname_contains:
+        conditions.append("hostname ILIKE %s")
+        params.append(f"%{hostname_contains}%")
+
+    where_clause = ""
+    if conditions:
+        where_clause = "WHERE " + " AND ".join(conditions)
+
+    query = f"""
+        SELECT id, hostname, ip_address, state, created_at
+        FROM servers
+        {where_clause}
+        ORDER BY id
+        LIMIT %s OFFSET %s
+    """
+    params.extend([limit, offset])
+
+    async with conn.cursor() as cur:
+        await cur.execute(query, params)
+        servers = await cur.fetchall()
+        return servers
+
+
+@router.get("/{server_id}", response_model=Server)
+async def get_server(
+    server_id: int,
+    response: Response,
+    conn: AsyncConnection = Depends(get_db_connection),
+    if_none_match: Optional[str] = Header(None)
+):
+    async with conn.cursor() as cur:
+        await cur.execute(
+            "SELECT id, hostname, ip_address, state, created_at FROM servers WHERE id = %s",
+            (server_id,)
+        )
+        server = await cur.fetchone()
+        if not server:
+            raise HTTPException(status_code=404, detail="Server not found")
+
+        # Generate ETag
+        etag = generate_etag(server)
+        response.headers["ETag"] = f'"{etag}"'
+
+        # Check If-None-Match for conditional GET (304 Not Modified)
+        if etag_none_match(etag, if_none_match):
+            return Response(status_code=status.HTTP_304_NOT_MODIFIED, headers={"ETag": f'"{etag}"'})
+
+        return server
+
+
+@router.put("/{server_id}", response_model=Server)
+async def update_server(
+    server_id: int,
+    server_update: ServerUpdate,
+    response: Response,
+    conn: AsyncConnection = Depends(get_db_connection),
+    if_match: Optional[str] = Header(None)
+):
+    # Build query dynamically based on set fields
+    update_data = server_update.model_dump(exclude_unset=True)
+    if not update_data:
+        raise HTTPException(status_code=400, detail="No fields to update")
+
+    # If If-Match is provided, verify the ETag before updating
+    if if_match:
+        async with conn.cursor() as cur:
+            await cur.execute(
+                "SELECT id, hostname, ip_address, state, created_at FROM servers WHERE id = %s",
+                (server_id,)
+            )
+            current = await cur.fetchone()
+            if not current:
+                raise HTTPException(status_code=404, detail="Server not found")
+
+            current_etag = generate_etag(current)
+            if not etag_matches(current_etag, if_match):
+                raise HTTPException(
+                    status_code=status.HTTP_412_PRECONDITION_FAILED,
+                    detail="Resource has been modified. Refresh and retry."
+                )
+
+    set_clauses = []
+    values = []
+    for key, value in update_data.items():
+        if key == 'ip_address':
+            value = str(value)
+        elif key == 'state':
+            value = value.value
+
+        set_clauses.append(f"{key} = %s")
+        values.append(value)
+
+    values.append(server_id)
+
+    query = f"""
+        UPDATE servers
+        SET {", ".join(set_clauses)}
+        WHERE id = %s
+        RETURNING id, hostname, ip_address, state, created_at
+    """
+
+    try:
+        async with conn.cursor() as cur:
+            await cur.execute(query, values)
+            updated_server = await cur.fetchone()
+            if not updated_server:
+                raise HTTPException(status_code=404, detail="Server not found")
+            await conn.commit()
+
+            # Add new ETag to response
+            etag = generate_etag(updated_server)
+            response.headers["ETag"] = f'"{etag}"'
+
+            return updated_server
+    except UniqueViolation:
+        await conn.rollback()
+        raise HTTPException(status_code=400, detail="Server with this hostname already exists")
+
+
+@router.delete("/{server_id}", status_code=status.HTTP_204_NO_CONTENT)
+async def delete_server(
+    server_id: int,
+    conn: AsyncConnection = Depends(get_db_connection),
+    if_match: Optional[str] = Header(None)
+):
+    # If If-Match is provided, verify the ETag before deleting
+    if if_match:
+        async with conn.cursor() as cur:
+            await cur.execute(
+                "SELECT id, hostname, ip_address, state, created_at FROM servers WHERE id = %s",
+                (server_id,)
+            )
+            current = await cur.fetchone()
+            if not current:
+                raise HTTPException(status_code=404, detail="Server not found")
+
+            current_etag = generate_etag(current)
+            if not etag_matches(current_etag, if_match):
+                raise HTTPException(
+                    status_code=status.HTTP_412_PRECONDITION_FAILED,
+                    detail="Resource has been modified. Refresh and retry."
+                )
+
+    async with conn.cursor() as cur:
+        await cur.execute("DELETE FROM servers WHERE id = %s RETURNING id", (server_id,))
+        deleted = await cur.fetchone()
+        if not deleted:
+            raise HTTPException(status_code=404, detail="Server not found")
+        await conn.commit()
diff --git a/app/tracing.py b/app/tracing.py
new file mode 100644
index 0000000..bff9a81
--- /dev/null
+++ b/app/tracing.py
@@ -0,0 +1,41 @@
+"""OpenTelemetry tracing configuration."""
+import os
+from opentelemetry import trace
+from opentelemetry.sdk.trace import TracerProvider
+from opentelemetry.sdk.trace.export import BatchSpanProcessor, ConsoleSpanExporter
+from opentelemetry.sdk.resources import Resource, SERVICE_NAME
+from opentelemetry.instrumentation.fastapi import FastAPIInstrumentor
+
+_provider = None
+
+def setup_tracing(app, service_name: str = "server-inventory-api"):
+    """Configure OpenTelemetry tracing for the application."""
+    global _provider
+
+    resource = Resource(attributes={
+        SERVICE_NAME: service_name
+    })
+
+    _provider = TracerProvider(resource=resource)
+
+    # Only add console exporter if explicitly enabled (avoids I/O errors)
+    # In production, use OTLP exporter instead
+    if os.getenv("OTEL_CONSOLE_EXPORT", "").lower() == "true":
+        processor = BatchSpanProcessor(ConsoleSpanExporter())
+        _provider.add_span_processor(processor)
+
+    trace.set_tracer_provider(_provider)
+
+    # Instrument FastAPI
+    FastAPIInstrumentor.instrument_app(app)
+
+def shutdown_tracing():
+    """Shutdown the tracing provider gracefully."""
+    global _provider
+    if _provider:
+        _provider.shutdown()
+
+def get_tracer(name: str = __name__):
+    """Get a tracer instance for manual instrumentation."""
+    return trace.get_tracer(name)
diff --git a/cli/__init__.py b/cli/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/cli/main.py b/cli/main.py
new file mode 100644
index 0000000..2a08daf
--- /dev/null
+++ b/cli/main.py
@@ -0,0 +1,163 @@
+import typer
+import requests
+import json
+import time
+from typing import Optional
+from enum import Enum
+from functools import wraps
+
+app = typer.Typer(help="Server Inventory CLI - Manage your server fleet")
+
+API_URL = "http://localhost:8000/servers"
+
+# Output format options
+class OutputFormat(str, Enum):
+    json = "json"
+    table = "table"
+
+class ServerState(str, Enum):
+    active = "active"
+    offline = "offline"
+    retired = "retired"
+
+
+def retry_with_backoff(max_retries: int = 3, base_delay: float = 1.0):
+    """Decorator for exponential backoff retry on connection errors."""
+    def decorator(func):
+        @wraps(func)
+        def wrapper(*args, **kwargs):
+            last_exception = None
+            for attempt in range(max_retries):
+                try:
+                    return func(*args, **kwargs)
+                except requests.exceptions.ConnectionError as e:
+                    last_exception = e
+                    # Only back off if another attempt remains; sleeping after
+                    # the final failure would just delay the error.
+                    if attempt < max_retries - 1:
+                        delay = base_delay * (2 ** attempt)
+                        typer.echo(f"Connection failed, retrying in {delay}s... (attempt {attempt + 1}/{max_retries})", err=True)
+                        time.sleep(delay)
+            typer.echo(f"Failed after {max_retries} attempts: {last_exception}", err=True)
+            raise typer.Exit(1)
+        return wrapper
+    return decorator
+
+
+def format_output(data, fmt: OutputFormat):
+    """Format output based on user preference."""
+    if fmt == OutputFormat.json:
+        return json.dumps(data, indent=2, default=str)
+    else:
+        # Table format
+        if isinstance(data, list):
+            if not data:
+                return "No servers found."
+            headers = list(data[0].keys())
+            lines = [" | ".join(headers)]
+            lines.append("-" * len(lines[0]))
+            for item in data:
+                lines.append(" | ".join(str(item.get(h, "")) for h in headers))
+            return "\n".join(lines)
+        else:
+            lines = [f"{k}: {v}" for k, v in data.items()]
+            return "\n".join(lines)
+
+
+@app.command()
+@retry_with_backoff()
+def create(
+    hostname: str,
+    ip_address: str,
+    state: ServerState,
+    format: OutputFormat = typer.Option(OutputFormat.table, "--format", "-f", help="Output format")
+):
+    """Create a new server."""
+    payload = {
+        "hostname": hostname,
+        "ip_address": ip_address,
+        "state": state.value
+    }
+    response = requests.post(API_URL, json=payload)
+    if response.status_code >= 400:
+        typer.echo(f"Error: {response.text}", err=True)
+        raise typer.Exit(1)
+    typer.echo(format_output(response.json(), format))
+
+
+@app.command("list")
+@retry_with_backoff()
+def list_servers(
+    format: OutputFormat = typer.Option(OutputFormat.table, "--format", "-f", help="Output format"),
+    state: Optional[str] = typer.Option(None, "--state", "-s", help="Filter by state"),
+    hostname: Optional[str] = typer.Option(None, "--hostname", "-h", help="Filter by hostname (contains)")
+):
+    """List all servers with optional filtering."""
+    params = {}
+    if state:
+        params["state"] = state
+    if hostname:
+        params["hostname_contains"] = hostname
+
+    response = requests.get(API_URL, params=params)
+    response.raise_for_status()
+    typer.echo(format_output(response.json(), format))
+
+
+@app.command()
+@retry_with_backoff()
+def get(
+    server_id: int,
+    format: OutputFormat = typer.Option(OutputFormat.table, "--format", "-f", help="Output format")
+):
+    """Get a specific server by ID."""
+    response = requests.get(f"{API_URL}/{server_id}")
+    if response.status_code == 404:
+        typer.echo(f"Error: Server {server_id} not found", err=True)
+        raise typer.Exit(1)
+    response.raise_for_status()
+    typer.echo(format_output(response.json(), format))
+
+
+@app.command()
+@retry_with_backoff()
+def update(
+    server_id: int,
+    hostname: Optional[str] = typer.Option(None, "--hostname"),
+    ip_address: Optional[str] = typer.Option(None, "--ip"),
+    state: Optional[ServerState] = typer.Option(None, "--state"),
+    format: OutputFormat = typer.Option(OutputFormat.table, "--format", "-f", help="Output format")
+):
+    """Update a server."""
+    payload = {}
+    if hostname:
+        payload["hostname"] = hostname
+    if ip_address:
+        payload["ip_address"] = ip_address
+    if state:
+        payload["state"] = state.value
+
+    if not payload:
+        typer.echo("No updates specified. Use --hostname, --ip, or --state.", err=True)
+        raise typer.Exit(1)
+
+    response = requests.put(f"{API_URL}/{server_id}", json=payload)
+    if response.status_code >= 400:
+        typer.echo(f"Error: {response.text}", err=True)
+        raise typer.Exit(1)
+    typer.echo(format_output(response.json(), format))
+
+
+@app.command()
+@retry_with_backoff()
+def delete(server_id: int):
+    """Delete a server."""
+    response = requests.delete(f"{API_URL}/{server_id}")
+    if response.status_code == 404:
+        typer.echo(f"Error: Server {server_id} not found", err=True)
+        raise typer.Exit(1)
+    response.raise_for_status()
+    typer.echo(f"✓ Server {server_id} deleted successfully.")
+
+
+if __name__ == "__main__":
+    app()
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 0000000..a36bdfe
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,36 @@
+services:
+  db:
+    image: postgres:15-alpine
+    container_name: db
+    environment:
+      POSTGRES_USER: ${POSTGRES_USER:-admin}
+      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-password}
+      POSTGRES_DB: ${POSTGRES_DB:-inventory}
+    volumes:
+      - postgres_data:/var/lib/postgresql/data
+      - ./init.sql:/docker-entrypoint-initdb.d/init.sql:Z
+    ports:
+      - "5432:5432"
+    healthcheck:
+      test: [ "CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-admin} -d ${POSTGRES_DB:-inventory}" ]
+      interval: 5s
+      timeout: 5s
+      retries: 5
+
+  api:
+    build: .
+    container_name: api
+    ports:
+      - "8000:8000"
+    environment:
+      DATABASE_URL: postgresql://${POSTGRES_USER:-admin}:${POSTGRES_PASSWORD:-password}@db:5432/${POSTGRES_DB:-inventory}
+      OTEL_CONSOLE_EXPORT: "false"
+    depends_on:
+      db:
+        condition: service_healthy
+    volumes:
+      - .:/app:Z
+    command: uvicorn app.main:app --host 0.0.0.0 --port 8000 --reload
+
+volumes:
+  postgres_data:
diff --git a/init.sql b/init.sql
new file mode 100644
index 0000000..09aefbb
--- /dev/null
+++ b/init.sql
@@ -0,0 +1,15 @@
+-- Create ENUM type for server state
+DO $$ BEGIN
+    CREATE TYPE server_state AS ENUM ('active', 'offline', 'retired');
+EXCEPTION
+    WHEN duplicate_object THEN null;
+END $$;
+
+-- Create servers table
+CREATE TABLE IF NOT EXISTS servers (
+    id SERIAL PRIMARY KEY,
+    hostname VARCHAR(255) NOT NULL UNIQUE,
+    ip_address INET,
+    state server_state NOT NULL,
+    created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
+);
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..6224e02
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,45 @@
+[project]
+name = "server-inventory"
+version = "0.1.0"
+description = "Server Inventory Management System"
+authors = [
+    {name = "User", email = "user@example.com"},
+]
+dependencies = [
+    "fastapi>=0.109.0",
+    "uvicorn[standard]>=0.27.0",
+    "psycopg[binary,pool]>=3.1.17",
+    "pydantic>=2.6.0",
+    "pydantic-settings>=2.1.0",
+    "typer[all]>=0.9.0",
+    "requests>=2.31.0",
+    "structlog>=24.1.0",
+    "alembic>=1.13.0",
+    "opentelemetry-api>=1.22.0",
+    "opentelemetry-sdk>=1.22.0",
+    "opentelemetry-instrumentation-fastapi>=0.43b0",
+    "prometheus-client>=0.19.0",
+]
+requires-python = ">=3.10"
+
+[project.optional-dependencies]
+test = [
+    "pytest>=8.0.0",
+    "pytest-asyncio>=0.23.0",
+    "httpx>=0.26.0",
+]
+dev = [
+    "ruff>=0.2.0",
+    "pre-commit>=3.6.0",
+]
+
+[build-system]
+requires = ["setuptools>=42", "wheel"]
+build-backend = "setuptools.build_meta"
+
+[tool.setuptools.packages.find]
+where = ["."]
+
+[tool.pytest.ini_options]
+asyncio_mode = "auto"
+cache_dir = "/tmp/.pytest_cache"
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..e69de29
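The test fixtures that follow assume an initialized database. Locally, the schema can be applied straight from `init.sql`, mirroring the CI "Initialize database" step (a sketch; assumes the compose database is reachable with the defaults from `.env.example`):

```python
import psycopg

# psycopg 3 can run a multi-statement script in one execute() when no
# parameters are passed -- the same approach conftest.py uses below.
conn_str = "postgresql://admin:password@localhost:5432/inventory"
with psycopg.connect(conn_str, autocommit=True) as conn:
    conn.execute(open("init.sql").read())
```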
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 0000000..92b7028
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,68 @@
+import pytest
+import pytest_asyncio
+import asyncio
+from httpx import AsyncClient, ASGITransport
+from app.main import app
+from app.database import get_db_connection, db
+from app.config import settings
+import psycopg
+
+# For this simple task the tests run against the same database as the app;
+# in a real-world setup we would spin up a dedicated test container or a
+# separate test_db.
+
+@pytest.fixture(scope="session")
+def event_loop():
+    loop = asyncio.get_event_loop_policy().new_event_loop()
+    yield loop
+    loop.close()
+
+@pytest_asyncio.fixture(scope="session")
+async def db_pool():
+    # Ensure tables exist. In a real app we would run the Alembic migrations
+    # here; for this task we recreate the schema from init.sql's definitions.
+    conn_str = settings.DATABASE_URL
+    async with await psycopg.AsyncConnection.connect(conn_str, autocommit=True) as conn:
+        async with conn.cursor() as cur:
+            # Very simple teardown/rebuild for fresh state
+            await cur.execute("DROP TABLE IF EXISTS servers CASCADE")
+            await cur.execute("DROP TYPE IF EXISTS server_state CASCADE")
+
+            # Re-create (reading init.sql would be better, but the schema is
+            # hardcoded here for the simplicity of this artifact)
+            await cur.execute("""
+                DO $$ BEGIN
+                    CREATE TYPE server_state AS ENUM ('active', 'offline', 'retired');
+                EXCEPTION
+                    WHEN duplicate_object THEN null;
+                END $$;
+
+                CREATE TABLE IF NOT EXISTS servers (
+                    id SERIAL PRIMARY KEY,
+                    hostname VARCHAR(255) NOT NULL UNIQUE,
+                    ip_address INET,
+                    state server_state NOT NULL,
+                    created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
+                );
+            """)
+    yield
+
+@pytest_asyncio.fixture
+async def override_get_db(db_pool):
+    # Ideally each test would run in a transaction that rolls back, but proper
+    # transactional tests are complex to set up without testcontainers or
+    # similar libraries, so we simply TRUNCATE for a fresh state per test.
+    async with db.get_connection() as conn:
+        async with conn.cursor() as cur:
+            await cur.execute("TRUNCATE TABLE servers RESTART IDENTITY")
+        await conn.commit()
+
+    # Yield a connection from the actual dependency
+    async for conn in get_db_connection():
+        yield conn
+
+@pytest_asyncio.fixture
+async def client(override_get_db):
+    transport = ASGITransport(app=app)
+    async with AsyncClient(transport=transport, base_url="http://test") as ac:
+        yield ac
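One gap in the suite below is pagination; a possible extra test in the same style as the others (a sketch, not part of the diff):

```python
import pytest

@pytest.mark.asyncio
async def test_pagination_limit_offset(client):
    # Fresh table per test, so ids are 1..3 here
    for i in range(3):
        await client.post("/servers/", json={
            "hostname": f"page-{i}",
            "ip_address": f"10.9.0.{i + 1}",
            "state": "active",
        })

    page = await client.get("/servers/?limit=2&offset=1")
    assert page.status_code == 200
    data = page.json()
    assert len(data) == 2
    assert data[0]["hostname"] == "page-1"  # ORDER BY id, so offset skips page-0
```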
diff --git a/tests/test_api.py b/tests/test_api.py
new file mode 100644
index 0000000..38a191a
--- /dev/null
+++ b/tests/test_api.py
@@ -0,0 +1,214 @@
+import pytest
+
+@pytest.mark.asyncio
+async def test_create_server(client):
+    response = await client.post("/servers/", json={
+        "hostname": "web-01",
+        "ip_address": "192.168.1.10",
+        "state": "active"
+    })
+
+    assert response.status_code == 201
+    data = response.json()
+    assert data["hostname"] == "web-01"
+    assert data["state"] == "active"
+    assert "id" in data
+
+@pytest.mark.asyncio
+async def test_create_duplicate_hostname(client):
+    # Create first
+    await client.post("/servers/", json={
+        "hostname": "web-01",
+        "ip_address": "192.168.1.10",
+        "state": "active"
+    })
+
+    # Try duplicate
+    response = await client.post("/servers/", json={
+        "hostname": "web-01",
+        "ip_address": "10.0.0.1",
+        "state": "offline"
+    })
+    assert response.status_code == 400
+
+@pytest.mark.asyncio
+async def test_list_servers(client):
+    await client.post("/servers/", json={"hostname": "s1", "ip_address": "1.1.1.1", "state": "active"})
+    await client.post("/servers/", json={"hostname": "s2", "ip_address": "2.2.2.2", "state": "offline"})
+
+    response = await client.get("/servers/")
+    assert response.status_code == 200
+    data = response.json()
+    assert len(data) == 2
+
+@pytest.mark.asyncio
+async def test_get_server(client):
+    r_create = await client.post("/servers/", json={"hostname": "s1", "ip_address": "1.1.1.1", "state": "active"})
+    server_id = r_create.json()["id"]
+
+    response = await client.get(f"/servers/{server_id}")
+    assert response.status_code == 200
+    assert response.json()["hostname"] == "s1"
+
+@pytest.mark.asyncio
+async def test_get_server_not_found(client):
+    response = await client.get("/servers/999999")
+    assert response.status_code == 404
+
+@pytest.mark.asyncio
+async def test_update_server(client):
+    r_create = await client.post("/servers/", json={"hostname": "s1", "ip_address": "1.1.1.1", "state": "active"})
+    server_id = r_create.json()["id"]
+
+    response = await client.put(f"/servers/{server_id}", json={"state": "retired"})
+    assert response.status_code == 200
+    assert response.json()["state"] == "retired"
+
+    # Verify persistence
+    r_get = await client.get(f"/servers/{server_id}")
+    assert r_get.json()["state"] == "retired"
+
+@pytest.mark.asyncio
+async def test_delete_server(client):
+    r_create = await client.post("/servers/", json={"hostname": "s1", "ip_address": "1.1.1.1", "state": "active"})
+    server_id = r_create.json()["id"]
+
+    response = await client.delete(f"/servers/{server_id}")
+    assert response.status_code == 204
+
+    # Verify gone
+    r_get = await client.get(f"/servers/{server_id}")
+    assert r_get.status_code == 404
+
+@pytest.mark.asyncio
+async def test_create_server_invalid_ip(client):
+    response = await client.post("/servers/", json={
+        "hostname": "test-invalid-ip",
+        "ip_address": "invalid-ip-string",
+        "state": "active"
+    })
+    assert response.status_code == 422
+
+
+# ETag Tests
+@pytest.mark.asyncio
+async def test_etag_returned_on_create(client):
+    """Test that ETag is returned when creating a server."""
+    response = await client.post("/servers/", json={
+        "hostname": "etag-test",
+        "ip_address": "10.0.0.1",
+        "state": "active"
+    })
+    assert response.status_code == 201
+    assert "etag" in response.headers
+
+
+@pytest.mark.asyncio
+async def test_etag_returned_on_get(client):
+    """Test that ETag is returned when getting a server."""
+    r_create = await client.post("/servers/", json={
+        "hostname": "etag-get-test",
+        "ip_address": "10.0.0.2",
+        "state": "active"
+    })
+    server_id = r_create.json()["id"]
+
+    response = await client.get(f"/servers/{server_id}")
+    assert response.status_code == 200
+    assert "etag" in response.headers
+
+
+@pytest.mark.asyncio
+async def test_if_none_match_returns_304(client):
+    """Test conditional GET with If-None-Match returns 304 Not Modified."""
+    r_create = await client.post("/servers/", json={
+        "hostname": "conditional-get-test",
+        "ip_address": "10.0.0.3",
+        "state": "active"
+    })
+    server_id = r_create.json()["id"]
+
+    # First GET to get ETag
+    r_get = await client.get(f"/servers/{server_id}")
+    etag = r_get.headers["etag"]
+
+    # Conditional GET with same ETag should return 304
+    response = await client.get(f"/servers/{server_id}", headers={"If-None-Match": etag})
+    assert response.status_code == 304
+
+
+@pytest.mark.asyncio
+async def test_if_match_update_succeeds(client):
+    """Test update with correct If-Match header succeeds."""
+    r_create = await client.post("/servers/", json={
+        "hostname": "if-match-test",
+        "ip_address": "10.0.0.4",
+        "state": "active"
+    })
+    server_id = r_create.json()["id"]
+    etag = r_create.headers["etag"]
+
+    response = await client.put(
+        f"/servers/{server_id}",
+        json={"state": "offline"},
+        headers={"If-Match": etag}
+    )
+    assert response.status_code == 200
+
+
+@pytest.mark.asyncio
+async def test_if_match_update_fails_with_stale_etag(client):
+    """Test update with stale If-Match header returns 412 Precondition Failed."""
+    r_create = await client.post("/servers/", json={
+        "hostname": "stale-etag-test",
+        "ip_address": "10.0.0.5",
+        "state": "active"
+    })
+    server_id = r_create.json()["id"]
+    old_etag = r_create.headers["etag"]
+
+    # Update to change the ETag
+    await client.put(f"/servers/{server_id}", json={"state": "offline"})
+
+    # Try to update with old ETag
+    response = await client.put(
+        f"/servers/{server_id}",
+        json={"state": "retired"},
+        headers={"If-Match": old_etag}
+    )
+    assert response.status_code == 412
+
+
+# Filtering Tests
+@pytest.mark.asyncio
+async def test_filter_by_state(client):
+    """Test filtering servers by state."""
+    await client.post("/servers/", json={"hostname": "filter-active", "ip_address": "10.1.1.1", "state": "active"})
+    await client.post("/servers/", json={"hostname": "filter-offline", "ip_address": "10.1.1.2", "state": "offline"})
+
+    response = await client.get("/servers/?state=active")
+    assert response.status_code == 200
+    data = response.json()
+    assert all(s["state"] == "active" for s in data)
+
+
+@pytest.mark.asyncio
+async def test_filter_by_hostname_contains(client):
+    """Test filtering servers by hostname pattern."""
+    await client.post("/servers/", json={"hostname": "web-prod-01", "ip_address": "10.2.1.1", "state": "active"})
+    await client.post("/servers/", json={"hostname": "db-prod-01", "ip_address": "10.2.1.2", "state": "active"})
+
+    response = await client.get("/servers/?hostname_contains=web")
+    assert response.status_code == 200
+    data = response.json()
+    assert all("web" in s["hostname"] for s in data)
all("web" in s["hostname"] for s in data) + + +# Request ID Test +@pytest.mark.asyncio +async def test_request_id_header(client): + """Test that X-Request-ID header is returned in responses.""" + response = await client.get("/servers/") + assert "x-request-id" in response.headers + + diff --git a/tests/test_cli.py b/tests/test_cli.py new file mode 100644 index 0000000..e2ada41 --- /dev/null +++ b/tests/test_cli.py @@ -0,0 +1,27 @@ +from typer.testing import CliRunner +from cli.main import app +from unittest.mock import patch, MagicMock + +runner = CliRunner() + +def test_list_servers(): + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.return_value = [{"id": 1, "hostname": "test"}] + + with patch("requests.get", return_value=mock_response) as mock_get: + result = runner.invoke(app, ["list"]) + assert result.exit_code == 0 + assert "test" in result.stdout + mock_get.assert_called_once() + +def test_create_server(): + mock_response = MagicMock() + mock_response.status_code = 201 + mock_response.json.return_value = {"id": 1, "hostname": "new", "state": "active"} + + with patch("requests.post", return_value=mock_response) as mock_post: + result = runner.invoke(app, ["create", "new", "1.1.1.1", "active"]) + assert result.exit_code == 0 + assert "new" in result.stdout + mock_post.assert_called_once()