diff --git a/TESTING.md b/TESTING.md
new file mode 100644
index 0000000..375155e
--- /dev/null
+++ b/TESTING.md
@@ -0,0 +1,255 @@
+# 🧪 Testing Guide
+
+This document explains how to run tests for the ApplyBot application.
+
+## 🚀 Quick Start
+
+1. **Install dependencies:**
+   ```bash
+   pip install -r requirements.txt
+   ```
+
+2. **Run all tests:**
+   ```bash
+   python run_tests.py
+   ```
+
+## 📁 Test Structure
+
+- `conftest.py` - Pytest configuration with fixtures and mocks
+- `test_integration_simple.py` - Integration tests for API endpoints
+- `run_tests.py` - Test runner script with different options
+- `TESTING.md` - This documentation file
+
+## 🔧 Environment Setup
+
+### Environment Variables
+
+Tests automatically load environment variables from the `.env` file. Required variables:
+
+```bash
+# Database
+DATABASE_URL=sqlite:///test.db
+
+# AI Services
+OPENAI_API_KEY=your-openai-key
+
+# Supabase
+SUPABASE_URL=https://your-project.supabase.co
+SUPABASE_KEY=your-supabase-key
+
+# Optional - test defaults are used if not provided
+REDIS_URL=redis://localhost:6379/1
+REED_API_KEY=your-reed-key
+ADZUNA_APP_ID=your-adzuna-id
+ADZUNA_APP_KEY=your-adzuna-key
+```
+
+### Test Environment Variables
+
+If environment variables are not set, `conftest.py` automatically provides test defaults:
+
+- `DATABASE_URL`: `sqlite:///test.db`
+- `REDIS_URL`: `redis://localhost:6379/1`
+- `OPENAI_API_KEY`: `test-openai-key`
+- `SUPABASE_URL`: `https://test.supabase.co`
+- `SUPABASE_KEY`: `test-supabase-key`
+- `ENVIRONMENT`: `test`
+
+## 🎯 Running Tests
+
+### Using the Test Runner
+
+```bash
+# Run all tests
+python run_tests.py
+
+# Run only integration tests
+python run_tests.py integration
+
+# Run with coverage report
+python run_tests.py coverage
+
+# Test environment variable loading
+python run_tests.py env
+
+# Show help
+python run_tests.py --help
+```
+
+### Using Pytest Directly
+
+```bash
+# Run all tests with verbose output
+pytest -v
+
+# Run a specific test file
+pytest test_integration_simple.py -v
+
+# Run a specific test class
+pytest test_integration_simple.py::TestHealthEndpoint -v
+
+# Run a specific test method
+pytest test_integration_simple.py::TestHealthEndpoint::test_health_check -v
+
+# Run with coverage
+pytest --cov=app --cov-report=html
+```
+
+## 🔍 Test Categories
+
+### Integration Tests (`test_integration_simple.py`)
+
+Tests API endpoints with mocked external dependencies:
+
+- **Health Check**: Basic server health endpoint
+- **Job Endpoints**: Job fetching, filtering, and sources
+- **Project Matching**: Project-to-job matching algorithms
+- **Resume Generation**: PDF resume creation with AI
+- **Cover Letter Generation**: AI-powered cover letter creation
+- **Cache Endpoints**: Redis cache statistics
+- **Environment Variables**: Proper loading and defaults
+
+### Mocked Dependencies
+
+Tests mock external services to avoid:
+
+- Real API calls to OpenAI, Reed, and Adzuna
+- Database connections (a test database is used)
+- Redis connections (mocked)
+- File system operations
+
+## 🛠️ Fixtures Available
+
+From `conftest.py`:
+
+- `mock_openai_client` - Mocked OpenAI API client
+- `mock_supabase_client` - Mocked Supabase client
+- `mock_redis_client` - Mocked Redis client
+- `mock_external_apis` - Mocked job API responses
+- `sample_job_data` - Sample job data for testing
+- `sample_user_data` - Sample user data for testing
+- `sample_project_data` - Sample project data for testing
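+
+Fixtures are injected by pytest when a test names them as parameters. The
+sketch below is illustrative only: it combines `mock_external_apis` with
+`sample_job_data`, and `fetch_jobs_from_api` is a hypothetical stand-in for
+whatever application code performs the HTTP request:
+
+```python
+import requests
+
+def fetch_jobs_from_api():
+    # Hypothetical helper; in the real app this would live in the app package.
+    return requests.get("https://api.example.com/jobs").json()["results"]
+
+def test_fetch_uses_mocked_api(mock_external_apis, sample_job_data):
+    """No real HTTP call is made; requests.get is patched by the fixture."""
+    jobs = fetch_jobs_from_api()
+    assert jobs[0]["title"] == "Test Developer"
+    # The sample fixtures provide realistic payloads for assertions
+    assert "title" in sample_job_data
+```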
+
+## 📊 Coverage Reports
+
+Generate HTML coverage reports:
+
+```bash
+python run_tests.py coverage
+```
+
+View the report:
+
+```bash
+open htmlcov/index.html      # macOS
+xdg-open htmlcov/index.html  # Linux
+```
+
+## 🐛 Troubleshooting
+
+### Common Issues
+
+1. **Import Errors**
+   ```bash
+   # Make sure you're in the project root
+   cd /path/to/ApplyBot
+
+   # Install dependencies
+   pip install -r requirements.txt
+   ```
+
+2. **Environment Variable Issues**
+   ```bash
+   # Test environment loading specifically
+   python run_tests.py env
+   ```
+
+3. **API Server Not Running**
+   - Integration tests are skipped if the API server isn't running
+   - Start the server: `python start_server.py`
+   - Or run only the tests that don't need the server
+
+4. **Database Issues**
+   ```bash
+   # Tests use SQLite by default
+   # Make sure DATABASE_URL is set correctly
+   export DATABASE_URL="sqlite:///test.db"
+   ```
+
+### Debug Mode
+
+Run tests with more verbose output:
+
+```bash
+pytest -v -s --tb=long
+```
+
+## 🔄 Continuous Integration
+
+For CI/CD pipelines, use:
+
+```bash
+# Install dependencies
+pip install -r requirements.txt
+
+# Run tests with JUnit XML output
+pytest --junitxml=test-results.xml
+
+# Run with coverage for CI
+pytest --cov=app --cov-report=xml --cov-report=term
+```
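+
+To make the CI job fail when coverage drops below a threshold, pytest-cov's
+`--cov-fail-under` option can be added (the 80 below is an illustrative
+threshold, not a project requirement):
+
+```bash
+pytest --cov=app --cov-report=xml --cov-fail-under=80
+```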
+
+## 📝 Writing New Tests
+
+### Test File Naming
+
+- Integration tests: `test_integration_*.py`
+- Unit tests: `test_unit_*.py`
+- Specific feature tests: `test_feature_*.py`
+
+### Example Test Structure
+
+```python
+import pytest
+from unittest.mock import patch
+
+class TestNewFeature:
+    """Test new feature functionality."""
+
+    def test_basic_functionality(self, sample_data):
+        """Test basic feature works."""
+        # Arrange
+        input_data = sample_data
+
+        # Act
+        result = your_function(input_data)
+
+        # Assert
+        assert result is not None
+        assert result["status"] == "success"
+
+    @patch('external.service.call')
+    def test_with_mocked_service(self, mock_service):
+        """Test with mocked external service."""
+        # Setup mock
+        mock_service.return_value = {"data": "test"}
+
+        # Test your code
+        result = function_that_calls_service()
+
+        # Verify
+        assert mock_service.called
+        assert result["data"] == "test"
+```
+
+(`your_function`, `function_that_calls_service`, and the `sample_data` fixture
+are placeholders - substitute the code and fixtures under test.)
+
+## 🎯 Best Practices
+
+1. **Use Fixtures**: Leverage existing fixtures for common test data
+2. **Mock External Services**: Always mock external API calls
+3. **Test Edge Cases**: Include error conditions and edge cases
+4. **Clear Test Names**: Use descriptive test method names
+5. **Arrange-Act-Assert**: Structure tests clearly
+6. **Independent Tests**: Each test should be independent
+7. **Clean Up**: Use fixtures for setup/teardown when needed
+
+---
+
+For more information, see the main [README.md](README.md) and [API_DOCUMENTATION.md](API_DOCUMENTATION.md).
\ No newline at end of file
diff --git a/conftest.py b/conftest.py
new file mode 100644
index 0000000..ebb7362
--- /dev/null
+++ b/conftest.py
@@ -0,0 +1,137 @@
+"""
+Pytest configuration file for ApplyBot tests.
+Handles environment variable loading and mock configurations.
+"""
+
+import os
+import pytest
+from unittest.mock import patch, MagicMock
+from dotenv import load_dotenv
+
+# Load environment variables from .env file
+load_dotenv()
+
+@pytest.fixture(scope="session", autouse=True)
+def setup_test_environment():
+    """Set up the test environment with mock configurations."""
+    # Set test environment variables if not already set
+    test_env_vars = {
+        "DATABASE_URL": "sqlite:///test.db",
+        "REDIS_URL": "redis://localhost:6379/1",
+        "OPENAI_API_KEY": "test-openai-key",
+        "SUPABASE_URL": "https://test.supabase.co",
+        "SUPABASE_KEY": "test-supabase-key",
+        "REED_API_KEY": "test-reed-key",
+        "ADZUNA_APP_ID": "test-adzuna-id",
+        "ADZUNA_APP_KEY": "test-adzuna-key",
+        "ENVIRONMENT": "test"
+    }
+
+    for key, value in test_env_vars.items():
+        if not os.getenv(key):
+            os.environ[key] = value
+
+@pytest.fixture
+def mock_openai_client():
+    """Mock OpenAI client for testing."""
+    with patch('openai.OpenAI') as mock_client:
+        mock_instance = MagicMock()
+        mock_client.return_value = mock_instance
+
+        # Mock chat completions
+        mock_instance.chat.completions.create.return_value = MagicMock(
+            choices=[MagicMock(message=MagicMock(content="Mock AI response"))]
+        )
+
+        yield mock_instance
+
+@pytest.fixture
+def mock_supabase_client():
+    """Mock Supabase client for testing."""
+    with patch('supabase.create_client') as mock_create:
+        mock_client = MagicMock()
+        mock_create.return_value = mock_client
+
+        # Mock table operations
+        mock_client.table.return_value.select.return_value.execute.return_value = MagicMock(
+            data=[]
+        )
+        mock_client.table.return_value.insert.return_value.execute.return_value = MagicMock(
+            data=[{"id": "test-id"}]
+        )
+
+        yield mock_client
+
+@pytest.fixture
+def mock_redis_client():
+    """Mock Redis client for testing."""
+    with patch('redis.Redis') as mock_redis:
+        mock_instance = MagicMock()
+        mock_redis.return_value = mock_instance
+
+        # Mock Redis operations
+        mock_instance.get.return_value = None
+        mock_instance.set.return_value = True
+        mock_instance.ping.return_value = True
+
+        yield mock_instance
+
+@pytest.fixture
+def mock_external_apis():
+    """Mock external job APIs (Reed, Adzuna, etc.)."""
+    with patch('requests.get') as mock_get:
+        mock_response = MagicMock()
+        mock_response.status_code = 200
+        mock_response.json.return_value = {
+            "results": [
+                {
+                    "id": "test-job-1",
+                    "title": "Test Developer",
+                    "company": "Test Company",
+                    "location": "Test Location",
+                    "description": "Test job description"
+                }
+            ]
+        }
+        mock_get.return_value = mock_response
+
+        yield mock_get
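+
+# Note: the sample_* fixtures below are function-scoped and return plain
+# dicts, so individual tests can freely override fields, e.g.:
+#     def test_remote_job(sample_job_data):
+#         sample_job_data["location"] = "Remote"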
+
+@pytest.fixture
+def sample_job_data():
+    """Sample job data for testing."""
+    return {
+        "id": "test-job-uuid",
+        "title": "Senior Python Developer",
+        "company": "TechCorp",
+        "description": "We are looking for a senior Python developer...",
+        "location": "San Francisco, CA",
+        "salary_range": "$120k - $150k",
+        "requirements": ["Python", "FastAPI", "PostgreSQL"],
+        "source": "RemoteOK",
+        "posted_date": "2024-01-15T10:30:00Z"
+    }
+
+@pytest.fixture
+def sample_user_data():
+    """Sample user data for testing."""
+    return {
+        "user_id": "test-user-uuid",
+        "name": "John Doe",
+        "email": "john@example.com",
+        "phone": "+1 (555) 123-4567",
+        "location": "San Francisco, CA",
+        "experience_years": "5+",
+        "primary_skills": ["Python", "React", "AWS"]
+    }
+
+@pytest.fixture
+def sample_project_data():
+    """Sample project data for testing."""
+    return {
+        "project_id": "test-project-uuid",
+        "title": "E-commerce Platform",
+        "description": "Built a full-stack e-commerce platform using Python, FastAPI, and React",
+        "technologies": ["Python", "FastAPI", "React", "PostgreSQL"],
+        "user_id": "test-user-uuid"
+    }
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index c97cc8a..afdb182 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -10,6 +10,7 @@ psycopg2-binary==2.9.9
 
 # HTTP client
 httpx==0.25.2
+requests==2.31.0
 
 # Logging
 loguru==0.7.2
@@ -44,4 +45,10 @@ mlflow==2.8.1
 python-dateutil==2.8.2
 
 # CORS
-python-cors==1.0.1
\ No newline at end of file
+python-cors==1.0.1
+
+# Testing dependencies
+pytest==7.4.3
+pytest-asyncio==0.21.1
+pytest-mock==3.12.0
+pytest-cov==4.1.0
\ No newline at end of file
diff --git a/run_tests.py b/run_tests.py
new file mode 100644
index 0000000..84f3bbf
--- /dev/null
+++ b/run_tests.py
@@ -0,0 +1,136 @@
+#!/usr/bin/env python3
+"""
+Test runner script for ApplyBot.
+Provides easy commands to run different types of tests.
+"""
+
+import os
+import sys
+import subprocess
+from pathlib import Path
+
+def run_command(cmd, description):
+    """Run a command and handle errors."""
+    print(f"\n🔄 {description}")
+    print(f"Running: {' '.join(cmd)}")
+
+    try:
+        result = subprocess.run(cmd, check=True, capture_output=True, text=True)
+        print(f"✅ {description} completed successfully")
+        if result.stdout:
+            print(result.stdout)
+        return True
+    except subprocess.CalledProcessError as e:
+        print(f"❌ {description} failed")
+        print(f"Error: {e}")
+        if e.stdout:
+            print("STDOUT:", e.stdout)
+        if e.stderr:
+            print("STDERR:", e.stderr)
+        return False
+
+def main():
+    """Main test runner function."""
+    # Ensure we're in the project root
+    project_root = Path(__file__).parent
+    os.chdir(project_root)
+
+    print("🧪 ApplyBot Test Runner")
+    print("=" * 50)
+
+    # Check if pytest is installed
+    try:
+        subprocess.run(["pytest", "--version"], check=True, capture_output=True)
+    except (subprocess.CalledProcessError, FileNotFoundError):
+        print("❌ pytest not found. Please install dependencies:")
+        print("   pip install -r requirements.txt")
+        sys.exit(1)
+
+    # Parse command line arguments
+    if len(sys.argv) > 1:
+        test_type = sys.argv[1].lower()
+    else:
+        test_type = "all"
+
+    # Reject unknown test types instead of silently reporting success
+    valid_types = {"all", "integration", "unit", "coverage", "env"}
+    if test_type not in valid_types:
+        print(f"❌ Unknown test type: {test_type}")
+        print_usage()
+        sys.exit(1)
+
+    success = True
+
+    if test_type in ["all", "integration"]:
+        # Run integration tests
+        cmd = [
+            "pytest",
+            "test_integration_simple.py",
+            "-v",
+            "--tb=short",
+            "--color=yes"
+        ]
+        success &= run_command(cmd, "Running integration tests")
+
+    if test_type in ["all", "unit"]:
+        # Run unit tests (if any exist)
+        unit_test_files = list(Path(".").glob("test_unit_*.py"))
+        if unit_test_files:
+            cmd = [
+                "pytest",
+                *[str(f) for f in unit_test_files],
+                "-v",
+                "--tb=short",
+                "--color=yes"
+            ]
+            success &= run_command(cmd, "Running unit tests")
+        else:
+            print("ℹ️ No unit test files found (test_unit_*.py)")
+
+    if test_type == "coverage":
+        # Run tests with coverage
+        cmd = [
+            "pytest",
+            "--cov=app",
+            "--cov-report=html",
+            "--cov-report=term-missing",
+            "-v"
+        ]
+        success &= run_command(cmd, "Running tests with coverage")
+
+        if success:
+            print("\n📊 Coverage report generated in htmlcov/index.html")
+
+    if test_type == "env":
+        # Test environment variable loading
+        cmd = [
+            "pytest",
+            "test_integration_simple.py::TestEnvironmentVariables",
+            "-v"
+        ]
+        success &= run_command(cmd, "Testing environment variable loading")
+
+    # Summary
+    print("\n" + "=" * 50)
+    if success:
+        print("✅ All tests completed successfully!")
+    else:
Check the output above.") + sys.exit(1) + +def print_usage(): + """Print usage information.""" + print(""" +Usage: python run_tests.py [test_type] + +Test types: + all - Run all tests (default) + integration - Run integration tests only + unit - Run unit tests only + coverage - Run tests with coverage report + env - Test environment variable loading only + +Examples: + python run_tests.py + python run_tests.py integration + python run_tests.py coverage + """) + +if __name__ == "__main__": + if len(sys.argv) > 1 and sys.argv[1] in ["-h", "--help", "help"]: + print_usage() + else: + main() \ No newline at end of file diff --git a/test_integration_simple.py b/test_integration_simple.py new file mode 100644 index 0000000..91c18fc --- /dev/null +++ b/test_integration_simple.py @@ -0,0 +1,276 @@ +""" +Simple integration tests for ApplyBot API endpoints. +Tests basic functionality with mocked external dependencies. +""" + +import os +import pytest +import requests +from dotenv import load_dotenv +from unittest.mock import patch, MagicMock + +# Load environment variables at module level +load_dotenv() + +# Test configuration +BASE_URL = os.getenv("TEST_BASE_URL", "http://localhost:8000/api/v1") +TEST_TIMEOUT = 30 + +class TestHealthEndpoint: + """Test health check endpoint.""" + + def test_health_check(self): + """Test that health endpoint returns expected response.""" + try: + response = requests.get(f"{BASE_URL}/health", timeout=TEST_TIMEOUT) + assert response.status_code == 200 + + data = response.json() + assert "status" in data + assert "service" in data + assert data["status"] == "healthy" + + except requests.exceptions.ConnectionError: + pytest.skip("API server not running - skipping integration test") + +class TestJobEndpoints: + """Test job-related endpoints with mocked external APIs.""" + + def test_get_jobs_empty_response(self, mock_external_apis): + """Test getting jobs when no jobs exist.""" + try: + response = requests.get(f"{BASE_URL}/jobs", timeout=TEST_TIMEOUT) + assert response.status_code == 200 + + data = response.json() + assert isinstance(data, list) + + except requests.exceptions.ConnectionError: + pytest.skip("API server not running - skipping integration test") + + def test_get_jobs_with_filters(self, mock_external_apis): + """Test getting jobs with query parameters.""" + try: + params = { + "keywords": "python,developer", + "location": "San Francisco", + "limit": 10 + } + response = requests.get(f"{BASE_URL}/jobs", params=params, timeout=TEST_TIMEOUT) + assert response.status_code == 200 + + data = response.json() + assert isinstance(data, list) + + except requests.exceptions.ConnectionError: + pytest.skip("API server not running - skipping integration test") + + @patch('requests.post') + def test_fetch_jobs(self, mock_post, mock_external_apis): + """Test fetching new jobs from external sources.""" + # Mock the internal API response + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.return_value = { + "success": True, + "message": "Fetched 5 jobs, 3 new, 2 updated", + "results": { + "total_fetched": 5, + "new_jobs": 3, + "updated_jobs": 2, + "sources": { + "RemoteOK": {"fetched": 3, "new": 2, "updated": 1, "status": "success"}, + "GitHub": {"fetched": 2, "new": 1, "updated": 1, "status": "success"} + } + } + } + mock_post.return_value = mock_response + + try: + payload = { + "keywords": ["python", "react"], + "limit_per_source": 10 + } + response = requests.post(f"{BASE_URL}/jobs/fetch", json=payload, timeout=TEST_TIMEOUT) + assert 
+
+class TestJobEndpoints:
+    """Test job-related endpoints with mocked external APIs."""
+
+    def test_get_jobs_empty_response(self, mock_external_apis):
+        """Test getting jobs when no jobs exist."""
+        try:
+            response = requests.get(f"{BASE_URL}/jobs", timeout=TEST_TIMEOUT)
+            assert response.status_code == 200
+
+            data = response.json()
+            assert isinstance(data, list)
+
+        except requests.exceptions.ConnectionError:
+            pytest.skip("API server not running - skipping integration test")
+
+    def test_get_jobs_with_filters(self, mock_external_apis):
+        """Test getting jobs with query parameters."""
+        try:
+            params = {
+                "keywords": "python,developer",
+                "location": "San Francisco",
+                "limit": 10
+            }
+            response = requests.get(f"{BASE_URL}/jobs", params=params, timeout=TEST_TIMEOUT)
+            assert response.status_code == 200
+
+            data = response.json()
+            assert isinstance(data, list)
+
+        except requests.exceptions.ConnectionError:
+            pytest.skip("API server not running - skipping integration test")
+
+    @patch('requests.post')
+    def test_fetch_jobs(self, mock_post, mock_external_apis):
+        """Test fetching new jobs from external sources."""
+        # Mock the internal API response. Because requests.post is patched,
+        # the call below is answered by this mock rather than a live server.
+        mock_response = MagicMock()
+        mock_response.status_code = 200
+        mock_response.json.return_value = {
+            "success": True,
+            "message": "Fetched 5 jobs, 3 new, 2 updated",
+            "results": {
+                "total_fetched": 5,
+                "new_jobs": 3,
+                "updated_jobs": 2,
+                "sources": {
+                    "RemoteOK": {"fetched": 3, "new": 2, "updated": 1, "status": "success"},
+                    "GitHub": {"fetched": 2, "new": 1, "updated": 1, "status": "success"}
+                }
+            }
+        }
+        mock_post.return_value = mock_response
+
+        try:
+            payload = {
+                "keywords": ["python", "react"],
+                "limit_per_source": 10
+            }
+            response = requests.post(f"{BASE_URL}/jobs/fetch", json=payload, timeout=TEST_TIMEOUT)
+            assert response.status_code == 200
+
+            data = response.json()
+            assert data["success"] is True
+            assert "results" in data
+
+        except requests.exceptions.ConnectionError:
+            pytest.skip("API server not running - skipping integration test")
+
+    def test_get_job_sources(self):
+        """Test getting available job sources."""
+        try:
+            response = requests.get(f"{BASE_URL}/jobs/sources", timeout=TEST_TIMEOUT)
+            assert response.status_code == 200
+
+            data = response.json()
+            assert "success" in data
+            assert "enabled_sources" in data
+            assert "available_sources" in data
+
+        except requests.exceptions.ConnectionError:
+            pytest.skip("API server not running - skipping integration test")
+
+class TestProjectMatching:
+    """Test project matching endpoints with mocked data."""
+
+    def test_match_projects_invalid_job_id(self):
+        """Test project matching with invalid job ID."""
+        try:
+            params = {"user_id": "test-user-id"}
+            response = requests.get(f"{BASE_URL}/match/invalid-job-id", params=params, timeout=TEST_TIMEOUT)
+            assert response.status_code in [404, 422]  # Either not found or validation error
+
+        except requests.exceptions.ConnectionError:
+            pytest.skip("API server not running - skipping integration test")
+
+    def test_match_projects_missing_user_id(self):
+        """Test project matching without required user_id parameter."""
+        try:
+            response = requests.get(f"{BASE_URL}/match/test-job-id", timeout=TEST_TIMEOUT)
+            assert response.status_code == 422  # Validation error
+
+        except requests.exceptions.ConnectionError:
+            pytest.skip("API server not running - skipping integration test")
+
+class TestResumeGeneration:
+    """Test resume generation endpoints with mocked AI services."""
+
+    def test_generate_resume_missing_data(self, mock_openai_client):
+        """Test resume generation with missing required data."""
+        try:
+            payload = {
+                "name": "John Doe"
+                # Missing other required fields
+            }
+            response = requests.post(f"{BASE_URL}/resume/generate", json=payload, timeout=TEST_TIMEOUT)
+            assert response.status_code == 422  # Validation error
+
+        except requests.exceptions.ConnectionError:
+            pytest.skip("API server not running - skipping integration test")
+
+    def test_generate_resume_complete_data(self, mock_openai_client, sample_user_data):
+        """Test resume generation with complete user data."""
+        try:
+            payload = {
+                **sample_user_data,
+                "education": [
+                    {
+                        "degree": "Bachelor of Computer Science",
+                        "institution": "Stanford University",
+                        "year": "2018-2022"
+                    }
+                ],
+                "skills": [
+                    {
+                        "category": "Programming Languages",
+                        "items": ["Python", "JavaScript", "TypeScript"]
+                    }
+                ],
+                "experience": [
+                    {
+                        "role": "Software Engineer",
+                        "company": "TechCorp",
+                        "duration": "2022-Present",
+                        "location": "San Francisco, CA",
+                        "achievements": ["Built scalable APIs", "Reduced response times by 60%"]
+                    }
+                ]
+            }
+
+            # requests.post is patched below, so this verifies the mocked
+            # response shape rather than hitting the live resume endpoint.
+            with patch('requests.post') as mock_post:
+                mock_response = MagicMock()
+                mock_response.status_code = 200
+                mock_response.json.return_value = {
+                    "success": True,
+                    "resume_id": "test-resume-id",
+                    "download_url": "/api/v1/resume/download/test-resume-id",
+                    "generation_method": "latex",
+                    "message": "Resume generated successfully"
+                }
+                mock_post.return_value = mock_response
+
+                response = requests.post(f"{BASE_URL}/resume/generate", json=payload, timeout=TEST_TIMEOUT)
+                assert response.status_code == 200
+
+                data = response.json()
+                assert data["success"] is True
+                assert "resume_id" in data
+
+        except requests.exceptions.ConnectionError:
+            pytest.skip("API server not running - skipping integration test")
+
+class TestCoverLetterGeneration:
+    """Test cover letter generation endpoints with mocked AI services."""
+
+    def test_generate_cover_letter_invalid_job_id(self, mock_openai_client):
+        """Test cover letter generation with invalid job ID."""
+        try:
+            payload = {
+                "user_id": "test-user-id",
+                "user_name": "John Doe",
+                "user_email": "john@example.com"
+            }
+            response = requests.post(f"{BASE_URL}/cover-letters/invalid-job-id", json=payload, timeout=TEST_TIMEOUT)
+            assert response.status_code in [404, 422]
+
+        except requests.exceptions.ConnectionError:
+            pytest.skip("API server not running - skipping integration test")
+
+class TestCacheEndpoints:
+    """Test cache-related endpoints."""
+
+    def test_cache_stats(self, mock_redis_client):
+        """Test getting cache statistics."""
+        try:
+            response = requests.get(f"{BASE_URL}/match/cache/stats", timeout=TEST_TIMEOUT)
+            assert response.status_code == 200
+
+            data = response.json()
+            assert "success" in data
+            assert "cache_stats" in data
+
+        except requests.exceptions.ConnectionError:
+            pytest.skip("API server not running - skipping integration test")
+
+class TestEnvironmentVariables:
+    """Test that environment variables are properly loaded."""
+
+    def test_required_env_vars_loaded(self):
+        """Test that all required environment variables are available."""
+        required_vars = [
+            "DATABASE_URL",
+            "OPENAI_API_KEY",
+            "SUPABASE_URL",
+            "SUPABASE_KEY"
+        ]
+
+        for var in required_vars:
+            assert os.getenv(var) is not None, f"Environment variable {var} is not set"
+
+    def test_optional_env_vars(self):
+        """Test that optional environment variables have defaults."""
+        # These should have defaults set by conftest.py if not provided
+        optional_vars = [
+            "REDIS_URL",
+            "REED_API_KEY",
+            "ADZUNA_APP_ID",
+            "ADZUNA_APP_KEY"
+        ]
+
+        for var in optional_vars:
+            value = os.getenv(var)
+            assert value is not None, f"Optional environment variable {var} should have a default value"
+
+    def test_test_environment_flag(self):
+        """Test that we're running in the test environment."""
+        env = os.getenv("ENVIRONMENT")
+        assert env == "test", "Tests should run with ENVIRONMENT=test"
+
+if __name__ == "__main__":
+    # Run tests with pytest
+    pytest.main([__file__, "-v"])
\ No newline at end of file