From 4041b0eff4030b26311c04be27ba84403a2b2558 Mon Sep 17 00:00:00 2001 From: DJSaunders1997 Date: Fri, 31 Jan 2025 08:49:41 +0000 Subject: [PATCH 01/16] Move generate image to class so behvaiout is more similar to generate quiz --- backend/fastapi_generate_quiz.py | 5 +- backend/generate_image.py | 78 +++++++++++++++----------------- 2 files changed, 40 insertions(+), 43 deletions(-) diff --git a/backend/fastapi_generate_quiz.py b/backend/fastapi_generate_quiz.py index ad87fbc..eabf7b0 100644 --- a/backend/fastapi_generate_quiz.py +++ b/backend/fastapi_generate_quiz.py @@ -2,7 +2,7 @@ # https://platform.openai.com/docs/api-reference/streaming import logging from generate_quiz import QuizGenerator -from generate_image import generate_image +from generate_image import ImageGenerator from fastapi import FastAPI, Request from fastapi.responses import (StreamingResponse, JSONResponse) from fastapi.middleware.cors import CORSMiddleware @@ -100,7 +100,8 @@ async def generate_image_endpoint(request: Request) -> JSONResponse: return JSONResponse(content={"error": error_message}, status_code=400) logging.info(f"Received prompt: {prompt}") - image_url = generate_image(prompt) + image_generator = ImageGenerator() + image_url = image_generator.generate_image(prompt) if image_url is None: error_message = "Error - Image generation failed." diff --git a/backend/generate_image.py b/backend/generate_image.py index 4297e10..7a3525a 100644 --- a/backend/generate_image.py +++ b/backend/generate_image.py @@ -1,5 +1,4 @@ from openai import OpenAI - import logging import os @@ -7,47 +6,44 @@ logger = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S') -# Set up OpenAI API key from environment variables -OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") -if not OPENAI_API_KEY: - raise ValueError( - "Environment variable OPENAI_API_KEY is not set. " - "Please ensure it's set and try again." 
- ) - -client = OpenAI(api_key=OPENAI_API_KEY) - - -def generate_image(prompt: str, n: int = 1, size: str = "256x256") -> str: - """ - Generates an image using OpenAI's Image API based on a given prompt. - - Parameters: - - prompt (str): The textual description for the image to be generated. - - n (int): The number of images to generate. Default is 1. - - size (str): The size of the generated image. Default is "256x256". - - Returns: - - str: URL of generated image, in JSON dict with key URL - - Raises: - - openai.error.OpenAIError: If there's an error in the request. - """ - - logging.info(f"{prompt=}") - - try: - response = client.images.generate(prompt=prompt, n=n, size=size) - return response.data[0].url - except Exception as e: - logger.error(f"Non-OpenAI Error when calling OpenAI api: {e}") - return None - +class ImageGenerator: + def __init__(self): + """ + Initializes the ImageGenerator by setting up the OpenAI client with the API key from environment variables. + Raises an error if the API key is not set. + """ + api_key = os.getenv("OPENAI_API_KEY") + if not api_key: + raise ValueError( + "Environment variable OPENAI_API_KEY is not set. " + "Please ensure it's set and try again." + ) + self.client = OpenAI(api_key=api_key) + + def generate_image(self, prompt: str, n: int = 1, size: str = "256x256") -> str: + """ + Generates an image using OpenAI's Image API based on a given prompt. + + Parameters: + - prompt (str): The textual description for the image to be generated. + - n (int): The number of images to generate. Default is 1. + - size (str): The size of the generated image. Default is "256x256". + + Returns: + - str: URL of generated image in JSON dict with key URL, or None in case of an error. 
+ """ + logging.info(f"Generating image with prompt: {prompt=}") + + try: + response = self.client.images.generate(prompt=prompt, n=n, size=size) + return response.data[0].url + except Exception as e: + logger.error(f"Error when calling OpenAI API: {e}") + return None if __name__ == "__main__": - image_description = ( - "Crested Gecko showcasing its distinct crests and coloration. Pixel Art" - ) - image_url = generate_image(image_description) + image_generator = ImageGenerator() + image_description = "Crested Gecko showcasing its distinct crests and coloration. Pixel Art" + image_url = image_generator.generate_image(image_description) if image_url: print(f"Generated Image URL: {image_url}") From 639f6e943e27233cc3c04f7a799b3143c90b34a1 Mon Sep 17 00:00:00 2001 From: DJSaunders1997 Date: Fri, 31 Jan 2025 08:51:20 +0000 Subject: [PATCH 02/16] function test --- backend/tests/test_generate_image.py | 81 ++++++++++++++++++++++++++++ 1 file changed, 81 insertions(+) create mode 100644 backend/tests/test_generate_image.py diff --git a/backend/tests/test_generate_image.py b/backend/tests/test_generate_image.py new file mode 100644 index 0000000..ef95e96 --- /dev/null +++ b/backend/tests/test_generate_image.py @@ -0,0 +1,81 @@ +import pytest +import os +from backend.generate_image import generate_image + +""" +Test file for generate_image function. + +Grouped into: +1. **Unit Tests**: Tests function behavior using mocks (no real API calls). +2. **Integration Tests**: Makes real API calls to OpenAI (run manually/staging only). +""" + +class TestGenerateImageUnit: + """ + Unit tests for generate_image function. + Uses mocker to avoid real API calls. 
+ """ + + def test_generate_image_success(self, mocker): + """Test generate_image with a successful API response.""" + mocker.patch("backend.generate_image.client.images.generate", + return_value=mocker.Mock(data=[mocker.Mock(url="https://example.com/generated_image.png")])) + + url = generate_image("A test prompt") + assert url == "https://example.com/generated_image.png" + + def test_generate_image_custom_size(self, mocker): + """Test generate_image with a custom image size.""" + mocker.patch("backend.generate_image.client.images.generate", + return_value=mocker.Mock(data=[mocker.Mock(url="https://example.com/custom_size_image.png")])) + + url = generate_image("A dragon flying over mountains", size="512x512") + assert url == "https://example.com/custom_size_image.png" + + def test_generate_image_api_failure(self, mocker): + """Test generate_image when OpenAI API raises an exception.""" + mocker.patch("backend.generate_image.client.images.generate", side_effect=Exception("API request failed")) + + url = generate_image("A cyberpunk city at night") + assert url is None + + def test_generate_image_invalid_prompt(self): + """Test generate_image with an invalid (empty) prompt.""" + assert generate_image("") is None + + def test_openai_api_key_not_set(self): + """Test that an error is raised if the OpenAI API key is not set in the environment variables.""" + if "OPENAI_API_KEY" in os.environ: + del os.environ["OPENAI_API_KEY"] + + with pytest.raises(ValueError, match="Environment variable OPENAI_API_KEY is not set"): + generate_image("A test prompt") + + def test_logging_when_api_fails(self, mocker): + """Test that errors are properly logged when the OpenAI API fails.""" + mock_logger = mocker.patch("backend.generate_image.logger.error") + mocker.patch("backend.generate_image.client.images.generate", side_effect=Exception("API failure")) + + generate_image("Test prompt") + mock_logger.assert_called_with("Non-OpenAI Error when calling OpenAI api: API failure") + + +class 
TestGenerateImageIntegration: + """ + Integration tests for generate_image function. + These tests make real API calls and should be run manually. + """ + + @pytest.mark.integration + def test_generate_image_real_api(self): + """Calls the real OpenAI API and verifies it returns a valid URL.""" + api_key = os.getenv("OPENAI_API_KEY") + if not api_key: + pytest.skip("Skipping test: OPENAI_API_KEY is not set.") + + prompt = "A futuristic city skyline at sunset" + + url = generate_image(prompt) + + assert url is not None, "Expected a valid URL, but got None." + assert url.startswith("http"), f"Unexpected URL format: {url}" From d127e5e28993bedbccf14bd00741425c742869e2 Mon Sep 17 00:00:00 2001 From: DJSaunders1997 Date: Fri, 31 Jan 2025 08:51:26 +0000 Subject: [PATCH 03/16] Update to class test --- backend/tests/test_generate_image.py | 82 ++++++++++++++++------------ 1 file changed, 46 insertions(+), 36 deletions(-) diff --git a/backend/tests/test_generate_image.py b/backend/tests/test_generate_image.py index ef95e96..5404aad 100644 --- a/backend/tests/test_generate_image.py +++ b/backend/tests/test_generate_image.py @@ -1,68 +1,77 @@ -import pytest import os -from backend.generate_image import generate_image +import pytest +from backend.generate_image import ImageGenerator """ -Test file for generate_image function. +Test file for ImageGenerator class. Grouped into: -1. **Unit Tests**: Tests function behavior using mocks (no real API calls). +1. **Unit Tests**: Tests class behavior using mocks (no real API calls). 2. **Integration Tests**: Makes real API calls to OpenAI (run manually/staging only). """ -class TestGenerateImageUnit: +@pytest.fixture +def image_generator(): + """Fixture to create an instance of ImageGenerator""" + return ImageGenerator() + + +class TestImageGeneratorUnit: """ - Unit tests for generate_image function. + Unit tests for ImageGenerator class. Uses mocker to avoid real API calls. 
""" - def test_generate_image_success(self, mocker): + def test_generate_image_success(self, mocker, image_generator): """Test generate_image with a successful API response.""" - mocker.patch("backend.generate_image.client.images.generate", - return_value=mocker.Mock(data=[mocker.Mock(url="https://example.com/generated_image.png")])) - - url = generate_image("A test prompt") + mock_response = mocker.Mock() + mock_response.data = [{"url": "https://example.com/generated_image.png"}] + + mocker.patch.object(image_generator.client.images, "generate", return_value=mock_response) + + url = image_generator.generate_image("A test prompt") assert url == "https://example.com/generated_image.png" - def test_generate_image_custom_size(self, mocker): + def test_generate_image_custom_size(self, mocker, image_generator): """Test generate_image with a custom image size.""" - mocker.patch("backend.generate_image.client.images.generate", - return_value=mocker.Mock(data=[mocker.Mock(url="https://example.com/custom_size_image.png")])) - - url = generate_image("A dragon flying over mountains", size="512x512") + mock_response = mocker.Mock() + mock_response.data = [{"url": "https://example.com/custom_size_image.png"}] + + mocker.patch.object(image_generator.client.images, "generate", return_value=mock_response) + + url = image_generator.generate_image("A dragon flying over mountains", size="512x512") assert url == "https://example.com/custom_size_image.png" - def test_generate_image_api_failure(self, mocker): + def test_generate_image_api_failure(self, mocker, image_generator): """Test generate_image when OpenAI API raises an exception.""" - mocker.patch("backend.generate_image.client.images.generate", side_effect=Exception("API request failed")) - - url = generate_image("A cyberpunk city at night") + mocker.patch.object(image_generator.client.images, "generate", side_effect=Exception("API request failed")) + + url = image_generator.generate_image("A cyberpunk city at night") assert url 
is None - def test_generate_image_invalid_prompt(self): + def test_generate_image_invalid_prompt(self, image_generator): """Test generate_image with an invalid (empty) prompt.""" - assert generate_image("") is None + assert image_generator.generate_image("") is None - def test_openai_api_key_not_set(self): + def test_openai_api_key_not_set(self, mocker): """Test that an error is raised if the OpenAI API key is not set in the environment variables.""" - if "OPENAI_API_KEY" in os.environ: - del os.environ["OPENAI_API_KEY"] + mocker.patch.dict(os.environ, {}, clear=True) # Simulate missing API key with pytest.raises(ValueError, match="Environment variable OPENAI_API_KEY is not set"): - generate_image("A test prompt") + ImageGenerator() - def test_logging_when_api_fails(self, mocker): + def test_logging_when_api_fails(self, mocker, image_generator): """Test that errors are properly logged when the OpenAI API fails.""" mock_logger = mocker.patch("backend.generate_image.logger.error") - mocker.patch("backend.generate_image.client.images.generate", side_effect=Exception("API failure")) - - generate_image("Test prompt") - mock_logger.assert_called_with("Non-OpenAI Error when calling OpenAI api: API failure") + mocker.patch.object(image_generator.client.images, "generate", side_effect=Exception("API failure")) + + image_generator.generate_image("Test prompt") + mock_logger.assert_called_with("Error when calling OpenAI API: API failure") -class TestGenerateImageIntegration: +class TestImageGeneratorIntegration: """ - Integration tests for generate_image function. + Integration tests for ImageGenerator class. These tests make real API calls and should be run manually. 
""" @@ -73,9 +82,10 @@ def test_generate_image_real_api(self): if not api_key: pytest.skip("Skipping test: OPENAI_API_KEY is not set.") + image_generator = ImageGenerator() prompt = "A futuristic city skyline at sunset" - - url = generate_image(prompt) - + + url = image_generator.generate_image(prompt) + assert url is not None, "Expected a valid URL, but got None." assert url.startswith("http"), f"Unexpected URL format: {url}" From 1516d1a9eb4fc1ba0120431ecf834796214a6848 Mon Sep 17 00:00:00 2001 From: DJSaunders1997 Date: Fri, 31 Jan 2025 09:02:34 +0000 Subject: [PATCH 04/16] Update readme and dev requirements --- .vscode/launch.json | 7 +++++++ README.md | 12 +++++++----- backend/requirements-dev.txt | 3 +++ 3 files changed, 17 insertions(+), 5 deletions(-) create mode 100644 backend/requirements-dev.txt diff --git a/.vscode/launch.json b/.vscode/launch.json index 4508b45..5ba08bc 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -1,6 +1,13 @@ { "version": "0.2.0", "configurations": [ + { + "name": "Python Debugger: Current File", + "type": "debugpy", + "request": "launch", + "program": "${file}", + "console": "integratedTerminal" + }, { "name": "Attach to Python Functions", "type": "python", diff --git a/README.md b/README.md index 8f294d2..2786716 100644 --- a/README.md +++ b/README.md @@ -38,11 +38,13 @@ GPTeasers is a webapp that generates quiz-style questions based on the topic you 3. Azure Container Apps: Once triggered, the FastAPI containers communicates with the OpenAI API, sending requests and receiving responses. 4. OpenAI API: Processes the request and sends back a response. -## Contribute ๐Ÿคฒ +## Running Tests ๐Ÿงช -Love **GPTeasers**? Want to make it even better? We welcome contributions! +To ensure the quality and functionality of the code, we use `pytest` for testing. Follow the steps below to run the tests: -1. **Fork** this repo ๐Ÿด. -2. Make your changes ๐Ÿ› ๏ธ. -3. Submit a **pull request** ๐Ÿ‘ฅ. 
+bash``` +cd backend +pip install -r requirements-dev.txt +pytest tests/ +``` diff --git a/backend/requirements-dev.txt b/backend/requirements-dev.txt new file mode 100644 index 0000000..2fb6ed4 --- /dev/null +++ b/backend/requirements-dev.txt @@ -0,0 +1,3 @@ +-r requirements.txt +pytest +pytest-mock \ No newline at end of file From 043b87ca8d9f5c6f152f44c1740fb22e8f6b40c1 Mon Sep 17 00:00:00 2001 From: DJSaunders1997 Date: Fri, 31 Jan 2025 09:03:09 +0000 Subject: [PATCH 05/16] Add conftest --- backend/tests/conftest.py | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 backend/tests/conftest.py diff --git a/backend/tests/conftest.py b/backend/tests/conftest.py new file mode 100644 index 0000000..a0dbdc6 --- /dev/null +++ b/backend/tests/conftest.py @@ -0,0 +1,4 @@ +import sys +import os + +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))) From 937753bb58e5747a23ac4500bef3d3999be14ebb Mon Sep 17 00:00:00 2001 From: DJSaunders1997 Date: Sat, 1 Feb 2025 12:49:11 +0000 Subject: [PATCH 06/16] Working generate image tests --- backend/README.md | 39 +++++++++++++- backend/generate_image.py | 78 ++++++++++++++++++++-------- backend/pytest.ini | 4 ++ backend/tests/test_generate_image.py | 31 +++++++---- 4 files changed, 120 insertions(+), 32 deletions(-) create mode 100644 backend/pytest.ini diff --git a/backend/README.md b/backend/README.md index 715df99..53dbc9f 100644 --- a/backend/README.md +++ b/backend/README.md @@ -66,4 +66,41 @@ To debug locally, follow these steps: docker push ghcr.io/djsaunders1997/fastapi_generate_quiz:latest ``` - \ No newline at end of file +## Running Tests + +Our test suite is divided into **unit tests** and **integration tests**. + +- **Unit Tests:** + These tests use mocks to simulate API responses. They run quickly and do not require real API calls. + +- **Integration Tests:** + These tests make real API calls (e.g., to the OpenAI API) and require a valid API key. 
They are intended to be run manually or in a staging environment. + +### Default Behavior + +By default, integration tests are **excluded** from the test run. This is achieved by configuring `pytest` in our `pytest.ini` file (located in the `backend` directory): + +```ini +[pytest] +markers = + integration: mark test as an integration test. +addopts = -m "not integration" +``` + +This configuration tells `pytest` to skip any test marked with `@pytest.mark.integration` when you run: + +```bash +pytest -v +``` + +### Running Integration Tests + +To run the integration tests, override the default marker filter by using the `-m` option: + +```bash +pytest -m integration +``` + +> **Note:** Integration tests make real API calls and require the `OPENAI_API_KEY` environment variable to be set. Make sure you have this environment variable configured before running these tests. + +--- diff --git a/backend/generate_image.py b/backend/generate_image.py index 7a3525a..9de2e76 100644 --- a/backend/generate_image.py +++ b/backend/generate_image.py @@ -1,16 +1,21 @@ -from openai import OpenAI -import logging import os +import logging +from typing import Optional +from openai import OpenAI -# Set up logging logger = logging.getLogger(__name__) -logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S') +logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') class ImageGenerator: - def __init__(self): - """ - Initializes the ImageGenerator by setting up the OpenAI client with the API key from environment variables. - Raises an error if the API key is not set. + @classmethod + def get_api_key_from_env(cls) -> str: + """Retrieves the OpenAI API key from environment variables. + + Returns: + str: The API key from the environment variable OPENAI_API_KEY. + + Raises: + ValueError: If the environment variable is not set or empty. 
""" api_key = os.getenv("OPENAI_API_KEY") if not api_key: @@ -18,22 +23,51 @@ def __init__(self): "Environment variable OPENAI_API_KEY is not set. " "Please ensure it's set and try again." ) - self.client = OpenAI(api_key=api_key) + return api_key - def generate_image(self, prompt: str, n: int = 1, size: str = "256x256") -> str: + def __init__(self, api_key: Optional[str] = None): + """Initialises the ImageGenerator. + + If `api_key` is not provided, it is retrieved from the environment + using `get_api_key_from_env`. + + Args: + api_key (str, optional): The OpenAI API key to use. Defaults to None. """ - Generates an image using OpenAI's Image API based on a given prompt. + if api_key is None: + api_key = self.get_api_key_from_env() + + self.client = OpenAI(api_key=api_key) + + def generate_image(self, prompt: str, n: int = 1, size: str = "256x256") -> Optional[str]: + """Generates an image based on the provided prompt. - Parameters: - - prompt (str): The textual description for the image to be generated. - - n (int): The number of images to generate. Default is 1. - - size (str): The size of the generated image. Default is "256x256". + Args: + prompt (str): The textual description for the image to be generated. + n (int, optional): The number of images to generate. Defaults to 1. + size (str, optional): The size of the generated image. Defaults to "256x256". Returns: - - str: URL of generated image in JSON dict with key URL, or None in case of an error. + Optional[str]: The URL of the generated image if successful, + or `None` if an error occurred. """ - logging.info(f"Generating image with prompt: {prompt=}") + logger.info(f"Generating image with prompt: {prompt=}") + image_url = self._get_image_url(prompt, n, size) + logger.info(f"Generated image URL: {image_url}") + return image_url + + def _get_image_url(self, prompt: str, n: int, size: str) -> Optional[str]: + """Makes the API call to generate images using OpenAI and returns the URL. 
+ Args: + prompt (str): The textual description for the image to be generated. + n (int): The number of images to generate. + size (str): The size of the generated image (e.g., "256x256"). + + Returns: + Optional[str]: The URL of the first generated image, + or `None` if an error occurred. + """ try: response = self.client.images.generate(prompt=prompt, n=n, size=size) return response.data[0].url @@ -41,9 +75,9 @@ def generate_image(self, prompt: str, n: int = 1, size: str = "256x256") -> str: logger.error(f"Error when calling OpenAI API: {e}") return None + if __name__ == "__main__": - image_generator = ImageGenerator() - image_description = "Crested Gecko showcasing its distinct crests and coloration. Pixel Art" - image_url = image_generator.generate_image(image_description) - if image_url: - print(f"Generated Image URL: {image_url}") + # Example usage: + image_generator = ImageGenerator() # Uses environment variable if no API key is provided + prompt_text = "Crested Gecko showcasing its distinct crests and colouration. Pixel Art" + image_url = image_generator.generate_image(prompt_text) diff --git a/backend/pytest.ini b/backend/pytest.ini new file mode 100644 index 0000000..1e6be3e --- /dev/null +++ b/backend/pytest.ini @@ -0,0 +1,4 @@ +[pytest] +markers = + integration: mark test as an integration test. +addopts = -m "not integration" diff --git a/backend/tests/test_generate_image.py b/backend/tests/test_generate_image.py index 5404aad..c0a9741 100644 --- a/backend/tests/test_generate_image.py +++ b/backend/tests/test_generate_image.py @@ -1,5 +1,6 @@ import os import pytest +from types import SimpleNamespace from backend.generate_image import ImageGenerator """ @@ -10,12 +11,12 @@ 2. **Integration Tests**: Makes real API calls to OpenAI (run manually/staging only). """ +# Fixture to create an ImageGenerator instance with a dummy API key. 
@pytest.fixture -def image_generator(): - """Fixture to create an instance of ImageGenerator""" +def image_generator(monkeypatch): + monkeypatch.setenv("OPENAI_API_KEY", "dummy_key") return ImageGenerator() - class TestImageGeneratorUnit: """ Unit tests for ImageGenerator class. @@ -24,9 +25,11 @@ class TestImageGeneratorUnit: def test_generate_image_success(self, mocker, image_generator): """Test generate_image with a successful API response.""" + # Create a mock response simulating the structure returned by OpenAI. mock_response = mocker.Mock() - mock_response.data = [{"url": "https://example.com/generated_image.png"}] + mock_response.data = [SimpleNamespace(url="https://example.com/generated_image.png")] + # Patch the generate method of the images client. mocker.patch.object(image_generator.client.images, "generate", return_value=mock_response) url = image_generator.generate_image("A test prompt") @@ -35,7 +38,7 @@ def test_generate_image_success(self, mocker, image_generator): def test_generate_image_custom_size(self, mocker, image_generator): """Test generate_image with a custom image size.""" mock_response = mocker.Mock() - mock_response.data = [{"url": "https://example.com/custom_size_image.png"}] + mock_response.data = [SimpleNamespace(url="https://example.com/custom_size_image.png")] mocker.patch.object(image_generator.client.images, "generate", return_value=mock_response) @@ -44,26 +47,36 @@ def test_generate_image_custom_size(self, mocker, image_generator): def test_generate_image_api_failure(self, mocker, image_generator): """Test generate_image when OpenAI API raises an exception.""" - mocker.patch.object(image_generator.client.images, "generate", side_effect=Exception("API request failed")) + mocker.patch.object( + image_generator.client.images, "generate", side_effect=Exception("API request failed") + ) url = image_generator.generate_image("A cyberpunk city at night") assert url is None - def test_generate_image_invalid_prompt(self, image_generator): 
+ def test_generate_image_invalid_prompt(self, mocker, image_generator): """Test generate_image with an invalid (empty) prompt.""" + # Simulate failure (e.g., by having the API call raise an exception) + mocker.patch.object( + image_generator.client.images, "generate", side_effect=Exception("Invalid prompt") + ) assert image_generator.generate_image("") is None def test_openai_api_key_not_set(self, mocker): """Test that an error is raised if the OpenAI API key is not set in the environment variables.""" - mocker.patch.dict(os.environ, {}, clear=True) # Simulate missing API key + # Clear environment variables to simulate missing API key. + mocker.patch.dict(os.environ, {}, clear=True) with pytest.raises(ValueError, match="Environment variable OPENAI_API_KEY is not set"): ImageGenerator() def test_logging_when_api_fails(self, mocker, image_generator): """Test that errors are properly logged when the OpenAI API fails.""" + # Patch the logger's error method. mock_logger = mocker.patch("backend.generate_image.logger.error") - mocker.patch.object(image_generator.client.images, "generate", side_effect=Exception("API failure")) + mocker.patch.object( + image_generator.client.images, "generate", side_effect=Exception("API failure") + ) image_generator.generate_image("Test prompt") mock_logger.assert_called_with("Error when calling OpenAI API: API failure") From f609825f2614f34432ccdc4d619179435222f5f1 Mon Sep 17 00:00:00 2001 From: DJSaunders1997 Date: Sun, 2 Feb 2025 10:56:35 +0000 Subject: [PATCH 07/16] Seperate logic from the _create_question_generator method --- .vscode/settings.json | 7 +- backend/generate_quiz.py | 147 ++++++++++++++++++++++++--------------- 2 files changed, 95 insertions(+), 59 deletions(-) diff --git a/.vscode/settings.json b/.vscode/settings.json index 097dcce..6515c96 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -5,5 +5,10 @@ // "azureFunctions.pythonVenv": "${workspaceFolder}/backend_azure_function/.venv_azure_func", 
"azureFunctions.projectLanguage": "Python", "azureFunctions.projectRuntime": "~4", - "debug.internalConsoleOptions": "neverOpen" + "debug.internalConsoleOptions": "neverOpen", + "python.testing.pytestArgs": [ + "backend" + ], + "python.testing.unittestEnabled": false, + "python.testing.pytestEnabled": true } \ No newline at end of file diff --git a/backend/generate_quiz.py b/backend/generate_quiz.py index 275f114..059eac4 100644 --- a/backend/generate_quiz.py +++ b/backend/generate_quiz.py @@ -1,5 +1,4 @@ -# https://github.com/openai/openai-python -from typing import Generator +from typing import Generator, Optional, Iterable from openai import OpenAI, Stream import logging import json @@ -7,10 +6,13 @@ # Set up logging logger = logging.getLogger(__name__) -logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S') +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s - %(levelname)s - %(message)s', + datefmt='%Y-%m-%d %H:%M:%S' +) class QuizGenerator: - # TODO: Implement a method of getting the quiz in the old format, even if it takes a while. EXAMPLE_RESPONSE = json.dumps({ "question_id": 1, "question": "Who was the first emperor of Rome?", @@ -26,10 +28,15 @@ class QuizGenerator: "wikipedia": r"https://en.wikipedia.org/wiki/Augustus", }) - def __init__(self): - """ - Initializes the QuizGenerator by setting up the OpenAI client with the API key from environment variables. - Raises an error if the API key is not set. + @classmethod + def get_api_key_from_env(cls) -> str: + """Retrieves the OpenAI API key from environment variables. + + Returns: + str: The API key from the environment variable OPENAI_API_KEY. + + Raises: + ValueError: If the environment variable is not set or empty. """ api_key = os.getenv("OPENAI_API_KEY") if not api_key: @@ -37,17 +44,30 @@ def __init__(self): "Environment variable OPENAI_API_KEY is not set. " "Please ensure it's set and try again." 
) + return api_key + + def __init__(self, api_key: Optional[str] = None): + """ + Initializes the QuizGenerator by setting up the OpenAI client with the API key. + If `api_key` is not provided, it is retrieved from the environment + using `get_api_key_from_env`. + + Args: + api_key (str, optional): The OpenAI API key to use. Defaults to None. + """ + if api_key is None: + api_key = self.get_api_key_from_env() + self.client = OpenAI(api_key=api_key) - def generate_quiz(self, topic: str, difficulty: str, n_questions: str = "10", stream: bool = False) -> Generator[str, None, None]: + def generate_quiz(self, topic: str, difficulty: str, n_questions: int = 10) -> Generator[str, None, None]: """ Generate a quiz based on the provided topic and difficulty using OpenAI API. Parameters: - topic (str): The subject for the quiz, e.g., 'Roman History'. - difficulty (str): The desired difficulty of the quiz e.g., 'Easy', 'Medium'. - - n_questions (str, optional): Number of questions required. Defaults to '10'. - - stream (bool, optional): Whether to stream the response. Defaults to False. + - n_questions (int, optional): Number of questions required. Defaults to 10. Returns: - str: JSON-formatted quiz questions. If an error occurs, an empty string is returned. @@ -56,23 +76,20 @@ def generate_quiz(self, topic: str, difficulty: str, n_questions: str = "10", st the generation of the response, and the cleaning of the response. 
""" role = self._create_role(topic, difficulty, n_questions) - - logging.info(f"Role content for OpenAI API: {role}") - - stream = self._create_openai_stream(role) - - response_generator = self._create_question_generator(stream) + logger.info(f"Role content for OpenAI API: {role}") + openai_stream = self._create_openai_stream(role) + response_generator = self._create_question_generator(openai_stream) return response_generator - def _create_role(self, topic: str, difficulty: str, n_questions: str) -> str: + def _create_role(self, topic: str, difficulty: str, n_questions: int) -> str: """ Creates the role string that will be sent to the OpenAI API to generate the quiz. Parameters: - topic (str): The subject for the quiz. - difficulty (str): The desired difficulty of the quiz. - - n_questions (str): Number of questions required. + - n_questions (int): Number of questions required. Returns: - str: The role string to be sent to the OpenAI API. @@ -82,7 +99,7 @@ def _create_role(self, topic: str, difficulty: str, n_questions: str) -> str: return ( f"You are an AI to generate quiz questions. " f"You will be given a topic e.g. Roman History with a difficulty of Normal. " - f"Give {n_questions} responses in a json format such as: {self.EXAMPLE_RESPONSE}. " + f"Give {str(n_questions)} responses in a json format such as: {self.EXAMPLE_RESPONSE}. " f"Your task is to generate similar responses for {topic} " f"with the difficulty of {difficulty}. " f"ENSURE THESE ARE CORRECT. DO NOT INCLUDE INCORRECT ANSWERS! " @@ -93,29 +110,22 @@ def _create_role(self, topic: str, difficulty: str, n_questions: str) -> str: def _create_openai_stream(self, role: str) -> Stream: """ Creates the stream from the OpenAI API based on the given role. - + Exceptions are not caught here so that errors are visible in tests. + Parameters: - role (str): The role string to be sent to the OpenAI API. Returns: - str: The raw response from the OpenAI API. 
- - This method handles the API call to OpenAI and returns the raw response. - If an error occurs, it logs the error and returns an empty string. """ - try: - openai_stream = self.client.chat.completions.create( - model="gpt-4-turbo-preview", - messages=[{"role": "user", "content": role}], - stream=True - ) - except Exception as e: - logging.error(f"General error when creating OpenAI stream: {e}") - return openai_stream - + return self.client.chat.completions.create( + model="gpt-4-turbo-preview", + messages=[{"role": "user", "content": role}], + stream=True + ) def _create_question_generator(self, openai_stream: Stream) -> Generator[str, None, None]: - """Parses streamed data chunks from OpenAI into complete JSON objects and yields them. + """Parses streamed data chunks from OpenAI into complete JSON objects and yields them in SSE format. Accumulates data in a buffer and attempts to parse complete JSON objects. If successful, the JSON object is yielded as a string and the buffer is cleared for the next object. @@ -129,33 +139,55 @@ def _create_question_generator(self, openai_stream: Stream) -> Generator[str, No Yields: str: Complete JSON object of a quiz question in string representation. - - Raises: - json.JSONDecodeError: If parsing fails due to malformed JSON data. """ - - buffer = "" for chunk in openai_stream: chunk_contents = chunk.choices[0].delta.content + # Ignore empty chunks. if chunk_contents is None: - logger.info("Chunk was empty!") + logger.debug("Chunk was empty!") continue + buffer += chunk_contents # Append new data to buffer - try: - while buffer: - obj = json.loads(buffer) # Try to parse buffer as JSON - logger.info(f"Successfully parsed response as JSON object! 
{obj}") - formatted_sse = f"data: {json.dumps(obj)}\n\n" # Format as SSE - logger.info(f"Successfully formatted data as SSE event: {formatted_sse}") - yield formatted_sse # Yield the JSON string - buffer = "" # Clear buffer since JSON was successfully parsed - except json.JSONDecodeError: - continue # Continue buffering if JSON is incomplete + result = self.validate_and_parse_json(buffer) + + # If the JSON is incomplete, wait for more data. + if result is None: + logger.debug("JSON is incomplete, waiting for more data...") + continue + + # If the JSON is complete, yield it and clear the buffer. + yield self._format_sse(result) + buffer = "" # Clear buffer on successful parse. logger.info("Finished stream!") + @staticmethod + def _format_sse(json_obj: dict) -> str: + """ + Formats a JSON object as a Server-Sent Event (SSE) string. + """ + return f"data: {json.dumps(json_obj)}\n\n" + + @staticmethod + def validate_and_parse_json(s: str) -> Optional[dict]: + """ + Helper method to validate and parse the provided string as JSON. + Returns the parsed dict if s is valid JSON, otherwise returns None if the JSON is incomplete. + + Parameters: + - s (str): The string to check. + + Returns: + - dict: The parsed JSON object, or None if the JSON is incomplete. + """ + try: + return json.loads(s) + except json.JSONDecodeError as e: + logger.debug(f"Incomplete JSON '{s}': {e.msg} at pos {e.pos}") + return None + @staticmethod def print_quiz(generator: Generator[str, None, None]): """Helper function to iterate through and print the results from the question generator. @@ -164,19 +196,18 @@ def print_quiz(generator: Generator[str, None, None]): generator (Generator[str, None, None]): Generator producing quiz questions as SSE formatted strings. 
""" try: - i = 1 - for question in generator: - logger.info(f"Item {i}: {question}") - i += 1 + for idx, question in enumerate(generator, start=1): + logger.info(f"Item {idx}: {question}") except Exception as e: logger.error(f"Error during quiz generation: {e}") if __name__ == "__main__": - quiz_generator = QuizGenerator() + # Set logger level to DEBUG if running this file to test + logger.setLevel(logging.DEBUG) + quiz_generator = QuizGenerator() topic = "Crested Gecko" difficulty = "Medium" - generator = quiz_generator.generate_quiz(topic, difficulty, "5", stream=True) + generator = quiz_generator.generate_quiz(topic, difficulty, 2) logger.info(generator) - QuizGenerator.print_quiz(generator) From d35891b70eb82f0329db740a84e1750068c825be Mon Sep 17 00:00:00 2001 From: DJSaunders1997 Date: Sun, 2 Feb 2025 11:02:18 +0000 Subject: [PATCH 08/16] Adding tests for generate_quiz --- backend/tests/test_generate_quiz.py | 217 ++++++++++++++++++++++++++++ 1 file changed, 217 insertions(+) create mode 100644 backend/tests/test_generate_quiz.py diff --git a/backend/tests/test_generate_quiz.py b/backend/tests/test_generate_quiz.py new file mode 100644 index 0000000..4718fce --- /dev/null +++ b/backend/tests/test_generate_quiz.py @@ -0,0 +1,217 @@ +import os +import json +import pytest +from types import SimpleNamespace +from backend.generate_quiz import QuizGenerator + +""" +Test file for QuizGenerator class. + +Grouped into: +1. **Unit Tests**: Tests class behavior using mocks (no real API calls). +2. **Integration Tests**: Makes real API calls to OpenAI (run manually/staging only). + +This file demonstrates how to use fixtures, monkeypatching, and method patching to isolate +code under test and simulate various conditions. +""" + +# Fixture to create an instance of QuizGenerator with a dummy API key. +@pytest.fixture +def quiz_generator(monkeypatch): + # Set a dummy API key in the environment so that the class can initialize without error. 
+ monkeypatch.setenv("OPENAI_API_KEY", "dummy_key") + return QuizGenerator() + + +class TestQuizGeneratorUnit: + """ + Unit tests for the QuizGenerator class. + These tests use mocks to avoid making real API calls. + """ + + def test_get_api_key_from_env(self, monkeypatch): + """ + Test that get_api_key_from_env correctly retrieves the API key from the environment. + + We set the environment variable and then call the class method to verify that it returns + the expected API key. + """ + monkeypatch.setenv("OPENAI_API_KEY", "test_key") + key = QuizGenerator.get_api_key_from_env() + assert key == "test_key" + + def test_environment_variable_not_set(self, monkeypatch): + """ + Test that initializing QuizGenerator without an API key (i.e. if the environment variable + is not set) raises a ValueError. + + We remove the environment variable and expect the constructor to raise an error. + """ + monkeypatch.delenv("OPENAI_API_KEY", raising=False) + with pytest.raises(ValueError, match="Environment variable OPENAI_API_KEY is not set"): + QuizGenerator() + + def test_create_role(self, quiz_generator): + """ + Test that the _create_role method produces a prompt that contains the expected parameters. + + The role prompt should include the topic, difficulty, number of questions, and the example JSON. + """ + topic = "Science" + difficulty = "Easy" + n_questions = 5 + role = quiz_generator._create_role(topic, difficulty, n_questions) + # Check that the output string includes our input values and the example JSON + assert topic in role + assert difficulty in role + assert str(n_questions) in role + assert quiz_generator.EXAMPLE_RESPONSE in role + + def test_create_openai_stream(self, mocker, quiz_generator): + """ + Test that _create_openai_stream calls the underlying OpenAI API with the correct parameters. + + We use method patching (with mocker.patch.object) to replace the actual API call with a dummy + value, then verify that the method was called with the correct parameters. 
+ """ + dummy_role = "dummy role string" + dummy_stream = "dummy stream" + # Patch the client's chat.completions.create method so no actual API call is made. + patcher = mocker.patch.object( + quiz_generator.client.chat.completions, "create", return_value=dummy_stream + ) + result = quiz_generator._create_openai_stream(dummy_role) + # Verify the patched method was called once with the expected arguments. + patcher.assert_called_once_with( + model="gpt-4-turbo-preview", + messages=[{"role": "user", "content": dummy_role}], + stream=True + ) + assert result == dummy_stream + + def test_create_question_generator(self, quiz_generator): + """ + Test the _create_question_generator method by simulating a stream that yields a single chunk + containing a complete JSON string. + + We use a fake chunk (wrapped in a SimpleNamespace) to simulate what the OpenAI API might return. + """ + # Use the EXAMPLE_RESPONSE as our fake complete JSON content. + fake_json = quiz_generator.EXAMPLE_RESPONSE + fake_chunk = SimpleNamespace( + choices=[SimpleNamespace(delta=SimpleNamespace(content=fake_json))] + ) + + def fake_stream(): + # Yield a single fake chunk. + yield fake_chunk + + # Call the generator method and check that it yields the correctly formatted SSE event. + gen = quiz_generator._create_question_generator(fake_stream()) + expected = "data: " + json.dumps(json.loads(fake_json)) + "\n\n" + result = next(gen) + assert result == expected + + def test_empty_chunk_in_question_generator(self, quiz_generator, mocker): + """ + Test _create_question_generator when the stream yields an empty chunk (i.e., a chunk with None content) + before yielding a valid JSON chunk. + + This verifies that the method correctly logs the empty chunk and then proceeds once valid data is received. + """ + fake_json = quiz_generator.EXAMPLE_RESPONSE + # Create a chunk that simulates an empty response. 
+ empty_chunk = SimpleNamespace( + choices=[SimpleNamespace(delta=SimpleNamespace(content=None))] + ) + # Then a chunk that contains valid JSON. + valid_chunk = SimpleNamespace( + choices=[SimpleNamespace(delta=SimpleNamespace(content=fake_json))] + ) + + def fake_stream(): + yield empty_chunk + yield valid_chunk + + # Patch logger.debug to capture log messages about empty chunks. + logger_debug = mocker.patch("backend.generate_quiz.logger.debug") + gen = quiz_generator._create_question_generator(fake_stream()) + result = next(gen) + # Verify that the empty chunk log was produced. + logger_debug.assert_any_call("Chunk was empty!") + expected = "data: " + json.dumps(json.loads(fake_json)) + "\n\n" + assert result == expected + + def test_format_sse(self): + """ + Test that _format_sse correctly formats a JSON object as an SSE (Server-Sent Event) string. + + This is a simple helper method that should return a string starting with "data:". + """ + sample_dict = {"key": "value"} + expected = "data: " + json.dumps(sample_dict) + "\n\n" + result = QuizGenerator._format_sse(sample_dict) + assert result == expected + + def test_validate_and_parse_json_valid(self): + """ + Test validate_and_parse_json with a valid JSON string. + + The method should return the corresponding Python dictionary. + """ + valid_json_str = '{"foo": "bar"}' + result = QuizGenerator.validate_and_parse_json(valid_json_str) + assert result == {"foo": "bar"} + + def test_validate_and_parse_json_incomplete(self): + """ + Test validate_and_parse_json with an incomplete JSON string. + + Since the method is designed to return None if the JSON is incomplete (not fully formed), + we expect the result to be None. + """ + incomplete_json_str = '{"foo": "bar"' + result = QuizGenerator.validate_and_parse_json(incomplete_json_str) + assert result is None + + def test_print_quiz(self, mocker, quiz_generator): + """ + Test the static print_quiz method by passing in a dummy generator. 
+ + We patch logger.info to verify that the print_quiz method logs each quiz item correctly. + """ + dummy_generator = (s for s in ["data: {\"quiz\": \"q1\"}\n\n", "data: {\"quiz\": \"q2\"}\n\n"]) + logger_info = mocker.patch("backend.generate_quiz.logger.info") + QuizGenerator.print_quiz(dummy_generator) + # Verify that logger.info was called with the expected messages. + logger_info.assert_any_call("Item 1: data: {\"quiz\": \"q1\"}\n\n") + logger_info.assert_any_call("Item 2: data: {\"quiz\": \"q2\"}\n\n") + + +class TestQuizGeneratorIntegration: + """ + Integration tests for the QuizGenerator class. + These tests make real API calls and are skipped by default, unless explicitly requested. + """ + + @pytest.mark.integration + def test_generate_quiz_real_api(self): + """ + Integration test that calls the real OpenAI API. + + This test will only run if the OPENAI_API_KEY is set in the environment. It verifies that the + generate_quiz method returns at least one SSE-formatted result. + """ + api_key = os.getenv("OPENAI_API_KEY") + if not api_key: + pytest.skip("Skipping integration test: OPENAI_API_KEY not set.") + quiz_gen = QuizGenerator() + topic = "Math" + difficulty = "Hard" + gen = quiz_gen.generate_quiz(topic, difficulty, 2) + # Collect the output from the generator. + results = list(gen) + # Verify that at least one SSE event is produced. 
+ assert len(results) > 0 + for r in results: + assert r.startswith("data: ") From e3300c17474045ec9d65b23189238e2c22d1af00 Mon Sep 17 00:00:00 2001 From: DJSaunders1997 Date: Sun, 2 Feb 2025 11:08:13 +0000 Subject: [PATCH 09/16] Ruff format --- backend/fastapi_generate_quiz.py | 21 +++++---- backend/generate_image.py | 19 ++++++-- backend/generate_quiz.py | 70 ++++++++++++++++------------ backend/tests/conftest.py | 2 +- backend/tests/test_generate_image.py | 46 ++++++++++++------ backend/tests/test_generate_quiz.py | 15 ++++-- 6 files changed, 110 insertions(+), 63 deletions(-) diff --git a/backend/fastapi_generate_quiz.py b/backend/fastapi_generate_quiz.py index eabf7b0..510e9fa 100644 --- a/backend/fastapi_generate_quiz.py +++ b/backend/fastapi_generate_quiz.py @@ -1,10 +1,10 @@ -# Example of openai streaming +# Example of openai streaming # https://platform.openai.com/docs/api-reference/streaming import logging from generate_quiz import QuizGenerator from generate_image import ImageGenerator from fastapi import FastAPI, Request -from fastapi.responses import (StreamingResponse, JSONResponse) +from fastapi.responses import StreamingResponse, JSONResponse from fastapi.middleware.cors import CORSMiddleware # Copy Azure Docs Example @@ -20,9 +20,10 @@ allow_headers=["*"], # Allows all headers ) + @app.get("/GenerateQuiz") async def generate_quiz_endpoint(request: Request) -> JSONResponse: - """ + """ FastAPI App to generate an image based on a provided prompt. The function expects a 'prompt' parameter in the HTTP request query @@ -42,9 +43,11 @@ async def generate_quiz_endpoint(request: Request) -> JSONResponse: difficulty = request.query_params.get("difficulty") n_questions = request.query_params.get("n_questions") - logging.info(f"Python HTTP trigger function processed a request with {topic=} {difficulty=}, {n_questions=}.") + logging.info( + f"Python HTTP trigger function processed a request with {topic=} {difficulty=}, {n_questions=}." 
+ ) - # If either 'topic' or 'difficulty' is not provided in the request, + # If either 'topic' or 'difficulty' is not provided in the request, # the function will return an error message and a 400 status code. # n_questions is optional if not topic or not difficulty: @@ -58,7 +61,7 @@ async def generate_quiz_endpoint(request: Request) -> JSONResponse: # Set default value if not set if not n_questions: n_questions = 10 - + logging.info( f"Generating quiz for topic: {topic} with difficulty: {difficulty} with number of questions: {n_questions}" ) @@ -72,9 +75,10 @@ async def generate_quiz_endpoint(request: Request) -> JSONResponse: return StreamingResponse(generator, media_type="text/event-stream") + @app.get("/GenerateImage") async def generate_image_endpoint(request: Request) -> JSONResponse: - """ + """ FastAPI App to generate an image based on a provided prompt. The function expects a 'prompt' parameter in the HTTP request query @@ -112,7 +116,8 @@ async def generate_image_endpoint(request: Request) -> JSONResponse: logging.info(f"Generated image for prompt {prompt}: {image_url}") return JSONResponse(content={"image_url": image_url}, status_code=200) + # Run with uvicorn fastapi_generate_quiz:app --reload --host 0.0.0.0 --port 8000 --log-level debug # Access with curl "http://localhost:8000/GenerateQuiz?topic=UK%20History&difficulty=easy&n_questions=3" # Access with curl "http://localhost:8000/GenerateImage?prompt=A%20Juicy%20Burger" -# This simple example works! \ No newline at end of file +# This simple example works! 
diff --git a/backend/generate_image.py b/backend/generate_image.py index 9de2e76..8bd8138 100644 --- a/backend/generate_image.py +++ b/backend/generate_image.py @@ -4,7 +4,10 @@ from openai import OpenAI logger = logging.getLogger(__name__) -logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') +logging.basicConfig( + level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s" +) + class ImageGenerator: @classmethod @@ -39,7 +42,9 @@ def __init__(self, api_key: Optional[str] = None): self.client = OpenAI(api_key=api_key) - def generate_image(self, prompt: str, n: int = 1, size: str = "256x256") -> Optional[str]: + def generate_image( + self, prompt: str, n: int = 1, size: str = "256x256" + ) -> Optional[str]: """Generates an image based on the provided prompt. Args: @@ -55,7 +60,7 @@ def generate_image(self, prompt: str, n: int = 1, size: str = "256x256") -> Opti image_url = self._get_image_url(prompt, n, size) logger.info(f"Generated image URL: {image_url}") return image_url - + def _get_image_url(self, prompt: str, n: int, size: str) -> Optional[str]: """Makes the API call to generate images using OpenAI and returns the URL. @@ -78,6 +83,10 @@ def _get_image_url(self, prompt: str, n: int, size: str) -> Optional[str]: if __name__ == "__main__": # Example usage: - image_generator = ImageGenerator() # Uses environment variable if no API key is provided - prompt_text = "Crested Gecko showcasing its distinct crests and colouration. Pixel Art" + image_generator = ( + ImageGenerator() + ) # Uses environment variable if no API key is provided + prompt_text = ( + "Crested Gecko showcasing its distinct crests and colouration. 
Pixel Art" + ) image_url = image_generator.generate_image(prompt_text) diff --git a/backend/generate_quiz.py b/backend/generate_quiz.py index 059eac4..13c218b 100644 --- a/backend/generate_quiz.py +++ b/backend/generate_quiz.py @@ -8,25 +8,28 @@ logger = logging.getLogger(__name__) logging.basicConfig( level=logging.INFO, - format='%(asctime)s - %(levelname)s - %(message)s', - datefmt='%Y-%m-%d %H:%M:%S' + format="%(asctime)s - %(levelname)s - %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", ) + class QuizGenerator: - EXAMPLE_RESPONSE = json.dumps({ - "question_id": 1, - "question": "Who was the first emperor of Rome?", - "A": "Julius Caesar", - "B": "Augustus", - "C": "Constantine", - "answer": "B", - "explanation": ( - "Augustus, originally Octavian, " - "was the first to hold the title of Roman Emperor. " - "Julius Caesar, while pivotal, never held the emperor title." - ), - "wikipedia": r"https://en.wikipedia.org/wiki/Augustus", - }) + EXAMPLE_RESPONSE = json.dumps( + { + "question_id": 1, + "question": "Who was the first emperor of Rome?", + "A": "Julius Caesar", + "B": "Augustus", + "C": "Constantine", + "answer": "B", + "explanation": ( + "Augustus, originally Octavian, " + "was the first to hold the title of Roman Emperor. " + "Julius Caesar, while pivotal, never held the emperor title." + ), + "wikipedia": r"https://en.wikipedia.org/wiki/Augustus", + } + ) @classmethod def get_api_key_from_env(cls) -> str: @@ -60,18 +63,20 @@ def __init__(self, api_key: Optional[str] = None): self.client = OpenAI(api_key=api_key) - def generate_quiz(self, topic: str, difficulty: str, n_questions: int = 10) -> Generator[str, None, None]: + def generate_quiz( + self, topic: str, difficulty: str, n_questions: int = 10 + ) -> Generator[str, None, None]: """ Generate a quiz based on the provided topic and difficulty using OpenAI API. - + Parameters: - topic (str): The subject for the quiz, e.g., 'Roman History'. 
- difficulty (str): The desired difficulty of the quiz e.g., 'Easy', 'Medium'. - n_questions (int, optional): Number of questions required. Defaults to 10. - + Returns: - str: JSON-formatted quiz questions. If an error occurs, an empty string is returned. - + This method coordinates the creation of the role for the OpenAI API, the generation of the response, and the cleaning of the response. """ @@ -85,15 +90,15 @@ def generate_quiz(self, topic: str, difficulty: str, n_questions: int = 10) -> G def _create_role(self, topic: str, difficulty: str, n_questions: int) -> str: """ Creates the role string that will be sent to the OpenAI API to generate the quiz. - + Parameters: - topic (str): The subject for the quiz. - difficulty (str): The desired difficulty of the quiz. - n_questions (int): Number of questions required. - + Returns: - str: The role string to be sent to the OpenAI API. - + This method structures the prompt for the OpenAI API to ensure consistent and correct responses. """ return ( @@ -114,17 +119,19 @@ def _create_openai_stream(self, role: str) -> Stream: Parameters: - role (str): The role string to be sent to the OpenAI API. - + Returns: - str: The raw response from the OpenAI API. """ return self.client.chat.completions.create( model="gpt-4-turbo-preview", messages=[{"role": "user", "content": role}], - stream=True + stream=True, ) - def _create_question_generator(self, openai_stream: Stream) -> Generator[str, None, None]: + def _create_question_generator( + self, openai_stream: Stream + ) -> Generator[str, None, None]: """Parses streamed data chunks from OpenAI into complete JSON objects and yields them in SSE format. Accumulates data in a buffer and attempts to parse complete JSON objects. 
If successful, @@ -133,7 +140,7 @@ def _create_question_generator(self, openai_stream: Stream) -> Generator[str, No Similar-ish SSE Fast API blog: https://medium.com/@nandagopal05/server-sent-events-with-python-fastapi-f1960e0c8e4b Helpful SO that says about the SSE format of data: {your-json}: https://stackoverflow.com/a/49486869/11902832 - + Args: openai_stream (Stream): Stream from OpenAI's api @@ -160,7 +167,7 @@ def _create_question_generator(self, openai_stream: Stream) -> Generator[str, No # If the JSON is complete, yield it and clear the buffer. yield self._format_sse(result) buffer = "" # Clear buffer on successful parse. - + logger.info("Finished stream!") @staticmethod @@ -175,10 +182,10 @@ def validate_and_parse_json(s: str) -> Optional[dict]: """ Helper method to validate and parse the provided string as JSON. Returns the parsed dict if s is valid JSON, otherwise returns None if the JSON is incomplete. - + Parameters: - s (str): The string to check. - + Returns: - dict: The parsed JSON object, or None if the JSON is incomplete. """ @@ -191,7 +198,7 @@ def validate_and_parse_json(s: str) -> Optional[dict]: @staticmethod def print_quiz(generator: Generator[str, None, None]): """Helper function to iterate through and print the results from the question generator. - + Args: generator (Generator[str, None, None]): Generator producing quiz questions as SSE formatted strings. 
""" @@ -201,6 +208,7 @@ def print_quiz(generator: Generator[str, None, None]): except Exception as e: logger.error(f"Error during quiz generation: {e}") + if __name__ == "__main__": # Set logger level to DEBUG if running this file to test logger.setLevel(logging.DEBUG) diff --git a/backend/tests/conftest.py b/backend/tests/conftest.py index a0dbdc6..06c4975 100644 --- a/backend/tests/conftest.py +++ b/backend/tests/conftest.py @@ -1,4 +1,4 @@ import sys import os -sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))) +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))) diff --git a/backend/tests/test_generate_image.py b/backend/tests/test_generate_image.py index c0a9741..af1ef08 100644 --- a/backend/tests/test_generate_image.py +++ b/backend/tests/test_generate_image.py @@ -11,26 +11,32 @@ 2. **Integration Tests**: Makes real API calls to OpenAI (run manually/staging only). """ + # Fixture to create an ImageGenerator instance with a dummy API key. @pytest.fixture def image_generator(monkeypatch): monkeypatch.setenv("OPENAI_API_KEY", "dummy_key") return ImageGenerator() + class TestImageGeneratorUnit: """ Unit tests for ImageGenerator class. Uses mocker to avoid real API calls. """ - + def test_generate_image_success(self, mocker, image_generator): """Test generate_image with a successful API response.""" # Create a mock response simulating the structure returned by OpenAI. mock_response = mocker.Mock() - mock_response.data = [SimpleNamespace(url="https://example.com/generated_image.png")] + mock_response.data = [ + SimpleNamespace(url="https://example.com/generated_image.png") + ] # Patch the generate method of the images client. 
- mocker.patch.object(image_generator.client.images, "generate", return_value=mock_response) + mocker.patch.object( + image_generator.client.images, "generate", return_value=mock_response + ) url = image_generator.generate_image("A test prompt") assert url == "https://example.com/generated_image.png" @@ -38,17 +44,25 @@ def test_generate_image_success(self, mocker, image_generator): def test_generate_image_custom_size(self, mocker, image_generator): """Test generate_image with a custom image size.""" mock_response = mocker.Mock() - mock_response.data = [SimpleNamespace(url="https://example.com/custom_size_image.png")] + mock_response.data = [ + SimpleNamespace(url="https://example.com/custom_size_image.png") + ] - mocker.patch.object(image_generator.client.images, "generate", return_value=mock_response) + mocker.patch.object( + image_generator.client.images, "generate", return_value=mock_response + ) - url = image_generator.generate_image("A dragon flying over mountains", size="512x512") + url = image_generator.generate_image( + "A dragon flying over mountains", size="512x512" + ) assert url == "https://example.com/custom_size_image.png" def test_generate_image_api_failure(self, mocker, image_generator): """Test generate_image when OpenAI API raises an exception.""" mocker.patch.object( - image_generator.client.images, "generate", side_effect=Exception("API request failed") + image_generator.client.images, + "generate", + side_effect=Exception("API request failed"), ) url = image_generator.generate_image("A cyberpunk city at night") @@ -58,7 +72,9 @@ def test_generate_image_invalid_prompt(self, mocker, image_generator): """Test generate_image with an invalid (empty) prompt.""" # Simulate failure (e.g., by having the API call raise an exception) mocker.patch.object( - image_generator.client.images, "generate", side_effect=Exception("Invalid prompt") + image_generator.client.images, + "generate", + side_effect=Exception("Invalid prompt"), ) assert 
image_generator.generate_image("") is None @@ -66,16 +82,20 @@ def test_openai_api_key_not_set(self, mocker): """Test that an error is raised if the OpenAI API key is not set in the environment variables.""" # Clear environment variables to simulate missing API key. mocker.patch.dict(os.environ, {}, clear=True) - - with pytest.raises(ValueError, match="Environment variable OPENAI_API_KEY is not set"): + + with pytest.raises( + ValueError, match="Environment variable OPENAI_API_KEY is not set" + ): ImageGenerator() - + def test_logging_when_api_fails(self, mocker, image_generator): """Test that errors are properly logged when the OpenAI API fails.""" # Patch the logger's error method. mock_logger = mocker.patch("backend.generate_image.logger.error") mocker.patch.object( - image_generator.client.images, "generate", side_effect=Exception("API failure") + image_generator.client.images, + "generate", + side_effect=Exception("API failure"), ) image_generator.generate_image("Test prompt") @@ -87,7 +107,7 @@ class TestImageGeneratorIntegration: Integration tests for ImageGenerator class. These tests make real API calls and should be run manually. """ - + @pytest.mark.integration def test_generate_image_real_api(self): """Calls the real OpenAI API and verifies it returns a valid URL.""" diff --git a/backend/tests/test_generate_quiz.py b/backend/tests/test_generate_quiz.py index 4718fce..d0f70fb 100644 --- a/backend/tests/test_generate_quiz.py +++ b/backend/tests/test_generate_quiz.py @@ -15,6 +15,7 @@ code under test and simulate various conditions. """ + # Fixture to create an instance of QuizGenerator with a dummy API key. @pytest.fixture def quiz_generator(monkeypatch): @@ -48,7 +49,9 @@ def test_environment_variable_not_set(self, monkeypatch): We remove the environment variable and expect the constructor to raise an error. 
""" monkeypatch.delenv("OPENAI_API_KEY", raising=False) - with pytest.raises(ValueError, match="Environment variable OPENAI_API_KEY is not set"): + with pytest.raises( + ValueError, match="Environment variable OPENAI_API_KEY is not set" + ): QuizGenerator() def test_create_role(self, quiz_generator): @@ -85,7 +88,7 @@ def test_create_openai_stream(self, mocker, quiz_generator): patcher.assert_called_once_with( model="gpt-4-turbo-preview", messages=[{"role": "user", "content": dummy_role}], - stream=True + stream=True, ) assert result == dummy_stream @@ -180,12 +183,14 @@ def test_print_quiz(self, mocker, quiz_generator): We patch logger.info to verify that the print_quiz method logs each quiz item correctly. """ - dummy_generator = (s for s in ["data: {\"quiz\": \"q1\"}\n\n", "data: {\"quiz\": \"q2\"}\n\n"]) + dummy_generator = ( + s for s in ['data: {"quiz": "q1"}\n\n', 'data: {"quiz": "q2"}\n\n'] + ) logger_info = mocker.patch("backend.generate_quiz.logger.info") QuizGenerator.print_quiz(dummy_generator) # Verify that logger.info was called with the expected messages. 
- logger_info.assert_any_call("Item 1: data: {\"quiz\": \"q1\"}\n\n") - logger_info.assert_any_call("Item 2: data: {\"quiz\": \"q2\"}\n\n") + logger_info.assert_any_call('Item 1: data: {"quiz": "q1"}\n\n') + logger_info.assert_any_call('Item 2: data: {"quiz": "q2"}\n\n') class TestQuizGeneratorIntegration: From c9b2b6071c577aaf9b3d13ea8ce2e98b5fcd252e Mon Sep 17 00:00:00 2001 From: DJSaunders1997 Date: Sun, 2 Feb 2025 11:08:24 +0000 Subject: [PATCH 10/16] Add pytest and mypy to ci --- .github/workflows/ci_python.yml | 15 ++++++++++++--- backend/requirements-dev.txt | 2 ++ 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci_python.yml b/.github/workflows/ci_python.yml index 8651b45..c0e2499 100644 --- a/.github/workflows/ci_python.yml +++ b/.github/workflows/ci_python.yml @@ -1,4 +1,4 @@ -name: Python Linting CI ๐Ÿ +name: Python Linting, Type Checking, and Testing CI ๐Ÿ on: push: @@ -21,8 +21,17 @@ jobs: with: python-version: "3.10" - - name: Install ruff ๐Ÿฆ€ - run: pip install ruff + - name: Install dependencies + run: | + # Install ruff for linting, mypy for type checking, and the development requirements (including pytest) + pip install ruff mypy + pip install -r backend/requirements-dev.txt - name: Lint Python with ruff ๐Ÿš€ run: ruff check backend/ + + - name: Run mypy for type checking ๐Ÿ” + run: mypy backend/ + + - name: Run Pytest ๐Ÿงช + run: pytest -q backend/tests/ -v diff --git a/backend/requirements-dev.txt b/backend/requirements-dev.txt index 2fb6ed4..2210c66 100644 --- a/backend/requirements-dev.txt +++ b/backend/requirements-dev.txt @@ -1,3 +1,5 @@ -r requirements.txt +ruff +mypy pytest pytest-mock \ No newline at end of file From 206fd6f55a92ffffe5f130f9e820ea2702cf72b0 Mon Sep 17 00:00:00 2001 From: DJSaunders1997 Date: Sun, 2 Feb 2025 11:11:34 +0000 Subject: [PATCH 11/16] Fix mypy errors --- backend/__init__.py | 0 backend/tests/__init__.py | 0 2 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 
backend/__init__.py create mode 100644 backend/tests/__init__.py diff --git a/backend/__init__.py b/backend/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/backend/tests/__init__.py b/backend/tests/__init__.py new file mode 100644 index 0000000..e69de29 From 8050e7212ad75a08cfab6ab521be983e54cbde3b Mon Sep 17 00:00:00 2001 From: DJSaunders1997 Date: Sun, 2 Feb 2025 11:19:32 +0000 Subject: [PATCH 12/16] Remove mypy as i cba --- .github/workflows/ci_python.yml | 6 +----- backend/requirements-dev.txt | 1 - 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/.github/workflows/ci_python.yml b/.github/workflows/ci_python.yml index c0e2499..f5dbc61 100644 --- a/.github/workflows/ci_python.yml +++ b/.github/workflows/ci_python.yml @@ -23,15 +23,11 @@ jobs: - name: Install dependencies run: | - # Install ruff for linting, mypy for type checking, and the development requirements (including pytest) - pip install ruff mypy + # Install ruff for linting and the development requirements (including pytest) pip install -r backend/requirements-dev.txt - name: Lint Python with ruff ๐Ÿš€ run: ruff check backend/ - - name: Run mypy for type checking ๐Ÿ” - run: mypy backend/ - - name: Run Pytest ๐Ÿงช run: pytest -q backend/tests/ -v diff --git a/backend/requirements-dev.txt b/backend/requirements-dev.txt index 2210c66..5570e34 100644 --- a/backend/requirements-dev.txt +++ b/backend/requirements-dev.txt @@ -1,5 +1,4 @@ -r requirements.txt ruff -mypy pytest pytest-mock \ No newline at end of file From 0352eea210832aeb3d6f8b5473cca145fbf80aed Mon Sep 17 00:00:00 2001 From: DJSaunders1997 Date: Sun, 2 Feb 2025 11:22:46 +0000 Subject: [PATCH 13/16] Remove testing form main readme --- README.md | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/README.md b/README.md index 2786716..2a2411d 100644 --- a/README.md +++ b/README.md @@ -38,13 +38,3 @@ GPTeasers is a webapp that generates quiz-style questions based on the topic you 3. 
Azure Container Apps: Once triggered, the FastAPI containers communicates with the OpenAI API, sending requests and receiving responses. 4. OpenAI API: Processes the request and sends back a response. -## Running Tests 🧪 - -To ensure the quality and functionality of the code, we use `pytest` for testing. Follow the steps below to run the tests: - -bash``` -cd backend -pip install -r requirements-dev.txt -pytest tests/ -``` - From bba01c01ea4ab1711d337ad04e7c83ddfd67ae7b Mon Sep 17 00:00:00 2001 From: DJSaunders1997 Date: Sun, 2 Feb 2025 11:48:28 +0000 Subject: [PATCH 14/16] Add dockerfile for frontend and docker-compose for both to help testing --- README.md | 38 ++++++++++++++++++++++++++++++++++ docker-compose.yml | 20 ++++++++++++++++++ frontend/Dockerfile | 11 ++++++++++ frontend/readme.md | 17 +++++++++++++-- frontend/scripts/controller.js | 11 ++++++++-- 5 files changed, 93 insertions(+), 4 deletions(-) create mode 100644 docker-compose.yml create mode 100644 frontend/Dockerfile diff --git a/README.md b/README.md index 2a2411d..5301cd7 100644 --- a/README.md +++ b/README.md @@ -38,3 +38,41 @@ GPTeasers is a webapp that generates quiz-style questions based on the topic you 3. Azure Container Apps: Once triggered, the FastAPI containers communicates with the OpenAI API, sending requests and receiving responses. 4. OpenAI API: Processes the request and sends back a response. +## Docker Compose Setup for Local Testing + +This project uses Docker Compose to run both the FastAPI backend and the frontend services locally. + +### Services + +- **fastapi_generate_quiz**: + The FastAPI backend that serves the GPTeasers API. This container is responsible for handling requests from the frontend and interacting with the OpenAI API to generate quizzes. + +- **frontend**: + A static frontend application. Although the site is hosted on GitHub Pages, this container allows you to test it locally. + +### Running Locally + +1. 
**Set Environment Variables** + Ensure that the `OPENAI_API_KEY` is set in your environment or in a `.env` file at the project root: + ```sh + export OPENAI_API_KEY=your_openai_api_key_here + ``` + or create a `.env` file with: + ``` + OPENAI_API_KEY=your_openai_api_key_here + ``` + +2. **Build and Run the Containers** + From the project root, run: + ```sh + docker-compose up --build + ``` + This command builds both the backend and frontend images and starts the containers. + +3. **Access the Services** + - **Backend API (FastAPI)**: + Access via [http://localhost:8000](http://localhost:8000) + - **Frontend**: + Access via [http://localhost:8080](http://localhost:8080) + +By following these steps, you can easily test both your backend API and your static frontend locally using Docker Compose. diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..a5dbde0 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,20 @@ +services: + fastapi_generate_quiz: + build: + context: ./backend + dockerfile: Dockerfile + ports: + - "8000:8000" + environment: + - OPENAI_API_KEY=${OPENAI_API_KEY} + + frontend: + build: + context: ./frontend + # Use the appropriate Dockerfile if you have multiple options. + dockerfile: Dockerfile + ports: + # If using the Nginx option, map port 80 to your host. + - "8080:80" + # If using http-server, map port 8080 to your host. + # - "8080:8080" diff --git a/frontend/Dockerfile b/frontend/Dockerfile new file mode 100644 index 0000000..406a6c2 --- /dev/null +++ b/frontend/Dockerfile @@ -0,0 +1,11 @@ +# Use the official lightweight Nginx image. +FROM nginx:alpine + +# Copy all files from the current directory (the frontend static files) +# to the default Nginx HTML directory. +COPY . /usr/share/nginx/html + +# Expose port 80 so the container can be accessed on that port. +EXPOSE 80 + +# Nginx is already configured to run on container start. 
diff --git a/frontend/readme.md b/frontend/readme.md index fe988f5..6c0f86c 100644 --- a/frontend/readme.md +++ b/frontend/readme.md @@ -54,11 +54,24 @@ display fetched quiz data on the web page, and manipulate other UI elements base ## Setup and Usage -1. Open `index.html` in a modern web browser, or `Live Server` VSCode extension. +1. Open `index.html` in a modern web browser, or use the `Live Server` VSCode extension. 2. Enter a quiz topic in the provided input field. 3. Click the "Fetch Quiz Data" button. 4. The app will fetch quiz data related to the given topic and display it on the page. +## Dockerfile for Local Testing + +This Dockerfile is used to containerize the frontend static site for local testing. +It copies all static files into a container running a lightweight web server so that you can test the site locally. + +- **Nginx Option:** + The Dockerfile uses the official Nginx image to serve the static files on port 80. + +- **http-server Option:** + Alternatively, the Dockerfile can use a Node.js image with http-server to serve the files on a specified port. + +Refer to the Docker Compose file at the project root to build and run the container. + ## UML ```mermaid @@ -126,4 +139,4 @@ classDiagram App --> UI : has a App --> Quiz : has a Controller --> Quiz : has a -``` \ No newline at end of file +``` diff --git a/frontend/scripts/controller.js b/frontend/scripts/controller.js index 3736cda..d69c742 100644 --- a/frontend/scripts/controller.js +++ b/frontend/scripts/controller.js @@ -12,8 +12,15 @@ class Controller { constructor(quiz) { this.eventSource = null; this.messageCount = 0; - // Change baseURL if local debugging - this.baseURL = "https://gpteasers.jollyocean-6818c6e0.ukwest.azurecontainerapps.io/"; + // Determine if we are running locally based on the hostname. 
+ console.log(`Running on: ${window.location.hostname}`); + if (window.location.hostname === "localhost" || window.location.hostname === "0.0.0.0") { + console.log("Running locally, pointing to local FastAPI backend."); + this.baseURL = "http://localhost:8000"; + } else { + console.log("Running remotely, pointing to Azure Container Instance backend."); + this.baseURL = "https://gpteasers.jollyocean-6818c6e0.ukwest.azurecontainerapps.io"; + } this.baseURLQuiz = `${this.baseURL}/GenerateQuiz`; this.baseURLImage = `${this.baseURL}/GenerateImage`; this.quiz = quiz; // this will be initialized as a quiz object From ba0c60cdc437326a16bb7d7d3487faf9084ee791 Mon Sep 17 00:00:00 2001 From: DJSaunders1997 Date: Sun, 2 Feb 2025 11:51:14 +0000 Subject: [PATCH 15/16] Add handy comments --- docker-compose.yml | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index a5dbde0..e2cd39e 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,4 +1,5 @@ services: +# Backend service for the FastAPI quiz generation API fastapi_generate_quiz: build: context: ./backend @@ -8,13 +9,10 @@ services: environment: - OPENAI_API_KEY=${OPENAI_API_KEY} +# Frontend service for local testing of the static site frontend: build: context: ./frontend - # Use the appropriate Dockerfile if you have multiple options. dockerfile: Dockerfile ports: - # If using the Nginx option, map port 80 to your host. - - "8080:80" - # If using http-server, map port 8080 to your host. 
- # - "8080:8080" + - "8080:80" # Nginx From 75cb26ed6e36b9ad603f33dc4764deda803731c1 Mon Sep 17 00:00:00 2001 From: DJSaunders1997 Date: Sun, 2 Feb 2025 11:52:56 +0000 Subject: [PATCH 16/16] ruff check --fix --- backend/generate_quiz.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/generate_quiz.py b/backend/generate_quiz.py index 13c218b..27c934b 100644 --- a/backend/generate_quiz.py +++ b/backend/generate_quiz.py @@ -1,4 +1,4 @@ -from typing import Generator, Optional, Iterable +from typing import Generator, Optional from openai import OpenAI, Stream import logging import json