Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
43 changes: 43 additions & 0 deletions .github/workflows/python-ci.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
---
# CI for the Python package: install dependencies (with a pip cache),
# lint, and run the test suite across the supported Python versions.
# For more information see: https://docs.github.com/en/actions/use-cases-and-examples/building-and-testing/building-and-testing-python

name: Python package

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

# Least-privilege token: this workflow only needs to read repo contents.
permissions:
  contents: read

jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        # Versions are quoted so YAML treats them as strings, not floats
        # (an unquoted 3.10, for example, would be parsed as 3.1).
        python-version: ["3.11", "3.12"]
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0
        with:
          python-version: ${{ matrix.python-version }}
      - name: Cache pip dependencies
        uses: actions/cache@2f8e54208210a422b2efd51efaa6bd6d7ca8920f # v3.4.3
        with:
          path: ~/.cache/pip
          # Cache key rolls whenever either requirements file changes.
          key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt', '**/requirements-test.txt') }}
          restore-keys: |
            ${{ runner.os }}-pip-
      - name: Install dependencies
        run: |
          pip install -r requirements.txt -r requirements-test.txt
      - name: Lint with flake8 and pylint
        run: |
          make lint
      - name: Test with pytest
        run: |
          make test
3 changes: 2 additions & 1 deletion requirements-test.txt
Original file line number Diff line number Diff line change
Expand Up @@ -4,4 +4,5 @@ flake8
isort
pylint
mypy
black
black
types-requests
62 changes: 57 additions & 5 deletions test_user_engagement_metrics.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,10 @@
"""
Test suite for the GitHub User Engagement Metrics module.

This module contains unit tests for all the functions in the user_engagement_metrics.py
module, including API interactions, file operations, and data processing logic.
"""

import json
import time
from unittest.mock import MagicMock
Expand All @@ -8,6 +15,16 @@

@pytest.fixture(autouse=True)
def patch_globals(tmp_path, monkeypatch):
"""
Fixture to patch global file paths to use temporary test directories.

This automatically runs for all tests, ensuring that tests don't use
or modify the real data files.

Args:
tmp_path: pytest fixture providing a temporary directory
monkeypatch: pytest fixture for modifying objects
"""
# Patch file paths to use test dir
monkeypatch.setattr(
user_engagement_metrics, "USERNAMES_FILE", str(tmp_path / "usernames.txt")
Expand All @@ -24,6 +41,12 @@ def patch_globals(tmp_path, monkeypatch):


def test_safe_get_rate_limit(monkeypatch):
"""
Test that safe_get handles GitHub API rate limits correctly.

This test verifies that when a rate limit response is received,
the function waits and retries the request.
"""
m_resp = MagicMock()
m_resp.status_code = 403
m_resp.headers = {
Expand All @@ -34,7 +57,8 @@ def test_safe_get_rate_limit(monkeypatch):

call_count = {"count": 0}

def fake_requests_get(*a, **kw):
def fake_requests_get(*_a, **_kw):
"""Fake requests.get to simulate rate limiting."""
call_count["count"] += 1
# simulate second call as success
if call_count["count"] > 1:
Expand All @@ -52,6 +76,9 @@ def fake_requests_get(*a, **kw):


def test_get_user_profile(monkeypatch):
"""
Test that get_user_profile correctly calls the GitHub API and processes the result.
"""
monkeypatch.setattr(
user_engagement_metrics,
"safe_get",
Expand All @@ -61,12 +88,13 @@ def test_get_user_profile(monkeypatch):


def test_get_user_repos(monkeypatch):
"""
Test that get_user_repos correctly handles API pagination.
"""
# Simulate 2 pages, then empty
responses = [
[{"id": 1}, {"id": 2}]
]
responses = [[{"id": 1}, {"id": 2}]]

def safe_get(url, params=None):
def safe_get(_url, **kwargs):
return MagicMock(json=lambda: responses.pop(0))

monkeypatch.setattr(user_engagement_metrics, "safe_get", safe_get)
Expand All @@ -75,6 +103,9 @@ def safe_get(url, params=None):


def test_get_starred_repos_count_no_link(monkeypatch):
"""
Test that get_starred_repos_count works correctly when no pagination Link header is present.
"""
m_resp = MagicMock()
m_resp.headers = {}
m_resp.json.return_value = [1, 2, 3]
Expand All @@ -83,6 +114,9 @@ def test_get_starred_repos_count_no_link(monkeypatch):


def test_get_starred_repos_count_with_link(monkeypatch):
"""
Test that get_starred_repos_count correctly parses the Link header for total count.
"""
m_resp = MagicMock()
m_resp.headers = {
"Link": '<https://api.github.com/user/123/starred?page=42>; rel="last"'
Expand All @@ -93,6 +127,9 @@ def test_get_starred_repos_count_with_link(monkeypatch):


def test_get_orgs(monkeypatch):
"""
Test that get_orgs correctly processes the API response.
"""
monkeypatch.setattr(
user_engagement_metrics,
"safe_get",
Expand All @@ -102,6 +139,9 @@ def test_get_orgs(monkeypatch):


def test_search_user_contributions_commit(monkeypatch):
"""
Test that search_user_contributions correctly handles commit searches.
"""
m_resp = MagicMock()
m_resp.json.return_value = {"total_count": 123}
monkeypatch.setattr(
Expand All @@ -113,6 +153,9 @@ def test_search_user_contributions_commit(monkeypatch):


def test_search_user_contributions_issue(monkeypatch):
"""
Test that search_user_contributions correctly handles issue searches.
"""
m_resp = MagicMock()
m_resp.json.return_value = {"total_count": 99}
monkeypatch.setattr(
Expand All @@ -124,20 +167,29 @@ def test_search_user_contributions_issue(monkeypatch):


def test_load_completed_usernames(tmp_path, monkeypatch):
    """
    Verify that load_completed_usernames parses the checkpoint file,
    skipping blank lines and returning the usernames as a set.
    """
    checkpoint = tmp_path / "completed_usernames.txt"
    checkpoint.write_text("a\nb\n\nc\n")
    monkeypatch.setattr(user_engagement_metrics, "CHECKPOINT_FILE", str(checkpoint))
    expected = {"a", "b", "c"}
    assert user_engagement_metrics.load_completed_usernames() == expected


def test_append_completed_username(tmp_path, monkeypatch):
    """
    Verify that append_completed_username records the given username
    in the checkpoint file.
    """
    checkpoint = tmp_path / "completed_usernames.txt"
    monkeypatch.setattr(user_engagement_metrics, "CHECKPOINT_FILE", str(checkpoint))
    user_engagement_metrics.append_completed_username("dude")
    contents = checkpoint.read_text()
    assert contents.strip() == "dude"


def test_append_result(tmp_path, monkeypatch):
"""
Test that append_result correctly writes results to the output file.
"""
file_path = tmp_path / "user_results.jsonl"
monkeypatch.setattr(user_engagement_metrics, "OUTPUT_FILE", str(file_path))
user_engagement_metrics.append_result({"foo": "bar"})
Expand Down
Loading