diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 0e2488133e9..4ca326f2578 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -2,7 +2,8 @@ name: Generate Stats Images on: push: - branches: [ master ] + branches: + - master schedule: - cron: "5 0 * * *" workflow_dispatch: @@ -10,45 +11,41 @@ on: permissions: contents: write +defaults: + run: + shell: bash -euxo pipefail {0} + jobs: build: runs-on: ubuntu-latest - steps: - # Check out repository under $GITHUB_WORKSPACE, so the job can access it - - uses: actions/checkout@v3 + - uses: actions/checkout@v6 + - name: Checkout history branch + run: | + git config --global user.name "jstrieb/github-stats" + git config --global user.email "github-stats[bot]@jstrieb.github.io" + # Push generated files to the generated branch + git checkout generated || git checkout -b generated + git merge master - # Run using Python 3.8 for consistency and aiohttp - - name: Set up Python 3.8 - uses: actions/setup-python@v4 + - uses: mlugg/setup-zig@v2 with: - python-version: '3.8' - architecture: 'x64' - cache: 'pip' + version: 0.15.2 - # Install dependencies with `pip` - - name: Install requirements + - name: Build run: | - python3 -m pip install --upgrade pip setuptools wheel - python3 -m pip install -r requirements.txt + zig build --release - # Generate all statistics images - name: Generate images run: | - python3 --version - python3 generate_images.py + ./zig-out/bin/github_stats env: - ACCESS_TOKEN: ${{ secrets.ACCESS_TOKEN }} - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - EXCLUDED: ${{ secrets.EXCLUDED }} + API_KEY: ${{ secrets.GITHUB_TOKEN }} + EXCLUDED_REPOS: ${{ secrets.EXCLUDED_REPOS }} EXCLUDED_LANGS: ${{ secrets.EXCLUDED_LANGS }} - EXCLUDE_FORKED_REPOS: true - # Commit all changed files to the repository - name: Commit to the repo run: | - git config --global user.name "jstrieb/github-stats" - git config --global user.email "github-stats[bot]@jstrieb.github.io" git add . 
# Force the build to succeed, even if no files were changed git commit -m 'Update generated files' || true diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 00000000000..8374f327e5b --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,38 @@ +name: Build Release Binaries + +on: + push: + tags: + - '*' + workflow_dispatch: + +defaults: + run: + shell: bash -euxo pipefail {0} + +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + + - uses: mlugg/setup-zig@v2 + with: + version: 0.15.2 + + - name: Build + run: | + zig build release + + - name: Upload Release Artifacts + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + TAG: ${{ github.ref_name }} + run: | + ( + cd zig-out/bin/ + gh release create \ + "${TAG}" \ + --title "${TAG} Release" \ + * + ) diff --git a/.gitignore b/.gitignore index a9e4db76055..38c3d4792fe 100644 --- a/.gitignore +++ b/.gitignore @@ -134,3 +134,7 @@ dmypy.json # PyCharm project files .idea + +# Zig files +.zig-cache +zig-out diff --git a/README.md b/README.md index a24323b1cdc..63955777653 100644 --- a/README.md +++ b/README.md @@ -3,143 +3,84 @@ + +
- - - - + + + + +
Generate visualizations of GitHub user and repository statistics with GitHub -Actions. Visualizations can include data for both private repositories, and for +Actions. Visualizations can include data from private repositories, and from repositories you have contributed to, but do not own. Generated images automatically switch between GitHub light theme and GitHub dark theme. + ## Background -When someone views a profile on GitHub, it is often because they are curious -about a user's open source projects and contributions. Unfortunately, that -user's stars, forks, and pinned repositories do not necessarily reflect the -contributions they make to private repositories. The data likewise does not -present a complete picture of the user's total contributions beyond the current -year. +When someone views a GitHub profile, it is often because they are curious about +the user's open source contributions. Unfortunately, that user's stars, forks, +and pinned repositories do not necessarily reflect the contributions they make +to private repositories. The data likewise does not present a complete picture +of the user's total contributions beyond the current year. This project aims to collect a variety of profile and repository statistics using the GitHub API. It then generates images that can be displayed in repository READMEs, or in a user's [Profile README](https://docs.github.com/en/github/setting-up-and-managing-your-github-profile/managing-your-profile-readme). +It also dumps all statistics to a JSON file that can be used for further data +analysis. + +Since this project runs on GitHub Actions, no server is required to regularly +regenerate the images with updated statistics. Likewise, since the user runs the +analysis code themselves via GitHub Actions, they can use their GitHub access +token to collect statistics on private repositories that an external service +would be unable to access. 
-Since the project runs on GitHub Actions, no server is required to regularly -regenerate the images with updated statistics. Likewise, since the user runs -the analysis code themselves via GitHub Actions, they can use their GitHub -access token to collect statistics on private repositories that an external -service would be unable to access. ## Disclaimer -If the project is used with an access token that has sufficient permissions to -read private repositories, it may leak details about those repositories in -error messages. For example, the `aiohttp` library—used for asynchronous API -requests—may include the requested URL in exceptions, which can leak the name -of private repositories. If there is an exception caused by `aiohttp`, this -exception will be viewable in the Actions tab of the repository fork, and -anyone may be able to see the name of one or more private repositories. - -Due to some issues with the GitHub statistics API, there are some situations -where it returns inaccurate results. Specifically, the repository view count -statistics and total lines of code modified are probably somewhat inaccurate. -Unexpectedly, these values will become more accurate over time as GitHub -caches statistics for your repositories. Additionally, repositories that were -last contributed to more than a year ago may not be included in the statistics -due to limitations in the results returned by the API. - -For more information on inaccuracies, see issue -[#2](https://github.com/jstrieb/github-stats/issues/2), -[#3](https://github.com/jstrieb/github-stats/issues/3), and -[#13](https://github.com/jstrieb/github-stats/issues/13). 
+The GitHub statistics API returns inaccurate results in some situations: + +- Repository view count statistics often seem too low, and many referring sites + are not captured + - If you lack permissions to access the view count for a repository, it will + be tallied as zero views – this is common for external repositories where + your only contribution is making a pull request +- Total lines of code modified may be inflated – it counts changes to files like + `package.json` that may impact the line count in surprising ways +- Only repositories with commit contributions are counted, so if you only open + an issue on a repo, it will not show up in the statistics + - Repos you created and own may not be counted if you never commit to them, or + if the committer email is not connected to your GitHub account + # Installation - - -1. Create a personal access token (not the default GitHub Actions token) using - the instructions - [here](https://docs.github.com/en/github/authenticating-to-github/creating-a-personal-access-token). - Personal access token must have permissions: `read:user` and `repo`. Copy - the access token when it is generated – if you lose it, you will have to - regenerate the token. - - Some users are reporting that it can take a few minutes for the personal - access token to work. For more, see - [#30](https://github.com/jstrieb/github-stats/issues/30). -2. Create a copy of this repository by clicking - [here](https://github.com/jstrieb/github-stats/generate). Note: this is - **not** the same as forking a copy because it copies everything fresh, - without the huge commit history. -3. Go to the "Secrets" page of your copy of the repository. If this is the - README of your copy, click [this link](../../settings/secrets/actions) to go - to the "Secrets" page. Otherwise, go to the "Settings" tab of the - newly-created repository and go to the "Secrets" page (bottom left). -4. 
Create a new secret with the name `ACCESS_TOKEN` and paste the copied - personal access token as the value. -5. It is possible to change the type of statistics reported by adding other - repository secrets. - - To ignore certain repos, add them (in owner/name format e.g., - `jstrieb/github-stats`) separated by commas to a new secret—created as - before—called `EXCLUDED`. - - To ignore certain languages, add them (separated by commas) to a new - secret called `EXCLUDED_LANGS`. For example, to exclude HTML and TeX you - could set the value to `html,tex`. - - To show statistics only for "owned" repositories and not forks with - contributions, add an environment variable (under the `env` header in the - [main - workflow](https://github.com/jstrieb/github-stats/blob/master/.github/workflows/main.yml)) - called `EXCLUDE_FORKED_REPOS` with a value of `true`. - - These other values are added as secrets by default to prevent leaking - information about private repositories. If you're not worried about that, - you can change the values directly [in the Actions workflow - itself](https://github.com/jstrieb/github-stats/blob/05de1314b870febd44d19ad2f55d5e59d83f5857/.github/workflows/main.yml#L48-L53). -6. Go to the [Actions - Page](../../actions?query=workflow%3A"Generate+Stats+Images") and press "Run - Workflow" on the right side of the screen to generate images for the first - time. - - The images will be automatically regenerated every 24 hours, but they can - be regenerated manually by running the workflow this way. -7. Take a look at the images that have been created in the - [`generated`](generated) folder. -8. To add your statistics to your GitHub Profile README, copy and paste the - following lines of code into your markdown content. Change the `username` - value to your GitHub username. 
- ```md - ![](https://raw.githubusercontent.com/username/github-stats/master/generated/overview.svg#gh-dark-mode-only) - ![](https://raw.githubusercontent.com/username/github-stats/master/generated/overview.svg#gh-light-mode-only) - ``` - ```md - ![](https://raw.githubusercontent.com/username/github-stats/master/generated/languages.svg#gh-dark-mode-only) - ![](https://raw.githubusercontent.com/username/github-stats/master/generated/languages.svg#gh-light-mode-only) - ``` -9. Link back to this repository so that others can generate their own - statistics images. -10. Star this repo if you like it! +TODO # Support the Project -There are a few things you can do to support the project: +If this project is useful to you, please support it! - Star the repository (and follow me on GitHub for more) - Share and upvote on sites like Twitter, Reddit, and Hacker News - Report any bugs, glitches, or errors that you find These things motivate me to keep sharing what I build, and they provide -validation that my work is appreciated! They also help me improve the -project. Thanks in advance! +validation that my work is appreciated! They also help me improve the project. +Thanks in advance! If you are insistent on spending money to show your support, I encourage you to -instead make a generous donation to one of the following organizations. By advocating -for Internet freedoms, organizations like these help me to feel comfortable -releasing work publicly on the Web. +instead make a generous donation to one of the following organizations. By +advocating for Internet freedoms, organizations like these help me to feel +comfortable releasing work publicly on the Web. - [Electronic Frontier Foundation](https://supporters.eff.org/donate/) - [Signal Foundation](https://signal.org/donate/) @@ -147,9 +88,25 @@ releasing work publicly on the Web. 
- [The Internet Archive](https://archive.org/donate/index.php) +## Project Status + +This project is actively maintained, but not actively developed. In other words, +I will fix bugs, but will rarely continue adding features (if at all). If there +are no recent commits, it means that everything has been running smoothly! + +If you want to contribute to the project, please open an issue to discuss first. +Pull requests that are not discussed with me ahead of time may be ignored. It's +nothing personal, I'm just busy, and reviewing others' code is not my idea of +fun. + +Even if something were to happen to me, and I could not continue to work on the +project, it will continue to work as long as the GitHub API endpoints it uses +remain active and unchanged. + + # Related Projects - Inspired by a desire to improve upon [anuraghazra/github-readme-stats](https://github.com/anuraghazra/github-readme-stats) -- Makes use of [GitHub Octicons](https://primer.style/octicons/) to precisely - match the GitHub UI +- Uses [GitHub Octicons](https://primer.style/octicons/) to precisely match the + GitHub UI diff --git a/build.zig b/build.zig new file mode 100644 index 00000000000..74d4843c466 --- /dev/null +++ b/build.zig @@ -0,0 +1,82 @@ +const std = @import("std"); + +pub fn build(b: *std.Build) !void { + const default_target = b.standardTargetOptions(.{}); + const optimize = b.standardOptimizeOption(.{ + .preferred_optimize_mode = .ReleaseSafe, + }); + + const exe = b.addExecutable(.{ + .name = "github-stats", + .root_module = b.createModule(.{ + .root_source_file = b.path("src/main.zig"), + .target = default_target, + .optimize = optimize, + }), + }); + b.installArtifact(exe); + + const run_step = b.step("run", "Run the app"); + const run_cmd = b.addRunArtifact(exe); + run_step.dependOn(&run_cmd.step); + run_cmd.step.dependOn(b.getInstallStep()); + if (b.args) |args| { + run_cmd.addArgs(args); + } + + const tests = b.addTest(.{ .root_module = exe.root_module }); + const 
run_tests = b.addRunArtifact(tests); + const test_step = b.step("test", "Run the tests"); + test_step.dependOn(&run_tests.step); + + const release_step = b.step("release", "Cross-compile release binaries"); + const release_targets: []const std.Target.Query = &.{ + // Zig tier 1 supported compiler targets (manually tested) + .{ .cpu_arch = .x86_64, .os_tag = .linux }, + .{ .cpu_arch = .x86_64, .os_tag = .macos }, + // Zig tier 2 supported compiler targets (manually tested) + .{ .cpu_arch = .aarch64, .os_tag = .macos }, + .{ .cpu_arch = .x86_64, .os_tag = .windows }, + // Zig tier 2 supported compiler targets (untested) + .{ .cpu_arch = .aarch64, .os_tag = .freebsd }, + .{ .cpu_arch = .aarch64, .os_tag = .linux }, + .{ .cpu_arch = .aarch64, .os_tag = .netbsd }, + .{ .cpu_arch = .aarch64, .os_tag = .windows }, + .{ .cpu_arch = .arm, .os_tag = .freebsd }, + .{ .cpu_arch = .arm, .os_tag = .linux }, + .{ .cpu_arch = .arm, .os_tag = .netbsd }, + .{ .cpu_arch = .loongarch64, .os_tag = .linux }, + .{ .cpu_arch = .powerpc, .os_tag = .linux }, + .{ .cpu_arch = .powerpc, .os_tag = .netbsd }, + .{ .cpu_arch = .powerpc64, .os_tag = .freebsd }, + .{ .cpu_arch = .powerpc64, .os_tag = .linux }, + .{ .cpu_arch = .powerpc64le, .os_tag = .freebsd }, + .{ .cpu_arch = .powerpc64le, .os_tag = .linux }, + .{ .cpu_arch = .riscv32, .os_tag = .linux }, + .{ .cpu_arch = .riscv64, .os_tag = .freebsd }, + .{ .cpu_arch = .riscv64, .os_tag = .linux }, + .{ .cpu_arch = .thumb, .os_tag = .windows }, + .{ .cpu_arch = .thumb, .os_tag = .linux }, + // Fails with error due to networking + // .{ .cpu_arch = .wasm32, .os_tag = .wasi }, + .{ .cpu_arch = .x86, .os_tag = .linux }, + .{ .cpu_arch = .x86, .os_tag = .windows }, + .{ .cpu_arch = .x86_64, .os_tag = .freebsd }, + .{ .cpu_arch = .x86_64, .os_tag = .netbsd }, + }; + for (release_targets) |t| { + const cross_exe = b.addExecutable(.{ + .name = try std.fmt.allocPrint( + b.allocator, + "github-stats_{s}", + .{try t.zigTriple(b.allocator)}, + ), + 
.root_module = b.createModule(.{ + .root_source_file = b.path("src/main.zig"), + .target = b.resolveTargetQuery(t), + .optimize = .ReleaseFast, + }), + }); + release_step.dependOn(&b.addInstallArtifact(cross_exe, .{}).step); + } +} diff --git a/build.zig.zon b/build.zig.zon new file mode 100644 index 00000000000..4b090bd8871 --- /dev/null +++ b/build.zig.zon @@ -0,0 +1,14 @@ +.{ + .name = .github_stats, + .version = "0.0.0", + .fingerprint = 0x80bb05a632422e37, // Changing this has security and trust implications. + .minimum_zig_version = "0.15.2", + .dependencies = .{}, + .paths = .{ + "build.zig", + "build.zig.zon", + "src", + "LICENSE", + "README.md", + }, +} diff --git a/generate_images.py b/generate_images.py deleted file mode 100644 index e800b9357ea..00000000000 --- a/generate_images.py +++ /dev/null @@ -1,136 +0,0 @@ -#!/usr/bin/python3 - -import asyncio -import os -import re - -import aiohttp - -from github_stats import Stats - - -################################################################################ -# Helper Functions -################################################################################ - - -def generate_output_folder() -> None: - """ - Create the output folder if it does not already exist - """ - if not os.path.isdir("generated"): - os.mkdir("generated") - - -################################################################################ -# Individual Image Generation Functions -################################################################################ - - -async def generate_overview(s: Stats) -> None: - """ - Generate an SVG badge with summary statistics - :param s: Represents user's GitHub statistics - """ - with open("templates/overview.svg", "r") as f: - output = f.read() - - output = re.sub("{{ name }}", await s.name, output) - output = re.sub("{{ stars }}", f"{await s.stargazers:,}", output) - output = re.sub("{{ forks }}", f"{await s.forks:,}", output) - output = re.sub("{{ contributions }}", f"{await 
s.total_contributions:,}", output) - changed = (await s.lines_changed)[0] + (await s.lines_changed)[1] - output = re.sub("{{ lines_changed }}", f"{changed:,}", output) - output = re.sub("{{ views }}", f"{await s.views:,}", output) - output = re.sub("{{ repos }}", f"{len(await s.repos):,}", output) - - generate_output_folder() - with open("generated/overview.svg", "w") as f: - f.write(output) - - -async def generate_languages(s: Stats) -> None: - """ - Generate an SVG badge with summary languages used - :param s: Represents user's GitHub statistics - """ - with open("templates/languages.svg", "r") as f: - output = f.read() - - progress = "" - lang_list = "" - sorted_languages = sorted( - (await s.languages).items(), reverse=True, key=lambda t: t[1].get("size") - ) - delay_between = 150 - for i, (lang, data) in enumerate(sorted_languages): - color = data.get("color") - color = color if color is not None else "#000000" - progress += ( - f'' - ) - lang_list += f""" -
  • - -{lang} -{data.get("prop", 0):0.2f}% -
  • - -""" - - output = re.sub(r"{{ progress }}", progress, output) - output = re.sub(r"{{ lang_list }}", lang_list, output) - - generate_output_folder() - with open("generated/languages.svg", "w") as f: - f.write(output) - - -################################################################################ -# Main Function -################################################################################ - - -async def main() -> None: - """ - Generate all badges - """ - access_token = os.getenv("ACCESS_TOKEN") - if not access_token: - # access_token = os.getenv("GITHUB_TOKEN") - raise Exception("A personal access token is required to proceed!") - user = os.getenv("GITHUB_ACTOR") - if user is None: - raise RuntimeError("Environment variable GITHUB_ACTOR must be set.") - exclude_repos = os.getenv("EXCLUDED") - excluded_repos = ( - {x.strip() for x in exclude_repos.split(",")} if exclude_repos else None - ) - exclude_langs = os.getenv("EXCLUDED_LANGS") - excluded_langs = ( - {x.strip() for x in exclude_langs.split(",")} if exclude_langs else None - ) - # Convert a truthy value to a Boolean - raw_ignore_forked_repos = os.getenv("EXCLUDE_FORKED_REPOS") - ignore_forked_repos = ( - not not raw_ignore_forked_repos - and raw_ignore_forked_repos.strip().lower() != "false" - ) - async with aiohttp.ClientSession() as session: - s = Stats( - user, - access_token, - session, - exclude_repos=excluded_repos, - exclude_langs=excluded_langs, - ignore_forked_repos=ignore_forked_repos, - ) - await asyncio.gather(generate_languages(s), generate_overview(s)) - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/generated/languages.svg b/generated/languages.svg deleted file mode 100644 index 02d63624f6b..00000000000 --- a/generated/languages.svg +++ /dev/null @@ -1,392 +0,0 @@ - - - - - - -
    - -

    Languages Used (By File Size)

    - -
    - - - -
    - -
      - - -
    • - -Python -29.30% -
    • - - -
    • - -C -17.71% -
    • - - -
    • - -Zig -11.03% -
    • - - -
    • - -JavaScript -9.41% -
    • - - -
    • - -Svelte -7.45% -
    • - - -
    • - -Standard ML -6.67% -
    • - - -
    • - -Shell -5.34% -
    • - - -
    • - -Go -2.41% -
    • - - -
    • - -SMT -2.05% -
    • - - -
    • - -TeX -1.88% -
    • - - -
    • - -CSS -1.68% -
    • - - -
    • - -Makefile -1.41% -
    • - - -
    • - -Java -1.31% -
    • - - -
    • - -C++ -0.66% -
    • - - -
    • - -OpenSCAD -0.35% -
    • - - -
    • - -Vim Script -0.19% -
    • - - -
    • - -TypeScript -0.19% -
    • - - -
    • - -Assembly -0.17% -
    • - - -
    • - -GDB -0.16% -
    • - - -
    • - -Nix -0.16% -
    • - - -
    • - -PHP -0.13% -
    • - - -
    • - -Tree-sitter Query -0.12% -
    • - - -
    • - -Just -0.09% -
    • - - -
    • - -Dockerfile -0.08% -
    • - - -
    • - -CMake -0.03% -
    • - - -
    • - -sed -0.01% -
    • - - -
    • - -Batchfile -0.01% -
    • - - - -
    - -
    -
    -
    -
    -
    diff --git a/generated/overview.svg b/generated/overview.svg deleted file mode 100644 index e2b0b6baf34..00000000000 --- a/generated/overview.svg +++ /dev/null @@ -1,113 +0,0 @@ - - - - - - -
    - - - - - - - - - - - - - - - - - - - - -
    Jacob Strieb's GitHub Statistics
    Stars7,515
    Forks1,160
    All-time contributions4,543
    Lines of code changed2,777,663
    Repository views (past two weeks)1,568
    Repositories with contributions128
    - -
    -
    -
    -
    -
    diff --git a/github_stats.py b/github_stats.py deleted file mode 100644 index b663896e942..00000000000 --- a/github_stats.py +++ /dev/null @@ -1,545 +0,0 @@ -#!/usr/bin/python3 - -import asyncio -import os -from typing import Dict, List, Optional, Set, Tuple, Any, cast - -import aiohttp -import requests - - -############################################################################### -# Main Classes -############################################################################### - - -class Queries(object): - """ - Class with functions to query the GitHub GraphQL (v4) API and the REST (v3) - API. Also includes functions to dynamically generate GraphQL queries. - """ - - def __init__( - self, - username: str, - access_token: str, - session: aiohttp.ClientSession, - max_connections: int = 10, - ): - self.username = username - self.access_token = access_token - self.session = session - self.semaphore = asyncio.Semaphore(max_connections) - - async def query(self, generated_query: str) -> Dict: - """ - Make a request to the GraphQL API using the authentication token from - the environment - :param generated_query: string query to be sent to the API - :return: decoded GraphQL JSON output - """ - headers = { - "Authorization": f"Bearer {self.access_token}", - } - try: - async with self.semaphore: - r_async = await self.session.post( - "https://api.github.com/graphql", - headers=headers, - json={"query": generated_query}, - ) - result = await r_async.json() - if result is not None: - return result - except: - print("aiohttp failed for GraphQL query") - # Fall back on non-async requests - async with self.semaphore: - r_requests = requests.post( - "https://api.github.com/graphql", - headers=headers, - json={"query": generated_query}, - ) - result = r_requests.json() - if result is not None: - return result - return dict() - - async def query_rest(self, path: str, params: Optional[Dict] = None) -> Dict: - """ - Make a request to the REST API - :param path: API path to 
query - :param params: Query parameters to be passed to the API - :return: deserialized REST JSON output - """ - - for _ in range(60): - headers = { - "Authorization": f"token {self.access_token}", - } - if params is None: - params = dict() - if path.startswith("/"): - path = path[1:] - try: - async with self.semaphore: - r_async = await self.session.get( - f"https://api.github.com/{path}", - headers=headers, - params=tuple(params.items()), - ) - if r_async.status == 202: - # print(f"{path} returned 202. Retrying...") - print(f"A path returned 202. Retrying...") - await asyncio.sleep(2) - continue - - result = await r_async.json() - if result is not None: - return result - except: - print("aiohttp failed for rest query") - # Fall back on non-async requests - async with self.semaphore: - r_requests = requests.get( - f"https://api.github.com/{path}", - headers=headers, - params=tuple(params.items()), - ) - if r_requests.status_code == 202: - print(f"A path returned 202. Retrying...") - await asyncio.sleep(2) - continue - elif r_requests.status_code == 200: - return r_requests.json() - # print(f"There were too many 202s. Data for {path} will be incomplete.") - print("There were too many 202s. 
Data for this repository will be incomplete.") - return dict() - - @staticmethod - def repos_overview( - contrib_cursor: Optional[str] = None, owned_cursor: Optional[str] = None - ) -> str: - """ - :return: GraphQL query with overview of user repositories - """ - return f"""{{ - viewer {{ - login, - name, - repositories( - first: 100, - orderBy: {{ - field: UPDATED_AT, - direction: DESC - }}, - isFork: false, - after: {"null" if owned_cursor is None else '"'+ owned_cursor +'"'} - ) {{ - pageInfo {{ - hasNextPage - endCursor - }} - nodes {{ - nameWithOwner - stargazers {{ - totalCount - }} - forkCount - languages(first: 10, orderBy: {{field: SIZE, direction: DESC}}) {{ - edges {{ - size - node {{ - name - color - }} - }} - }} - }} - }} - repositoriesContributedTo( - first: 100, - includeUserRepositories: false, - orderBy: {{ - field: UPDATED_AT, - direction: DESC - }}, - contributionTypes: [ - COMMIT, - PULL_REQUEST, - REPOSITORY, - PULL_REQUEST_REVIEW - ] - after: {"null" if contrib_cursor is None else '"'+ contrib_cursor +'"'} - ) {{ - pageInfo {{ - hasNextPage - endCursor - }} - nodes {{ - nameWithOwner - stargazers {{ - totalCount - }} - forkCount - languages(first: 10, orderBy: {{field: SIZE, direction: DESC}}) {{ - edges {{ - size - node {{ - name - color - }} - }} - }} - }} - }} - }} -}} -""" - - @staticmethod - def contrib_years() -> str: - """ - :return: GraphQL query to get all years the user has been a contributor - """ - return """ -query { - viewer { - contributionsCollection { - contributionYears - } - } -} -""" - - @staticmethod - def contribs_by_year(year: str) -> str: - """ - :param year: year to query for - :return: portion of a GraphQL query with desired info for a given year - """ - return f""" - year{year}: contributionsCollection( - from: "{year}-01-01T00:00:00Z", - to: "{int(year) + 1}-01-01T00:00:00Z" - ) {{ - contributionCalendar {{ - totalContributions - }} - }} -""" - - @classmethod - def all_contribs(cls, years: List[str]) -> str: - """ - 
:param years: list of years to get contributions for - :return: query to retrieve contribution information for all user years - """ - by_years = "\n".join(map(cls.contribs_by_year, years)) - return f""" -query {{ - viewer {{ - {by_years} - }} -}} -""" - - -class Stats(object): - """ - Retrieve and store statistics about GitHub usage. - """ - - def __init__( - self, - username: str, - access_token: str, - session: aiohttp.ClientSession, - exclude_repos: Optional[Set] = None, - exclude_langs: Optional[Set] = None, - ignore_forked_repos: bool = False, - ): - self.username = username - self._ignore_forked_repos = ignore_forked_repos - self._exclude_repos = set() if exclude_repos is None else exclude_repos - self._exclude_langs = set() if exclude_langs is None else exclude_langs - self.queries = Queries(username, access_token, session) - - self._name: Optional[str] = None - self._stargazers: Optional[int] = None - self._forks: Optional[int] = None - self._total_contributions: Optional[int] = None - self._languages: Optional[Dict[str, Any]] = None - self._repos: Optional[Set[str]] = None - self._lines_changed: Optional[Tuple[int, int]] = None - self._views: Optional[int] = None - - async def to_str(self) -> str: - """ - :return: summary of all available statistics - """ - languages = await self.languages_proportional - formatted_languages = "\n - ".join( - [f"{k}: {v:0.4f}%" for k, v in languages.items()] - ) - lines_changed = await self.lines_changed - return f"""Name: {await self.name} -Stargazers: {await self.stargazers:,} -Forks: {await self.forks:,} -All-time contributions: {await self.total_contributions:,} -Repositories with contributions: {len(await self.repos)} -Lines of code added: {lines_changed[0]:,} -Lines of code deleted: {lines_changed[1]:,} -Lines of code changed: {lines_changed[0] + lines_changed[1]:,} -Project page views: {await self.views:,} -Languages: - - {formatted_languages}""" - - async def get_stats(self) -> None: - """ - Get lots of summary 
statistics using one big query. Sets many attributes - """ - self._stargazers = 0 - self._forks = 0 - self._languages = dict() - self._repos = set() - - exclude_langs_lower = {x.lower() for x in self._exclude_langs} - - next_owned = None - next_contrib = None - while True: - raw_results = await self.queries.query( - Queries.repos_overview( - owned_cursor=next_owned, contrib_cursor=next_contrib - ) - ) - raw_results = raw_results if raw_results is not None else {} - - self._name = raw_results.get("data", {}).get("viewer", {}).get("name", None) - if self._name is None: - self._name = ( - raw_results.get("data", {}) - .get("viewer", {}) - .get("login", "No Name") - ) - - contrib_repos = ( - raw_results.get("data", {}) - .get("viewer", {}) - .get("repositoriesContributedTo", {}) - ) - owned_repos = ( - raw_results.get("data", {}).get("viewer", {}).get("repositories", {}) - ) - - repos = owned_repos.get("nodes", []) - if not self._ignore_forked_repos: - repos += contrib_repos.get("nodes", []) - - for repo in repos: - if repo is None: - continue - name = repo.get("nameWithOwner") - if name in self._repos or name in self._exclude_repos: - continue - self._repos.add(name) - self._stargazers += repo.get("stargazers").get("totalCount", 0) - self._forks += repo.get("forkCount", 0) - - for lang in repo.get("languages", {}).get("edges", []): - name = lang.get("node", {}).get("name", "Other") - languages = await self.languages - if name.lower() in exclude_langs_lower: - continue - if name in languages: - languages[name]["size"] += lang.get("size", 0) - languages[name]["occurrences"] += 1 - else: - languages[name] = { - "size": lang.get("size", 0), - "occurrences": 1, - "color": lang.get("node", {}).get("color"), - } - - if owned_repos.get("pageInfo", {}).get( - "hasNextPage", False - ) or contrib_repos.get("pageInfo", {}).get("hasNextPage", False): - next_owned = owned_repos.get("pageInfo", {}).get( - "endCursor", next_owned - ) - next_contrib = contrib_repos.get("pageInfo", 
{}).get( - "endCursor", next_contrib - ) - else: - break - - # TODO: Improve languages to scale by number of contributions to - # specific filetypes - langs_total = sum([v.get("size", 0) for v in self._languages.values()]) - for k, v in self._languages.items(): - v["prop"] = 100 * (v.get("size", 0) / langs_total) - - @property - async def name(self) -> str: - """ - :return: GitHub user's name (e.g., Jacob Strieb) - """ - if self._name is not None: - return self._name - await self.get_stats() - assert self._name is not None - return self._name - - @property - async def stargazers(self) -> int: - """ - :return: total number of stargazers on user's repos - """ - if self._stargazers is not None: - return self._stargazers - await self.get_stats() - assert self._stargazers is not None - return self._stargazers - - @property - async def forks(self) -> int: - """ - :return: total number of forks on user's repos - """ - if self._forks is not None: - return self._forks - await self.get_stats() - assert self._forks is not None - return self._forks - - @property - async def languages(self) -> Dict: - """ - :return: summary of languages used by the user - """ - if self._languages is not None: - return self._languages - await self.get_stats() - assert self._languages is not None - return self._languages - - @property - async def languages_proportional(self) -> Dict: - """ - :return: summary of languages used by the user, with proportional usage - """ - if self._languages is None: - await self.get_stats() - assert self._languages is not None - - return {k: v.get("prop", 0) for (k, v) in self._languages.items()} - - @property - async def repos(self) -> Set[str]: - """ - :return: list of names of user's repos - """ - if self._repos is not None: - return self._repos - await self.get_stats() - assert self._repos is not None - return self._repos - - @property - async def total_contributions(self) -> int: - """ - :return: count of user's total contributions as defined by GitHub - """ - 
if self._total_contributions is not None: - return self._total_contributions - - self._total_contributions = 0 - years = ( - (await self.queries.query(Queries.contrib_years())) - .get("data", {}) - .get("viewer", {}) - .get("contributionsCollection", {}) - .get("contributionYears", []) - ) - by_year = ( - (await self.queries.query(Queries.all_contribs(years))) - .get("data", {}) - .get("viewer", {}) - .values() - ) - for year in by_year: - self._total_contributions += year.get("contributionCalendar", {}).get( - "totalContributions", 0 - ) - return cast(int, self._total_contributions) - - @property - async def lines_changed(self) -> Tuple[int, int]: - """ - :return: count of total lines added, removed, or modified by the user - """ - if self._lines_changed is not None: - return self._lines_changed - additions = 0 - deletions = 0 - for repo in await self.repos: - r = await self.queries.query_rest(f"/repos/{repo}/stats/contributors") - for author_obj in r: - # Handle malformed response from the API by skipping this repo - if not isinstance(author_obj, dict) or not isinstance( - author_obj.get("author", {}), dict - ): - continue - author = author_obj.get("author", {}).get("login", "") - if author != self.username: - continue - - for week in author_obj.get("weeks", []): - additions += week.get("a", 0) - deletions += week.get("d", 0) - - self._lines_changed = (additions, deletions) - return self._lines_changed - - @property - async def views(self) -> int: - """ - Note: only returns views for the last 14 days (as-per GitHub API) - :return: total number of page views the user's projects have received - """ - if self._views is not None: - return self._views - - total = 0 - for repo in await self.repos: - r = await self.queries.query_rest(f"/repos/{repo}/traffic/views") - for view in r.get("views", []): - total += view.get("count", 0) - - self._views = total - return total - - -############################################################################### -# Main Function 
-############################################################################### - - -async def main() -> None: - """ - Used mostly for testing; this module is not usually run standalone - """ - access_token = os.getenv("ACCESS_TOKEN") - user = os.getenv("GITHUB_ACTOR") - if access_token is None or user is None: - raise RuntimeError( - "ACCESS_TOKEN and GITHUB_ACTOR environment variables cannot be None!" - ) - async with aiohttp.ClientSession() as session: - s = Stats(user, access_token, session) - print(await s.to_str()) - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 84b68da70b9..00000000000 --- a/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -requests -aiohttp \ No newline at end of file diff --git a/src/argparse.zig b/src/argparse.zig new file mode 100644 index 00000000000..701e52c39b2 --- /dev/null +++ b/src/argparse.zig @@ -0,0 +1,230 @@ +const std = @import("std"); + +// Since parse is the only public function, these variables can be set there and +// used globally. 
// ---- src/argparse.zig (continued) ----

// Writers for usage/error output. Since parse() is the only public entry
// point, it installs these once and the private helpers use them globally.
var stdout: *std.Io.Writer = undefined;
var stderr: *std.Io.Writer = undefined;

/// Populate a value of struct type `T` from, in priority order: CLI
/// arguments, then environment variables, then struct field defaults.
/// Prints usage and exits the process on -h/--help, on an unknown or
/// missing required argument, or when `errorCheck` rejects the result.
/// String fields are duplicated with `allocator` and owned by the caller;
/// all transient state lives in a local arena.
pub fn parse(
    allocator: std.mem.Allocator,
    T: type,
    errorCheck: ?fn (args: T, stderr: *std.Io.Writer) anyerror!bool,
) !T {
    var stdout_writer = std.fs.File.stdout().writer(&.{});
    stdout = &stdout_writer.interface;
    var stderr_writer = std.fs.File.stderr().writer(&.{});
    stderr = &stderr_writer.interface;

    var arena: std.heap.ArenaAllocator = .init(allocator);
    defer arena.deinit();
    const a = arena.allocator();

    const fields = @typeInfo(T).@"struct".fields;
    // Tracks which fields have been assigned, so CLI wins over env wins over
    // defaults, and so errdefer frees only fields actually populated.
    var seen = [_]bool{false} ** fields.len;
    var result: T = undefined;
    errdefer {
        inline for (fields, seen) |field, seen_field| {
            if (seen_field) {
                free_field(allocator, @field(result, field.name));
            }
        }
    }

    const args = try std.process.argsAlloc(a);
    defer std.process.argsFree(a, args);
    try setFromCli(T, allocator, &arena, args, &seen, &result);
    try setFromEnv(T, allocator, &arena, &seen, &result);
    try setFromDefaults(T, allocator, &seen, &result);

    // Anything still unset: bools are implicitly false; every other type is
    // a required argument the user failed to supply.
    inline for (fields, seen) |field, seen_field| {
        if (!seen_field) {
            if (@typeInfo(strip_optional(field.type)) == .bool) {
                @field(result, field.name) = false;
            } else {
                try stderr.print(
                    "Missing required argument {s}\n",
                    .{field.name},
                );
                try printUsage(T, arena.allocator(), args[0]);
                std.process.exit(1);
            }
        }
    }

    if (errorCheck) |check| {
        if (!(try check(result, stderr))) {
            try printUsage(T, arena.allocator(), args[0]);
            std.process.exit(1);
        }
    }

    return result;
}

/// First pass: assign fields from `--flag [value]` argv pairs. Flag names
/// are matched case-insensitively against field names with '-' normalized
/// to '_'. Bool fields are set by flag presence alone; other fields consume
/// the following argv entry.
/// NOTE(review): a flag passed twice fails the `!seen_field.*` test on its
/// second occurrence and is reported as "Unknown argument", which is
/// misleading — confirm whether duplicates should get their own message.
fn setFromCli(
    T: type,
    allocator: std.mem.Allocator,
    arena: *std.heap.ArenaAllocator,
    args: []const []const u8,
    seen: []bool,
    result: *T,
) !void {
    const a = arena.allocator();
    var i: usize = 1;
    args: while (i < args.len) : (i += 1) {
        const raw_arg = args[i];
        if (std.mem.eql(u8, raw_arg, "-h") or
            std.mem.eql(u8, raw_arg, "--help"))
        {
            try printUsage(T, arena.allocator(), args[0]);
            std.process.exit(0);
        }

        // TODO: Handle one-letter arguments
        if (!std.mem.startsWith(u8, raw_arg, "--")) {
            try stderr.print("Unknown argument: '{s}'\n", .{raw_arg});
            try printUsage(T, arena.allocator(), args[0]);
            std.process.exit(1);
        }

        // Normalize "--some-flag" to "some_flag" for field-name comparison.
        const arg = try a.dupe(u8, raw_arg[2..]);
        defer a.free(arg);
        std.mem.replaceScalar(u8, arg, '-', '_');
        inline for (@typeInfo(T).@"struct".fields, seen) |field, *seen_field| {
            if (!seen_field.* and std.ascii.eqlIgnoreCase(arg, field.name)) {
                const t = @typeInfo(strip_optional(field.type));
                if (t == .bool) {
                    @field(result, field.name) = true;
                } else {
                    // Non-bool flags take the next argv entry as their value.
                    i += 1;
                    if (i >= args.len) {
                        try stderr.print(
                            "Missing required value for argument {s} {s}\n",
                            .{ raw_arg, field.name },
                        );
                        try printUsage(T, arena.allocator(), args[0]);
                        std.process.exit(1);
                    }
                    switch (t) {
                        // TODO
                        .int, .float, .@"enum" => comptime unreachable,
                        // Duplicated with the caller's allocator: the value
                        // must outlive the arena, and the caller frees it.
                        .pointer => @field(
                            result,
                            field.name,
                        ) = try allocator.dupe(u8, args[i]),
                        .bool => comptime unreachable,
                        else => @compileError(
                            "Disallowed struct field type.",
                        ),
                    }
                }
                seen_field.* = true;
                continue :args;
            }
        }

        // No field matched this flag.
        try stderr.print("Unknown argument: '{s}'\n", .{raw_arg});
        try printUsage(T, arena.allocator(), args[0]);
        std.process.exit(1);
    }
}

/// Second pass: assign fields not already set from the CLI from environment
/// variables whose names match field names (case-insensitive, '-'
/// normalized to '_'). A bool env var is true unless it is empty or equal
/// (ignoring case) to "false".
fn setFromEnv(
    T: type,
    allocator: std.mem.Allocator,
    arena: *std.heap.ArenaAllocator,
    seen: []bool,
    result: *T,
) !void {
    const a = arena.allocator();
    var env = try std.process.getEnvMap(a);
    defer env.deinit();
    var iterator = env.iterator();
    while (iterator.next()) |entry| {
        const key = try a.dupe(u8, entry.key_ptr.*);
        defer a.free(key);
        std.mem.replaceScalar(u8, key, '-', '_');
        inline for (@typeInfo(T).@"struct".fields, seen) |field, *seen_field| {
            if (!seen_field.* and std.ascii.eqlIgnoreCase(key, field.name)) {
                switch (@typeInfo(strip_optional(field.type))) {
                    .bool => {
                        const value = try a.dupe(u8, entry.value_ptr.*);
                        defer a.free(value);
                        @field(result, field.name) =
                            value.len > 0 and
                            !std.ascii.eqlIgnoreCase(value, "false");
                    },
                    // TODO
                    .int, .float, .@"enum" => comptime unreachable,
                    // Owned by the caller's allocator (see setFromCli).
                    .pointer => @field(
                        result,
                        field.name,
                    ) = try allocator.dupe(u8, entry.value_ptr.*),
                    else => @compileError("Disallowed struct field type."),
                }
                seen_field.* = true;
            }
        }
    }
}

/// Final pass: fill any remaining unset fields from their declared struct
/// defaults. Pointer defaults are duplicated so ownership is uniform.
/// NOTE(review): the `if (default) |p|` unwrap assumes every pointer-typed
/// field with a default is optional (as in Args); a non-optional pointer
/// default would not compile here — confirm this is intended.
fn setFromDefaults(
    T: type,
    allocator: std.mem.Allocator,
    seen: []bool,
    result: *T,
) !void {
    inline for (@typeInfo(T).@"struct".fields, seen) |field, *seen_field| {
        if (!seen_field.*) {
            if (field.defaultValue()) |default| {
                switch (@typeInfo(strip_optional(field.type))) {
                    .bool, .int, .float, .@"enum" => {
                        @field(result, field.name) = default;
                    },
                    .pointer => @field(
                        result,
                        field.name,
                    ) = if (default) |p| try allocator.dupe(u8, p) else null,
                    else => @compileError("Disallowed struct field type."),
                }
                seen_field.* = true;
            }
        }
    }
}

/// Print a usage summary to stdout, listing each field as a "--flag" option
/// ('_' rendered as '-'); bool flags are shown without a value placeholder.
fn printUsage(T: type, allocator: std.mem.Allocator, argv0: []const u8) !void {
    try stdout.print("Usage: {s} [options]\n\n", .{argv0});
    try stdout.print("Options:\n", .{});
    const fields = @typeInfo(T).@"struct".fields;
    inline for (fields) |field| {
        switch (@typeInfo(strip_optional(field.type))) {
            .bool => {
                const flag_version = try allocator.dupe(u8, field.name);
                defer allocator.free(flag_version);
                std.mem.replaceScalar(u8, flag_version, '_', '-');
                try stdout.print("--{s}\n", .{flag_version});
            },
            else => {
                const flag_version = try allocator.dupe(u8, field.name);
                defer allocator.free(flag_version);
                std.mem.replaceScalar(u8, flag_version, '_', '-');
                try stdout.print("--{s} {s}\n", .{ flag_version, field.name });
            },
        }
    }
}

/// Recursively unwrap ?T (and ??T, etc.) down to the underlying type.
fn strip_optional(T: type) type {
    const info = @typeInfo(T);
    if (info != .optional) return T;
    return strip_optional(info.optional.child);
}

/// Free a single parsed field: slices are freed, optionals are unwrapped,
/// scalar types need no cleanup.
fn free_field(allocator: std.mem.Allocator, field: anytype) void {
    switch (@typeInfo(@TypeOf(field))) {
        .pointer => allocator.free(field),
        .optional => if (field)
|v| free_field(allocator, v), + .bool, .int, .float, .@"enum" => {}, + else => @compileError("Disallowed struct field type."), + } +} diff --git a/src/glob.zig b/src/glob.zig new file mode 100644 index 00000000000..a296b16c5cf --- /dev/null +++ b/src/glob.zig @@ -0,0 +1,110 @@ +const std = @import("std"); + +/// Recursive-backtracking glob matching. Potentially very slow if there are a +/// lot of globs. Good enough for now, though. (If it's good enough for the GNU +/// glob function, it's good enough for me.) +/// +/// Max recursion depth is the number of stars in the globbing pattern plus one. +pub fn match(pattern: []const u8, s: []const u8) bool { + if (std.mem.indexOfScalar(u8, pattern, '*')) |star_offset| { + if (!(star_offset <= s.len and std.ascii.eqlIgnoreCase( + s[0..star_offset], + pattern[0..star_offset], + ))) { + return false; + } + const rest = pattern[star_offset + 1 ..]; + for (0..s.len + 1) |glob_end| { + if (match(rest, s[glob_end..])) { + return true; + } + } + return false; + } else { + return std.ascii.eqlIgnoreCase(pattern, s); + } +} + +pub fn matchAny(patterns: []const []const u8, s: []const u8) bool { + for (patterns) |pattern| { + if (match(pattern, s)) { + return true; + } + } + return false; +} + +test match { + const testing = std.testing; + + try testing.expect(match("", "")); + try testing.expect(match("*", "")); + try testing.expect(match("**", "")); + try testing.expect(match("***", "")); + + try testing.expect(match("*", "a")); + try testing.expect(match("**", "a")); + try testing.expect(match("***", "a")); + + try testing.expect(match("*", "abcd")); + try testing.expect(match("**", "abcd")); + try testing.expect(match("****", "abcd")); + try testing.expect(match("****d", "abcd")); + try testing.expect(match("a****", "abcd")); + try testing.expect(match("a****d", "abcd")); + try testing.expect(!match("****c", "abcd")); + + try testing.expect(match("abc", "abc")); + try testing.expect(!match("abc", "abcd")); + try 
testing.expect(!match("abc", "dabc")); + try testing.expect(!match("abc", "dabcd")); + + try testing.expect(match("*abc", "dabc")); + try testing.expect(!match("*abc", "dabcd")); + + try testing.expect(match("abc*", "abcd")); + try testing.expect(!match("abc*", "dabcd")); + + try testing.expect(match("*abc*", "abc")); + try testing.expect(match("*abc*", "dabc")); + try testing.expect(match("*abc*", "abcd")); + try testing.expect(match("*abc*", "dabcd")); + + try testing.expect(!match("*c*", "this is a test")); + try testing.expect(match("*e*", "this is a test")); + + try testing.expect(match("som*thing", "something")); + try testing.expect(match("som*thing", "someeeething")); + try testing.expect(match("som*thing", "som thing")); + try testing.expect(match("som*thing", "somabcthing")); + try testing.expect(match("som*thing", "somthing")); + + try testing.expect(match( + "s*a" ++ "*s" ** 8, + "s" ** 10 ++ "a" ++ "s" ** 10, + )); + try testing.expect(match( + "s" ++ "*s" ** 8, + "s" ** 10 ++ "a" ++ "s" ** 10, + )); + try testing.expect(match( + "s*" ** 8 ++ "a*s", + "s" ** 10 ++ "a" ++ "s" ** 10, + )); + // Trigger slow (exponential) worst-case + try testing.expect(!match("s*" ** 8 ++ "a", "s" ** 30)); + + // Globbing here doesn't separate on slashes like globbing in the shell + try testing.expect(match("*", "///")); + try testing.expect(match("*", "/asdf//")); + try testing.expect(match("/*sdf/*/*", "/asdf//")); + try testing.expect(match("/*sdf/*", "/asdf//")); +} + +test matchAny { + const testing = std.testing; + + try testing.expect(matchAny(&.{ "*waw", "wew*", "wow", "www" }, "wow")); + try testing.expect(!matchAny(&.{ "*waw", "wew*", "www" }, "wow")); + try testing.expect(matchAny(&.{ "w*w", "www" }, "wow")); +} diff --git a/src/http_client.zig b/src/http_client.zig new file mode 100644 index 00000000000..f735593b1eb --- /dev/null +++ b/src/http_client.zig @@ -0,0 +1,151 @@ +//! Naive, unoptimized HTTP client with .get and .post methods. Simple, and not +//! 
//! particularly efficient. Response bodies stay allocated for the lifetime of
//! the client.

const std = @import("std");

// `gpa` owns the heap-allocated arena object itself; the arena backs the
// HTTP client, the bearer string, and every response body until deinit.
gpa: std.mem.Allocator,
arena: *std.heap.ArenaAllocator,
client: std.http.Client,
bearer: []const u8,

const Self = @This();
// Tuple of (response body, HTTP status code).
const Response = struct { []const u8, std.http.Status };

/// Create a client that authenticates every request with `token` as an
/// HTTP bearer token. The arena is heap-allocated so the returned Self
/// remains freely copyable.
pub fn init(allocator: std.mem.Allocator, token: []const u8) !Self {
    const arena = try allocator.create(std.heap.ArenaAllocator);
    arena.* = std.heap.ArenaAllocator.init(allocator);
    const a = arena.allocator();
    return .{
        .gpa = allocator,
        .arena = arena,
        .client = .{ .allocator = a },
        .bearer = try std.fmt.allocPrint(a, "Bearer {s}", .{token}),
    };
}

/// Release the client, all accumulated response bodies, and the arena.
pub fn deinit(self: *Self) void {
    self.client.deinit();
    self.arena.deinit();
    self.gpa.destroy(self.arena);
}

/// HTTP GET `url`, returning the arena-owned body and status. `retries`
/// bounds the recursion used to work around dropped keep-alive connections
/// (see comment below); passing a negative value fails immediately.
pub fn get(
    self: *Self,
    url: []const u8,
    headers: std.http.Client.Request.Headers,
    extra_headers: []const std.http.Header,
    retries: isize,
) !Response {
    if (retries <= -1) {
        return error.TooManyRetries;
    }

    var writer = try std.Io.Writer.Allocating.initCapacity(
        self.arena.allocator(),
        1024,
    );
    errdefer writer.deinit();
    const status = (try (self.client.fetch(.{
        .location = .{ .url = url },
        .response_writer = &writer.writer,
        .headers = headers,
        .extra_headers = extra_headers,
    }) catch |err| switch (err) {
        error.HttpConnectionClosing => {
            // Handle a Zig HTTP bug where keep-alive connections are closed by
            // the server after a timeout, but the client doesn't handle it
            // properly. For now we nuke the whole client (and associated
            // connection pool) and make a new one, but there might be a better
            // way to handle this.
            std.log.debug(
                "Keep alive connection closed. Initializing a new client.",
                .{},
            );
            self.client.deinit();
            self.client = .{ .allocator = self.arena.allocator() };
            return self.get(url, headers, extra_headers, retries - 1);
        },
        else => err,
    })).status;
    return .{ try writer.toOwnedSlice(), status };
}

/// HTTP POST `body` to `url`, returning the arena-owned response body and
/// status. Retries on dropped keep-alive connections exactly like get().
pub fn post(
    self: *Self,
    url: []const u8,
    body: []const u8,
    headers: std.http.Client.Request.Headers,
    retries: isize,
) !Response {
    if (retries <= -1) {
        return error.TooManyRetries;
    }

    var writer = try std.Io.Writer.Allocating.initCapacity(
        self.arena.allocator(),
        1024,
    );
    errdefer writer.deinit();
    const status = (try (self.client.fetch(.{
        .location = .{ .url = url },
        .response_writer = &writer.writer,
        .payload = body,
        .headers = headers,
    }) catch |err| switch (err) {
        error.HttpConnectionClosing => {
            // Handle a Zig HTTP bug where keep-alive connections are closed by
            // the server after a timeout, but the client doesn't handle it
            // properly. For now we nuke the whole client (and associated
            // connection pool) and make a new one, but there might be a better
            // way to handle this.
            std.log.debug(
                "Keep alive connection closed. Initializing a new client.",
                .{},
            );
            self.client.deinit();
            self.client = .{ .allocator = self.arena.allocator() };
            return self.post(url, body, headers, retries - 1);
        },
        else => err,
    })).status;
    return .{ try writer.toOwnedSlice(), status };
}

/// POST a GraphQL query (with `variables` serialized alongside it) to the
/// GitHub GraphQL endpoint, retrying up to 8 times.
pub fn graphql(
    self: *Self,
    body: []const u8,
    variables: anytype,
) !Response {
    // Scratch arena just for the JSON payload; post() copies nothing from it
    // after returning, so it is safe to free once the request completes.
    var arena = std.heap.ArenaAllocator.init(self.arena.allocator());
    defer arena.deinit();
    const allocator = arena.allocator();

    return try self.post(
        "https://api.github.com/graphql",
        try std.json.Stringify.valueAlloc(allocator, .{
            .query = body,
            .variables = variables,
        }, .{}),
        .{
            .authorization = .{ .override = self.bearer },
            .content_type = .{ .override = "application/json" },
        },
        8,
    );
}

/// GET a GitHub REST endpoint with the pinned API version header, retrying
/// up to 8 times.
pub fn rest(
    self: *Self,
    url: []const u8,
) !Response {
    return try self.get(
        url,
        .{
            .authorization = .{ .override = self.bearer },
            .content_type = .{ .override = "application/json" },
        },
        &.{.{ .name = "X-GitHub-Api-Version", .value = "2022-11-28" }},
        8,
    );
}

// ---- src/main.zig ----

const builtin = @import("builtin");
const std = @import("std");

const argparse = @import("argparse.zig");
const glob = @import("glob.zig");

const HttpClient = @import("http_client.zig");
const Statistics = @import("statistics.zig");

pub const std_options: std.Options = .{
    .logFn = logFn,
    // Even though we change it later, this is necessary to ensure that debug
    // logs aren't stripped in release builds.
+ .log_level = .debug, +}; + +var log_level: std.log.Level = switch (builtin.mode) { + .Debug => .debug, + else => .warn, +}; + +fn logFn( + comptime message_level: std.log.Level, + comptime scope: @TypeOf(.enum_literal), + comptime format: []const u8, + args: anytype, +) void { + if (@intFromEnum(message_level) <= @intFromEnum(log_level)) { + std.log.defaultLog(message_level, scope, format, args); + } +} + +const Args = struct { + api_key: ?[]const u8 = null, + json_input_file: ?[]const u8 = null, + json_output_file: ?[]const u8 = null, + silent: bool = false, + debug: bool = false, + verbose: bool = false, + excluded_repos: ?[]const u8 = null, + excluded_langs: ?[]const u8 = null, + exclude_private: bool = false, + overview_output_file: ?[]const u8 = null, + languages_output_file: ?[]const u8 = null, + overview_template: ?[]const u8 = null, + languages_template: ?[]const u8 = null, + + const Self = @This(); + + pub fn init(allocator: std.mem.Allocator) !Self { + return try argparse.parse(allocator, Self, struct { + fn errorCheck(a: Self, stderr: *std.Io.Writer) !bool { + if (a.api_key == null and a.json_input_file == null) { + try stderr.print( + "You must pass either an input file or an API key.\n", + .{}, + ); + return false; + } + return true; + } + }.errorCheck); + } + + pub fn deinit(self: Self, allocator: std.mem.Allocator) void { + if (self.api_key) |s| allocator.free(s); + if (self.json_input_file) |s| allocator.free(s); + if (self.json_output_file) |s| allocator.free(s); + if (self.excluded_repos) |s| allocator.free(s); + if (self.excluded_langs) |s| allocator.free(s); + if (self.overview_output_file) |s| allocator.free(s); + if (self.languages_output_file) |s| allocator.free(s); + if (self.overview_template) |s| allocator.free(s); + if (self.languages_template) |s| allocator.free(s); + } +}; + +fn overview( + arena: *std.heap.ArenaAllocator, + stats: anytype, + template: []const u8, +) ![]const u8 { + const a = arena.allocator(); + var out_data = 
template;
    // Vulnerable to template injection. In practice, this should never happen.
    inline for (@typeInfo(@TypeOf(stats)).@"struct".fields) |field| {
        switch (@typeInfo(field.type)) {
            .int => {
                out_data = try std.mem.replaceOwned(
                    u8,
                    a,
                    out_data,
                    "{{ " ++ field.name ++ " }}",
                    try decimalToString(a, @field(stats, field.name)),
                );
            },
            .pointer => {
                out_data = try std.mem.replaceOwned(
                    u8,
                    a,
                    out_data,
                    "{{ " ++ field.name ++ " }}",
                    @field(stats, field.name),
                );
            },
            // Struct-typed fields (the language maps) are rendered by
            // languages() below, not by placeholder substitution here.
            .@"struct" => {},
            else => comptime unreachable,
        }
    }
    return out_data;
}

/// Render the languages SVG: for each language (already sorted by size in
/// main) build one progress-bar segment and one legend entry, then splice
/// the concatenations into the "{{ progress }}" and "{{ lang_list }}"
/// placeholders of `template`. The result is arena-owned.
fn languages(
    arena: *std.heap.ArenaAllocator,
    stats: anytype,
    template: []const u8,
) ![]const u8 {
    const a = arena.allocator();
    const progress = try a.alloc([]const u8, stats.languages.count());
    const lang_list = try a.alloc([]const u8, stats.languages.count());
    for (
        stats.languages.keys(),
        stats.languages.values(),
        progress,
        lang_list,
        0..,
    ) |language, count, *progress_s, *lang_s, i| {
        const color = stats.language_colors.get(language);
        // Percentage share of this language; guard against a zero total so
        // an empty language set cannot divide by zero.
        const percent =
            100 * if (stats.languages_total == 0)
                0.0
            else
                @as(f64, @floatFromInt(count)) /
                    @as(f64, @floatFromInt(stats.languages_total));
        // NOTE(review): the SVG markup inside the two multiline string
        // literals below appears to have been stripped/garbled in this copy
        // of the source (the format-argument tuples expect placeholders that
        // are no longer visible) — restore these templates from the
        // canonical file before building; do not trust the bytes shown here.
        progress_s.* = try std.fmt.allocPrint(a,
            \\
        , .{ color orelse "#000", percent });
        lang_s.* = try std.fmt.allocPrint(a,
            \\  •
            \\
            \\ {s}
            \\ {d:.2}%
            \\
            \\  •
            \\
        , .{ (i + 1) * 150, color orelse "#000", language, percent });
    }
    // Vulnerable to template injection. In practice, this should never happen.
    return try std.mem.replaceOwned(u8, a, try std.mem.replaceOwned(
        u8,
        a,
        template,
        "{{ lang_list }}",
        try std.mem.concat(a, u8, lang_list),
    ), "{{ progress }}", try std.mem.concat(a, u8, progress));
}

pub fn main() !void {
    var gpa: std.heap.GeneralPurposeAllocator(.{}) = .init;
    defer _ = gpa.deinit();
    const allocator = gpa.allocator();

    const args = try Args.init(allocator);
    defer args.deinit(allocator);
    // Verbosity flags tighten/loosen the runtime log filter; --silent wins
    // over --debug wins over --verbose.
    if (args.silent) {
        log_level = .err;
    } else if (args.debug) {
        log_level = .debug;
    } else if (args.verbose) {
        log_level = .info;
    }
    // Repo patterns split on spaces as well as the other separators; the
    // slices borrow from args.excluded_repos, so only the outer list is
    // freed here.
    const excluded_repos = if (args.excluded_repos) |excluded| excluded: {
        var list = try std.ArrayList([]const u8).initCapacity(allocator, 16);
        errdefer list.deinit(allocator);
        var iterator = std.mem.tokenizeAny(u8, excluded, ", \t\r\n|\"'\x00");
        while (iterator.next()) |pattern| {
            try list.append(allocator, pattern);
        }
        break :excluded try list.toOwnedSlice(allocator);
    } else null;
    defer if (excluded_repos) |excluded| allocator.free(excluded);
    // Language patterns deliberately do NOT split on spaces (names like
    // "Jupyter Notebook" contain them); tokens are trimmed instead.
    const excluded_langs = if (args.excluded_langs) |excluded| excluded: {
        var list = try std.ArrayList([]const u8).initCapacity(allocator, 16);
        errdefer list.deinit(allocator);
        var iterator = std.mem.tokenizeAny(u8, excluded, ",\t\r\n|\"'\x00");
        while (iterator.next()) |pattern| {
            try list.append(allocator, std.mem.trim(u8, pattern, " "));
        }
        break :excluded try list.toOwnedSlice(allocator);
    } else null;
    defer if (excluded_langs) |excluded| allocator.free(excluded);

    // Statistics come from a previously-saved JSON snapshot or live from the
    // GitHub API; Args.init guarantees at least one source is configured.
    var stats: Statistics = undefined;
    if (args.json_input_file) |path| {
        const data = try readFile(allocator, path);
        defer allocator.free(data);
        stats = try Statistics.initFromJson(allocator, data);
    } else if (args.api_key) |api_key| {
        std.log.info("Collecting statistics from GitHub API", .{});
var client: HttpClient = try .init(allocator, api_key); + defer client.deinit(); + stats = try Statistics.init(&client, allocator); + } else unreachable; + defer stats.deinit(allocator); + + if (args.json_output_file) |path| { + var arena = std.heap.ArenaAllocator.init(allocator); + defer arena.deinit(); + try writeFile( + path, + try std.json.Stringify.valueAlloc( + arena.allocator(), + stats, + .{ .whitespace = .indent_2 }, + ), + ); + } + + var aggregate_stats: struct { + languages: std.StringArrayHashMap(u64), + language_colors: std.StringArrayHashMap([]const u8), + contributions: usize, + name: []const u8, + languages_total: usize = 0, + stars: usize = 0, + forks: usize = 0, + lines_changed: usize = 0, + views: usize = 0, + repos: usize = 0, + } = .{ + .contributions = stats.repo_contributions + + stats.issue_contributions + + stats.commit_contributions + + stats.pr_contributions + + stats.review_contributions, + .languages = .init(allocator), + .language_colors = .init(allocator), + .name = stats.name, + }; + defer aggregate_stats.languages.deinit(); + defer aggregate_stats.language_colors.deinit(); + for (stats.repositories) |repository| { + if (glob.matchAny(excluded_repos orelse &.{}, repository.name) or + (args.exclude_private and repository.private)) + { + continue; + } + aggregate_stats.stars += repository.stars; + aggregate_stats.forks += repository.forks; + aggregate_stats.lines_changed += repository.lines_changed; + aggregate_stats.views += repository.views; + aggregate_stats.repos += 1; + if (repository.languages) |langs| for (langs) |language| { + if (glob.matchAny(excluded_langs orelse &.{}, language.name)) { + continue; + } + if (language.color) |color| { + try aggregate_stats.language_colors.put(language.name, color); + } + var total = aggregate_stats.languages.get(language.name) orelse 0; + total += language.size; + try aggregate_stats.languages.put(language.name, total); + aggregate_stats.languages_total += language.size; + }; + } + 
aggregate_stats.languages.sort(struct { + values: @TypeOf(aggregate_stats.languages.values()), + pub fn lessThan(self: @This(), a: usize, b: usize) bool { + // Sort in reverse order + return self.values[a] > self.values[b]; + } + }{ .values = aggregate_stats.languages.values() }); + + { + var arena = std.heap.ArenaAllocator.init(allocator); + defer arena.deinit(); + + try writeFile( + args.overview_output_file orelse "overview.svg", + try overview( + &arena, + aggregate_stats, + if (args.overview_template) |template| + try readFile(arena.allocator(), template) + else + @embedFile("templates/overview.svg"), + ), + ); + + try writeFile( + args.languages_output_file orelse "languages.svg", + try languages( + &arena, + aggregate_stats, + if (args.languages_template) |template| + try readFile(arena.allocator(), template) + else + @embedFile("templates/languages.svg"), + ), + ); + } +} + +test { + std.testing.refAllDecls(@This()); +} + +fn readFile(allocator: std.mem.Allocator, path: []const u8) ![]const u8 { + std.log.info("Reading data from '{s}'", .{path}); + const in = + if (std.mem.eql(u8, path, "-")) + std.fs.File.stdin() + else + try std.fs.cwd().openFile(path, .{}); + defer if (!std.mem.eql(u8, path, "-")) in.close(); + var read_buffer: [64 * 1024]u8 = undefined; + var reader = in.reader(&read_buffer); + return try (&reader.interface).allocRemaining(allocator, .unlimited); +} + +fn writeFile( + path: []const u8, + data: []const u8, +) !void { + std.log.info("Writing data to '{s}'", .{path}); + const out = + if (std.mem.eql(u8, path, "-")) + std.fs.File.stdout() + else + try std.fs.cwd().createFile(path, .{}); + defer if (!std.mem.eql(u8, path, "-")) out.close(); + var write_buffer: [64 * 1024]u8 = undefined; + var writer = out.writer(&write_buffer); + try writer.interface.writeAll(data); + try writer.interface.flush(); +} + +fn decimalToString(allocator: std.mem.Allocator, n: anytype) ![]const u8 { + const info = @typeInfo(@TypeOf(n)); + if (info != .int or 
info.int.signedness != .unsigned) { + @compileError("Only implemented for unsigned integers."); + } + + const s = try std.fmt.allocPrint(allocator, "{d}", .{n}); + defer allocator.free(s); + const digits = s.len; + const commas = (digits - 1) / 3; + const result = try allocator.alloc(u8, digits + commas); + errdefer comptime unreachable; + + var i: usize = result.len - 1; + var j: usize = s.len - 1; + while (true) { + if ((result.len - i) % 4 == 0) { + result[i] = ','; + i -= 1; + } + result[i] = s[j]; + if (i == 0 and j == 0) { + break; + } else if (i > 0 and j > 0) {} else unreachable; + i -= 1; + j -= 1; + } + return result; +} diff --git a/src/statistics.zig b/src/statistics.zig new file mode 100644 index 00000000000..eeca8906d62 --- /dev/null +++ b/src/statistics.zig @@ -0,0 +1,558 @@ +const std = @import("std"); +const HttpClient = @import("http_client.zig"); + +repositories: []Repository, +user: []const u8, +name: []const u8, +repo_contributions: u32 = 0, +issue_contributions: u32 = 0, +commit_contributions: u32 = 0, +pr_contributions: u32 = 0, +review_contributions: u32 = 0, + +const Statistics = @This(); + +const Repository = struct { + name: []const u8, + stars: u32, + forks: u32, + languages: ?[]Language, + lines_changed: u32, + views: u32, + private: bool, + + pub fn deinit(self: @This(), allocator: std.mem.Allocator) void { + allocator.free(self.name); + if (self.languages) |languages| { + for (languages) |language| { + language.deinit(allocator); + } + allocator.free(languages); + } + } + + pub fn get_lines_changed( + self: *@This(), + arena: *std.heap.ArenaAllocator, + client: *HttpClient, + user: []const u8, + ) !std.http.Status { + std.log.debug( + "Trying to get lines of code changed for {s}...", + .{self.name}, + ); + const response, const status = try client.rest( + try std.mem.concat( + arena.allocator(), + u8, + &.{ + "https://api.github.com/repos/", + self.name, + "/stats/contributors", + }, + ), + ); + if (status == .ok) { + const authors = 
(try std.json.parseFromSliceLeaky( + []struct { + author: struct { login: []const u8 }, + weeks: []struct { + a: u32, + d: u32, + }, + }, + arena.allocator(), + response, + .{ .ignore_unknown_fields = true }, + )); + self.lines_changed = 0; + for (authors) |o| { + if (!std.mem.eql(u8, o.author.login, user)) { + continue; + } + for (o.weeks) |week| { + self.lines_changed += week.a; + self.lines_changed += week.d; + } + } + std.log.info( + "Got {d} line{s} changed by {s} in {s}", + .{ + self.lines_changed, + if (self.lines_changed != 1) "s" else "", + user, + self.name, + }, + ); + } + return status; + } +}; + +const Language = struct { + name: []const u8, + size: u32, + color: ?[]const u8 = null, + + pub fn deinit(self: @This(), allocator: std.mem.Allocator) void { + allocator.free(self.name); + if (self.color) |color| allocator.free(color); + } +}; + +pub fn init(client: *HttpClient, allocator: std.mem.Allocator) !Statistics { + var arena = std.heap.ArenaAllocator.init(allocator); + defer arena.deinit(); + + var self: Statistics = try get_repos(allocator, &arena, client); + errdefer self.deinit(allocator); + try self.get_lines_changed(&arena, client); + return self; +} + +pub fn initFromJson(allocator: std.mem.Allocator, s: []const u8) !Statistics { + var arena = std.heap.ArenaAllocator.init(allocator); + defer arena.deinit(); + + const parsed = try std.json.parseFromSliceLeaky( + Statistics, + arena.allocator(), + s, + .{ .ignore_unknown_fields = true }, + ); + return try deepcopy(allocator, parsed); +} + +pub fn deinit(self: Statistics, allocator: std.mem.Allocator) void { + for (self.repositories) |repository| { + repository.deinit(allocator); + } + allocator.free(self.repositories); + allocator.free(self.user); + allocator.free(self.name); +} + +fn get_basic_info( + client: *HttpClient, + allocator: std.mem.Allocator, +) !struct { []u32, []const u8, ?[]const u8 } { + std.log.info("Getting contribution years...", .{}); + const response, const status = try 
client.graphql( + \\query { + \\ viewer { + \\ login + \\ name + \\ contributionsCollection { + \\ contributionYears + \\ } + \\ } + \\} + , null); + if (status != .ok) { + std.log.err( + "Failed to get contribution years ({?s})", + .{status.phrase()}, + ); + return error.RequestFailed; + } + const parsed = (try std.json.parseFromSliceLeaky( + struct { data: struct { viewer: struct { + login: []const u8, + name: ?[]const u8, + contributionsCollection: struct { + contributionYears: []u32, + }, + } } }, + allocator, + response, + .{ .ignore_unknown_fields = true }, + )).data.viewer; + return .{ + parsed.contributionsCollection.contributionYears, + parsed.login, + parsed.name, + }; +} + +fn get_repos_by_year( + context: struct { + allocator: std.mem.Allocator, + arena: *std.heap.ArenaAllocator, + client: *HttpClient, + user: []const u8, + result: *Statistics, + seen: *std.StringHashMap(bool), + repositories: *std.ArrayList(Repository), + }, + year: usize, + start_month: usize, + months: usize, +) !void { + std.log.info( + "Getting {d} month{s} of data starting from {d}/{d}...", + .{ months, if (months != 1) "s" else "", start_month + 1, year }, + ); + var response, var status = try context.client.graphql( + \\query ($from: DateTime, $to: DateTime) { + \\ viewer { + \\ contributionsCollection(from: $from, to: $to) { + \\ totalRepositoryContributions + \\ totalIssueContributions + \\ totalCommitContributions + \\ totalPullRequestContributions + \\ totalPullRequestReviewContributions + \\ commitContributionsByRepository(maxRepositories: 100) { + \\ repository { + \\ nameWithOwner + \\ stargazerCount + \\ forkCount + \\ isPrivate + \\ languages( + \\ first: 100, + \\ orderBy: { direction: DESC, field: SIZE } + \\ ) { + \\ edges { + \\ size + \\ node { + \\ name + \\ color + \\ } + \\ } + \\ } + \\ } + \\ } + \\ } + \\ } + \\} + , + .{ + .from = try std.fmt.allocPrint( + context.arena.allocator(), + "{d}-{d:02}-01T00:00:00Z", + .{ year, start_month + 1 }, + ), + .to = try 
std.fmt.allocPrint( + context.arena.allocator(), + "{d}-{d:02}-01T00:00:00Z", + .{ + year + (start_month + months) / 12, + (start_month + months) % 12 + 1, + }, + ), + }, + ); + if (status != .ok) { + std.log.err( + "Failed to get data from {d} ({?s})", + .{ year, status.phrase() }, + ); + return error.RequestFailed; + } + const stats = (try std.json.parseFromSliceLeaky( + struct { data: struct { viewer: struct { + contributionsCollection: struct { + totalRepositoryContributions: u32, + totalIssueContributions: u32, + totalCommitContributions: u32, + totalPullRequestContributions: u32, + totalPullRequestReviewContributions: u32, + commitContributionsByRepository: []struct { + repository: struct { + nameWithOwner: []const u8, + stargazerCount: u32, + forkCount: u32, + isPrivate: bool, + languages: ?struct { + edges: ?[]struct { + size: u32, + node: struct { + name: []const u8, + color: ?[]const u8, + }, + }, + }, + }, + }, + }, + } } }, + context.arena.allocator(), + response, + .{ .ignore_unknown_fields = true }, + )).data.viewer.contributionsCollection; + std.log.info( + "Parsed {d} total repositories from {d}", + .{ stats.commitContributionsByRepository.len, year }, + ); + + const limit = 100; + if (stats.commitContributionsByRepository.len >= limit) { + for (&[_]usize{ 2, 3 }) |factor| { + if (months % factor == 0) { + for (0..factor) |i| { + try get_repos_by_year( + context, + year, + start_month + (months / factor) * i, + months / factor, + ); + } + return; + } + } else { + std.log.warn( + "More than {d} repos returned for {d}/{d}. 
" ++ + "Some data may be omitted due to GitHub API limitations.", + .{ limit, start_month + 1, year }, + ); + } + } + + context.result.repo_contributions += stats.totalRepositoryContributions; + context.result.issue_contributions += stats.totalIssueContributions; + context.result.commit_contributions += stats.totalCommitContributions; + context.result.pr_contributions += stats.totalPullRequestContributions; + context.result.review_contributions += + stats.totalPullRequestReviewContributions; + + for (stats.commitContributionsByRepository) |x| { + const raw_repo = x.repository; + if (context.seen.get(raw_repo.nameWithOwner) orelse false) { + std.log.debug( + "Skipping {s} (seen)", + .{raw_repo.nameWithOwner}, + ); + continue; + } + var repository = Repository{ + .name = try context.allocator.dupe(u8, raw_repo.nameWithOwner), + .stars = raw_repo.stargazerCount, + .forks = raw_repo.forkCount, + .private = raw_repo.isPrivate, + .languages = null, + .views = 0, + .lines_changed = 0, + }; + errdefer repository.deinit(context.allocator); + if (raw_repo.languages) |repo_languages| { + if (repo_languages.edges) |raw_languages| { + repository.languages = try context.allocator.alloc( + Language, + raw_languages.len, + ); + errdefer { + context.allocator.free(repository.languages.?); + repository.languages = null; + } + for ( + raw_languages, + repository.languages.?, + 0.., + ) |raw, *language, i| { + errdefer { + for (0..i, repository.languages.?) 
|_, l| { + context.allocator.free(l.name); + if (l.color) |c| context.allocator.free(c); + } + } + language.* = .{ + .name = try context.allocator.dupe(u8, raw.node.name), + .size = raw.size, + }; + errdefer context.allocator.free(language.name); + if (raw.node.color) |color| { + language.color = try context.allocator.dupe(u8, color); + } + errdefer if (language.color) |c| context.allocator.free(c); + } + } + } + + std.log.info( + "Getting views for {s}...", + .{raw_repo.nameWithOwner}, + ); + response, status = try context.client.rest( + try std.mem.concat( + context.arena.allocator(), + u8, + &.{ + "https://api.github.com/repos/", + raw_repo.nameWithOwner, + "/traffic/views", + }, + ), + ); + if (status == .ok) { + repository.views = (try std.json.parseFromSliceLeaky( + struct { count: u32 }, + context.arena.allocator(), + response, + .{ .ignore_unknown_fields = true }, + )).count; + } else { + std.log.info( + "Failed to get views for {s} ({?s})", + .{ raw_repo.nameWithOwner, status.phrase() }, + ); + } + + _ = try repository.get_lines_changed( + context.arena, + context.client, + context.user, + ); + + try context.seen.put(raw_repo.nameWithOwner, true); + try context.repositories.append(context.allocator, repository); + } +} + +fn get_repos( + allocator: std.mem.Allocator, + arena: *std.heap.ArenaAllocator, + client: *HttpClient, +) !Statistics { + var result: Statistics = .{ + .user = undefined, + .name = undefined, + .repositories = undefined, + }; + var repositories: std.ArrayList(Repository) = + try .initCapacity(allocator, 32); + errdefer { + for (repositories.items) |repo| { + repo.deinit(allocator); + } + repositories.deinit(allocator); + } + var seen: std.StringHashMap(bool) = .init(arena.allocator()); + defer seen.deinit(); + + const years, const user, const name = + try get_basic_info(client, arena.allocator()); + if (name) |n| { + std.log.info("Getting data for {s} ({s})...", .{ n, user }); + } else { + std.log.info("Getting data for user {s}...", 
.{user}); + } + for (years) |year| { + try get_repos_by_year(.{ + .allocator = allocator, + .arena = arena, + .client = client, + .user = user, + .result = &result, + .seen = &seen, + .repositories = &repositories, + }, year, 0, 12); + } + + result.repositories = try repositories.toOwnedSlice(allocator); + errdefer { + for (result.repositories) |repository| { + repository.deinit(allocator); + } + allocator.free(result.repositories); + } + std.sort.pdq(Repository, result.repositories, {}, struct { + pub fn lessThanFn(_: void, lhs: Repository, rhs: Repository) bool { + if (rhs.views == lhs.views) { + return rhs.stars + rhs.forks < lhs.stars + lhs.forks; + } + return rhs.views < lhs.views; + } + }.lessThanFn); + + result.user = try allocator.dupe(u8, user); + errdefer allocator.free(result.user); + result.name = try allocator.dupe(u8, name orelse user); + errdefer allocator.free(result.name); + return result; +} + +fn get_lines_changed( + self: *Statistics, + arena: *std.heap.ArenaAllocator, + client: *HttpClient, +) !void { + const T = struct { + repo: *Repository, + delay: i64, + timestamp: i64, + }; + var q: std.PriorityQueue(T, void, struct { + pub fn compareFn(_: void, lhs: T, rhs: T) std.math.Order { + return std.math.order(lhs.timestamp, rhs.timestamp); + } + }.compareFn) = .init(arena.allocator(), {}); + defer q.deinit(); + for (self.repositories) |*repo| { + if (repo.lines_changed > 0) { + continue; + } + try q.add(.{ + .repo = repo, + .delay = 8, + .timestamp = std.time.timestamp(), + }); + } + while (q.count() > 0) { + var item = q.remove(); + const now = std.time.timestamp(); + if (item.timestamp > now) { + const delay: u64 = @intCast(item.timestamp - now); + std.log.debug("Sleeping for {d}s. 
Waiting for {d} repo{s}.", .{ + delay, + q.count() + 1, + if (q.count() != 0) "s" else "", + }); + std.Thread.sleep(delay * std.time.ns_per_s); + } + switch (try item.repo.get_lines_changed(arena, client, self.user)) { + .ok => {}, + .accepted => { + item.timestamp = std.time.timestamp() + item.delay; + // Exponential backoff (in expectation) with jitter + item.delay += + std.crypto.random.intRangeAtMost(i64, 2, item.delay); + item.delay = @min(item.delay, 600); + try q.add(item); + }, + else => |status| { + std.log.err( + "Failed to get contribution data for {s} ({?s})", + .{ item.repo.name, status.phrase() }, + ); + return error.RequestFailed; + }, + } + } +} + +// May not correctly free memory if there are errors during copying +fn deepcopy(a: std.mem.Allocator, o: anytype) !@TypeOf(o) { + return switch (@typeInfo(@TypeOf(o))) { + .pointer => |p| switch (p.size) { + .slice => v: { + const result = try a.dupe(p.child, o); + errdefer a.free(result); + for (o, result) |src, *dest| { + dest.* = try deepcopy(a, src); + } + break :v result; + }, + // Only slices in this struct + else => comptime unreachable, + }, + .@"struct" => |s| v: { + var result = o; + inline for (s.fields) |field| { + @field(result, field.name) = + try deepcopy(a, @field(o, field.name)); + } + break :v result; + }, + .optional => if (o) |v| try deepcopy(a, v) else null, + else => o, + }; +} diff --git a/templates/languages.svg b/src/templates/languages.svg similarity index 98% rename from templates/languages.svg rename to src/templates/languages.svg index 66b9b62844a..2d3aded586d 100644 --- a/templates/languages.svg +++ b/src/templates/languages.svg @@ -51,7 +51,7 @@ ul { li { display: inline-flex; font-size: 12px; - margin-right: 2ch; + margin-right: 1ch; align-items: center; flex-wrap: nowrap; transform: translateX(-500%); @@ -65,7 +65,7 @@ li { } div.ellipsis { - height: 100%; + height: 176px; overflow: hidden; text-overflow: ellipsis; } diff --git a/templates/overview.svg 
b/src/templates/overview.svg similarity index 100% rename from templates/overview.svg rename to src/templates/overview.svg