From e8418b70764227e2a0e71933e8151a42bb658d9e Mon Sep 17 00:00:00 2001 From: "F." Date: Wed, 8 Apr 2026 19:36:10 +0200 Subject: [PATCH 1/2] feat: modernize codebase with CI/CD, tooling, and Go idiom improvements Add GitHub Actions workflows for build, lint, test, security scanning (CodeQL, Codacy, gitleaks, govulncheck, gosec), SBOM generation, and SLSA provenance. Add Dependabot config for automated dependency updates. Add Makefile with targets for test, lint, vet, sec, and toolchain management. Add .golangci.yaml with comprehensive golangci-lint v2 configuration and .project-settings.env for shared tool version pinning. Refactor cron scheduler: - Replace sort.Sort(byTime) with slices.SortFunc and explicit comparators - Extract schedulerLoop() into focused helper methods (initializeEntries, processSchedulerEvent, handleTimerFired, handleEntryAdded, etc.) - Use sync.WaitGroup.Go in startJob instead of manual Add/Done - Wrap parse errors with context in AddJob via fmt.Errorf %w - Add errPanicValue sentinel and use %w wrapping in Recover() - Replace deprecated io/ioutil with io throughout tests Improve test quality: - Add t.Parallel() to all tests and subtests - Extract magic numbers and strings into named constants - Add test helpers (mustAddFunc, mustAddJob, requireContextDoneWithin, requireContextPendingFor, waitForJobStarted, signalJobStarted) - Refactor TestStopAndWait and TestChainSkipIfStillRunning subtests into separate top-level functions for clarity - Update doc.go and README.md to modern Go doc-comment format --- .github/dependabot.yml | 13 + .github/workflows/codacy.yml | 65 ++++ .github/workflows/codeql.yml | 76 ++++ .github/workflows/gitleaks.yml | 23 ++ .github/workflows/go.yml | 55 +++ .github/workflows/lint.yml | 57 +++ .github/workflows/provenance.yml | 78 ++++ .github/workflows/sbom.yml | 24 ++ .github/workflows/security.yml | 42 +++ .github/workflows/test.yml | 43 +++ .gitignore | 2 + .golangci.yaml | 304 +++++++++++++++ .project-settings.env | 5 
+ Makefile | 125 +++++++ README.md | 19 +- chain.go | 41 ++- chain_test.go | 286 ++++++++++----- constantdelay.go | 1 + constantdelay_test.go | 54 ++- cron.go | 305 +++++++++------ cron_test.go | 545 ++++++++++++++++----------- doc.go | 39 +- go.mod | 2 +- helpers_test.go | 58 +++ logger.go | 47 ++- option_test.go | 24 +- parser.go | 611 ++++++++++++++++++++----------- parser_test.go | 381 ++++++++++++------- spec.go | 415 ++++++++++++++------- spec_test.go | 368 +++++++++++-------- 30 files changed, 2969 insertions(+), 1139 deletions(-) create mode 100644 .github/dependabot.yml create mode 100644 .github/workflows/codacy.yml create mode 100644 .github/workflows/codeql.yml create mode 100644 .github/workflows/gitleaks.yml create mode 100644 .github/workflows/go.yml create mode 100644 .github/workflows/lint.yml create mode 100644 .github/workflows/provenance.yml create mode 100644 .github/workflows/sbom.yml create mode 100644 .github/workflows/security.yml create mode 100644 .github/workflows/test.yml create mode 100644 .golangci.yaml create mode 100644 .project-settings.env create mode 100644 Makefile create mode 100644 helpers_test.go diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000..1721e20 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,13 @@ +--- +version: 2 +updates: + - package-ecosystem: "gomod" + directory: "/" + schedule: + interval: "weekly" + open-pull-requests-limit: 5 + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "weekly" + open-pull-requests-limit: 5 diff --git a/.github/workflows/codacy.yml b/.github/workflows/codacy.yml new file mode 100644 index 0000000..2790839 --- /dev/null +++ b/.github/workflows/codacy.yml @@ -0,0 +1,65 @@ +--- +# This workflow uses actions that are not certified by GitHub. +# They are provided by a third-party and are governed by +# separate terms of service, privacy policy, and support +# documentation. 
+ +# This workflow checks out code, performs a Codacy security scan +# and integrates the results with the +# GitHub Advanced Security code scanning feature. For more information on +# the Codacy security scan action usage and parameters, see +# https://github.com/codacy/codacy-analysis-cli-action. +# For more information on Codacy Analysis CLI in general, see +# https://github.com/codacy/codacy-analysis-cli. + +name: Codacy Security Scan + +on: + push: + branches: ["main"] + pull_request: + # The branches below must be a subset of the branches above + branches: ["main"] + schedule: + - cron: "40 11 * * 5" + +permissions: + contents: read + +jobs: + codacy-security-scan: + permissions: + # for actions/checkout to fetch code + contents: read + # for github/codeql-action/upload-sarif to upload SARIF results + security-events: write + # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status + actions: read + name: Codacy Security Scan + runs-on: ubuntu-latest + steps: + # Checkout the repository to the GitHub Actions runner + - name: Checkout code + uses: actions/checkout@v6 + + # Execute Codacy Analysis CLI and generate a SARIF output with the security issues identified during the analysis + - name: Run Codacy Analysis CLI + uses: codacy/codacy-analysis-cli-action@562ee3e92b8e92df8b67e0a5ff8aa8e261919c08 + with: + # Check https://github.com/codacy/codacy-analysis-cli#project-token to get your project token from your Codacy repository + # You can also omit the token and run the tools that support default configurations + project-token: ${{ secrets.CODACY_PROJECT_TOKEN }} + verbose: true + output: results.sarif + format: sarif + # Adjust severity of non-security issues + gh-code-scanning-compat: true + # Force 0 exit code to allow SARIF file generation + # This will handover control about PR rejection to the GitHub side + max-allowed-issues: 2147483647 + + # Upload the SARIF file generated in the previous step + - name: 
Upload SARIF results file + uses: github/codeql-action/upload-sarif@v4 + with: + sarif_file: results.sarif diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml new file mode 100644 index 0000000..6dd0404 --- /dev/null +++ b/.github/workflows/codeql.yml @@ -0,0 +1,76 @@ +--- +# For most projects, this workflow file will not need changing; you simply need +# to commit it to your repository. +# +# You may wish to alter this file to override the set of languages analyzed, +# or to provide custom queries or build logic. +# +# ******** NOTE ******** +# We have attempted to detect the languages in your repository. Please check +# the `language` matrix defined below to confirm you have the correct set of +# supported CodeQL languages. +# +name: "CodeQL" + +on: + push: + branches: ["main"] + pull_request: + # The branches below must be a subset of the branches above + branches: ["main"] + schedule: + - cron: "33 23 * * 3" + +jobs: + analyze: + name: Analyze + runs-on: ubuntu-latest + permissions: + actions: read + contents: read + security-events: write + + strategy: + fail-fast: false + matrix: + language: ["go"] + # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] + # Use only 'java' to analyze code written in Java, Kotlin or both + # Use only 'javascript' to analyze code written in JavaScript, TypeScript or both + # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support + + steps: + - name: Checkout repository + uses: actions/checkout@v6 + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v4 + with: + languages: ${{ matrix.language }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. 
+ + # Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs + # queries: security-extended,security-and-quality + + # Autobuild attempts to build any compiled languages (C/C++, C#, Go, or Java). + # If this step fails, then you should remove it and run the build manually (see below) + - name: Autobuild + uses: github/codeql-action/autobuild@v4 + + # â„šī¸ Command-line programs to run using the OS shell. + # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun + + # If the Autobuild fails above, remove it and uncomment the following three lines. + # modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance. + + # - run: | + # echo "Run, Build Application using script" + # ./location_of_script_within_repo/buildscript.sh + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v4 + with: + category: "/language:${{matrix.language}}" diff --git a/.github/workflows/gitleaks.yml b/.github/workflows/gitleaks.yml new file mode 100644 index 0000000..b71a776 --- /dev/null +++ b/.github/workflows/gitleaks.yml @@ -0,0 +1,23 @@ +--- +name: gitleaks +permissions: + contents: read +on: + pull_request: + push: + branches: [main] + workflow_dispatch: + schedule: + # run once a day at 4 AM + - cron: "0 4 * * *" +jobs: + scan: + name: gitleaks + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + with: + fetch-depth: 0 + - uses: gitleaks/gitleaks-action@v2 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml new file mode 100644 index 0000000..7df844c --- /dev/null +++ b/.github/workflows/go.yml @@ -0,0 +1,55 @@ +--- +name: Go +permissions: + contents: read + +on: + pull_request: + push: + branches: [main] + +jobs: + build: + 
runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + - name: Load project settings + id: settings + run: | + set -a + source .project-settings.env + set +a + echo "go_version=${GO_VERSION}" >> "$GITHUB_OUTPUT" + echo "gci_prefix=${GCI_PREFIX:-github.com/robfig/cron/v3}" >> "$GITHUB_OUTPUT" + echo "golangci_lint_version=${GOLANGCI_LINT_VERSION}" >> "$GITHUB_OUTPUT" + echo "proto_enabled=${PROTO_ENABLED:-false}" >> "$GITHUB_OUTPUT" + + - name: Set up Go + uses: actions/setup-go@v6.1.0 + with: + go-version: "${{ steps.settings.outputs.go_version }}" + check-latest: true + + - name: Cache Go modules + uses: actions/cache@v5 + with: + path: | + ~/go/pkg/mod + ~/.cache/go-build + key: ${{ runner.os }}-go-${{ steps.settings.outputs.go_version }}-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go-${{ steps.settings.outputs.go_version }}- + + - name: Modules download + run: go mod download + + - name: Tidy check + run: | + go mod tidy + git diff --exit-code go.mod go.sum + + - name: Verify + run: go mod verify + + - name: Build + run: go build -v ./... 
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml new file mode 100644 index 0000000..a314b65 --- /dev/null +++ b/.github/workflows/lint.yml @@ -0,0 +1,57 @@ +--- +name: lint + +on: + pull_request: + push: + branches: [main] + +jobs: + lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + - name: Load project settings + id: settings + run: | + set -a + source .project-settings.env + set +a + echo "go_version=${GO_VERSION}" >> "$GITHUB_OUTPUT" + echo "gci_prefix=${GCI_PREFIX:-github.com/robfig/cron/v3}" >> "$GITHUB_OUTPUT" + echo "golangci_lint_version=${GOLANGCI_LINT_VERSION}" >> "$GITHUB_OUTPUT" + echo "proto_enabled=${PROTO_ENABLED:-true}" >> "$GITHUB_OUTPUT" + - name: Setup Go + uses: actions/setup-go@v6 + with: + go-version: "${{ steps.settings.outputs.go_version }}" + check-latest: true + - name: Cache Go modules + uses: actions/cache@v5 + with: + path: | + ~/go/pkg/mod + ~/.cache/go-build + key: ${{ runner.os }}-go-${{ steps.settings.outputs.go_version }}-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go-${{ steps.settings.outputs.go_version }}- + - name: Install tools + run: | + go install github.com/daixiang0/gci@latest + go install mvdan.cc/gofumpt@latest + go install honnef.co/go/tools/cmd/staticcheck@latest + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/HEAD/install.sh | sh -s -- -b "$(go env GOPATH)/bin" "${{ steps.settings.outputs.golangci_lint_version }}" + - name: Modules + run: go mod download + - name: Tidy check + run: | + go mod tidy + git diff --exit-code go.mod go.sum + - name: gci + run: gci write -s standard -s default -s blank -s dot -s "prefix(${{ steps.settings.outputs.gci_prefix }})" -s localmodule --skip-vendor --skip-generated $(find . -type f -name '*.go' -not -path "./pkg/api/*" -not -path "./vendor/*" -not -path "./.gocache/*" -not -path "./.git/*") + - name: gofumpt + run: gofumpt -l -w $(find . 
-type f -name '*.go' -not -path "./pkg/api/*" -not -path "./vendor/*" -not -path "./.gocache/*" -not -path "./.git/*") + - name: staticcheck + run: staticcheck ./... + - name: golangci-lint + run: golangci-lint run -v ./... diff --git a/.github/workflows/provenance.yml b/.github/workflows/provenance.yml new file mode 100644 index 0000000..59d7abe --- /dev/null +++ b/.github/workflows/provenance.yml @@ -0,0 +1,78 @@ +--- +name: provenance + +on: + release: + types: [published] + workflow_dispatch: + +jobs: + source: + runs-on: ubuntu-latest + permissions: + contents: write + id-token: write + outputs: + base64_subjects: ${{ steps.subjects.outputs.base64 }} + archive_name: ${{ steps.archive.outputs.archive }} + steps: + - uses: actions/checkout@v6 + with: + fetch-depth: 0 + - name: Create source archive + id: archive + run: | + set -euo pipefail + ref="${GITHUB_REF_NAME}" + safe_ref="${ref//\//-}" + archive="starter-${safe_ref}.tar.gz" + git archive --format=tar.gz --prefix="starter-${safe_ref}/" -o "${archive}" "${ref}" + echo "archive=${archive}" >> "$GITHUB_OUTPUT" + - name: Compute subjects + id: subjects + run: | + set -euo pipefail + archive="${{ steps.archive.outputs.archive }}" + sum=$(sha256sum "${archive}" | awk '{print $1}') + printf '%s %s\n' "$sum" "$archive" | base64 -w0 > subjects.b64 + echo "base64=$(cat subjects.b64)" >> "$GITHUB_OUTPUT" + - uses: sigstore/cosign-installer@v3 + - name: Sign source archive + # COSIGN_EXPERIMENTAL enables keyless signing via Sigstore (Fulcio/Rekor). + # This requires network access to the Sigstore infrastructure and will fail + # in air-gapped environments. For offline use, disable keyless mode and + # configure cosign with a non-keyless signing method instead. 
+ env: + COSIGN_EXPERIMENTAL: "true" + run: | + set -euo pipefail + archive="${{ steps.archive.outputs.archive }}" + cosign sign-blob --yes --output-signature "${archive}.sig" --output-certificate "${archive}.pem" "${archive}" + - name: Upload source artifact + uses: actions/upload-artifact@v7 + with: + name: source-archive + path: | + ${{ steps.archive.outputs.archive }} + ${{ steps.archive.outputs.archive }}.sig + ${{ steps.archive.outputs.archive }}.pem + - name: Upload release assets + if: github.event_name == 'release' + uses: softprops/action-gh-release@v2 + with: + files: | + ${{ steps.archive.outputs.archive }} + ${{ steps.archive.outputs.archive }}.sig + ${{ steps.archive.outputs.archive }}.pem + + provenance: + needs: [source] + permissions: + actions: read + contents: write + id-token: write + uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0 + with: + base64-subjects: ${{ needs.source.outputs.base64_subjects }} + upload-assets: ${{ github.event_name == 'release' }} + upload-tag-name: ${{ github.ref_name }} diff --git a/.github/workflows/sbom.yml b/.github/workflows/sbom.yml new file mode 100644 index 0000000..08db0cb --- /dev/null +++ b/.github/workflows/sbom.yml @@ -0,0 +1,24 @@ +--- +name: sbom + +on: + pull_request: + push: + branches: [main] + schedule: + - cron: "0 5 * * 1" + +jobs: + sbom: + permissions: + contents: write + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + - name: Generate SBOM + uses: anchore/sbom-action@v0 + with: + path: . 
+ format: cyclonedx-json + output-file: sbom.cdx.json + artifact-name: sbom.cdx.json diff --git a/.github/workflows/security.yml b/.github/workflows/security.yml new file mode 100644 index 0000000..3f871e6 --- /dev/null +++ b/.github/workflows/security.yml @@ -0,0 +1,42 @@ +--- +name: security + +on: + pull_request: + push: + branches: [main] + schedule: + - cron: "0 3 * * 1" + +jobs: + security: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + - name: Load project settings + id: settings + run: | + set -a + source .project-settings.env + set +a + echo "go_version=${GO_VERSION}" >> "$GITHUB_OUTPUT" + - name: Setup Go + uses: actions/setup-go@v6 + with: + go-version: "${{ steps.settings.outputs.go_version }}" + check-latest: true + - name: Cache Go modules + uses: actions/cache@v5 + with: + path: | + ~/go/pkg/mod + ~/.cache/go-build + key: ${{ runner.os }}-go-${{ steps.settings.outputs.go_version }}-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go-${{ steps.settings.outputs.go_version }}- + - name: Modules + run: go mod download + - name: govulncheck + run: go install golang.org/x/vuln/cmd/govulncheck@latest && govulncheck ./... + - name: gosec + run: go install github.com/securego/gosec/v2/cmd/gosec@latest && gosec -exclude-generated ./... 
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 0000000..6c3d2aa --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,43 @@ +--- +name: test + +on: + pull_request: + push: + branches: [main] + +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + - name: Load project settings + id: settings + run: | + set -a + source .project-settings.env + set +a + echo "go_version=${GO_VERSION}" >> "$GITHUB_OUTPUT" + - name: Setup Go + uses: actions/setup-go@v6 + with: + go-version: "${{ steps.settings.outputs.go_version }}" + check-latest: true + - name: Cache Go modules + uses: actions/cache@v5 + with: + path: | + ~/go/pkg/mod + ~/.cache/go-build + key: ${{ runner.os }}-go-${{ steps.settings.outputs.go_version }}-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go-${{ steps.settings.outputs.go_version }}- + - name: Modules + run: go mod download + - name: Test (race + coverage) + run: go test -race -coverprofile=coverage.out ./... + - name: Upload coverage artifact + uses: actions/upload-artifact@v7 + with: + name: coverage + path: coverage.out diff --git a/.gitignore b/.gitignore index 0026861..6c40069 100644 --- a/.gitignore +++ b/.gitignore @@ -20,3 +20,5 @@ _cgo_export.* _testmain.go *.exe +.vscode +.DS_Store diff --git a/.golangci.yaml b/.golangci.yaml new file mode 100644 index 0000000..bffe783 --- /dev/null +++ b/.golangci.yaml @@ -0,0 +1,304 @@ +--- +version: "2" +# Options for analysis running. +run: + # Number of operating system threads (`GOMAXPROCS`) that can execute golangci-lint simultaneously. + # If it is explicitly set to 0 (i.e. not the default) then golangci-lint will automatically set the value to match Linux container CPU quota. + # Default: the number of logical CPUs in the machine + concurrency: 12 + # Timeout for analysis, e.g. 30s, 5m. + # Default: 1m + timeout: 5m + # Exit code when at least one issue was found. 
+ # Default: 1 + issues-exit-code: 2 + # Include test files or not. + # Default: true + tests: true + # List of build tags, all linters use it. + # Default: [] + # build-tags: + # - tag + # If set, we pass it to "go list -mod={option}". From "go help modules": + # If invoked with -mod=readonly, the go command is disallowed from the implicit + # automatic updating of go.mod described above. Instead, it fails when any changes + # to go.mod are needed. This setting is most useful to check that go.mod does + # not need updates, such as in a continuous integration and testing system. + # If invoked with -mod=vendor, the go command assumes that the vendor + # directory holds the correct copies of dependencies and ignores + # the dependency descriptions in go.mod. + # + # Allowed values: readonly|vendor|mod + # Default: "" + modules-download-mode: readonly + # Allow multiple parallel golangci-lint instances running. + # If false, golangci-lint acquires file lock on start. + # Default: false + allow-parallel-runners: true + # Allow multiple golangci-lint instances running, but serialize them around a lock. + # If false, golangci-lint exits with an error if it fails to acquire file lock on start. + # Default: false + allow-serial-runners: true + # Define the Go version limit. + # Mainly related to generics support since go1.18. + # Default: use Go version from the go.mod file, fallback on the env var `GOVERSION`, fallback on 1.17 + go: "1.26.2" + +linters: + # Enable specific linter + # https://golangci-lint.run/usage/linters/#enabled-by-default + default: all + enable: + - wsl_v5 + disable: + - exhaustruct + - gosmopolitan + - ireturn + - testpackage + - wsl + + # exclusions: + # # Exclude files by path. Paths are relative to the config file location and support glob patterns. + # # Default: [] + # paths: + # - "admin-ui/*" + + settings: + cyclop: + # The maximal code complexity to report. 
+ # Default: 10 + max-complexity: 15 + errcheck: + # Report about not checking of errors in type assertions: `a := b.(MyStruct)`. + # Such cases aren't reported by default. + # Default: false + check-type-assertions: true + # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`. + # Such cases aren't reported by default. + # Default: false + check-blank: true + # To disable the errcheck built-in exclude list. + # See `-excludeonly` option in https://github.com/kisielk/errcheck#excluding-functions for details. + # Default: false + disable-default-exclusions: false + # List of functions to exclude from checking, where each entry is a single function to exclude. + # See https://github.com/kisielk/errcheck#excluding-functions for details. + exclude-functions: + - fmt.Fprintf + - fmt.Fprintln + funlen: + lines: 100 + + lll: + # Max line length, lines longer will be reported. + # '\t' is counted as 1 character by default, and can be changed with the tab-width option. + # Default: 120. + line-length: 150 + # Tab width in spaces. + # Default: 1 + tab-width: 1 + + ireturn: + # ireturn does not allow using `allow` and `reject` settings at the same time. + # Both settings are lists of the keywords and regular expressions matched to interface or package names. + # keywords: + # - `empty` for `any` + # - `error` for errors + # - `stdlib` for standard library + # - `anon` for anonymous interfaces + # - `generic` for generic interfaces added in go 1.18 + + # By default, it allows using errors, empty interfaces, anonymous interfaces, + # and interfaces provided by the standard library. 
+ allow: + - anon + - error + - empty + - stdlib + # You can specify idiomatic endings for interface + - (or|er|ry)$ + + revive: + enable-all-rules: true + rules: + - name: add-constant + severity: warning + disabled: false + arguments: + - max-lit-count: "3" + allow-strs: '"","-","*","now","entry"' + allow-ints: "-1,0,1,2,10" + allow-floats: "0.0,1.0" + + - name: cognitive-complexity + severity: warning + disabled: false + arguments: [15] + + - name: cyclomatic + arguments: [15] + + # # - name: function-length + # # arguments: [80, 0] + + - name: max-public-structs + arguments: [10] + + # - name: nested-structs + # disabled: true + + - name: flag-parameter + disabled: true + + - name: line-length-limit + arguments: [140] + + - name: var-naming + disabled: true + # https://github.com/mgechev/revive/blob/HEAD/RULES_DESCRIPTIONS.md#package-comments + - name: package-comments + severity: warning + disabled: true + exclude: [""] + + wrapcheck: + # An array of strings that specify globs of packages to ignore. + # Default: [] + ignore-package-globs: + - github.com/hyp3rd/* + + varnamelen: + # The longest distance, in source lines, that is being considered a "small scope". + # Variables used in at most this many lines will be ignored. + # Default: 5 + max-distance: 6 + # The minimum length of a variable's name that is considered "long". + # Variable names that are at least this long will be ignored. + # Default: 3 + min-name-length: 2 + # Check method receivers. + # Default: false + check-receiver: false + # Check named return values. + # Default: false + check-return: true + # Check type parameters. + # Default: false + check-type-param: true + # Ignore "ok" variables that hold the bool return value of a type assertion. + # Default: false + ignore-type-assert-ok: true + # Ignore "ok" variables that hold the bool return value of a map index. + # Default: false + ignore-map-index-ok: true + # Ignore "ok" variables that hold the bool return value of a channel receive. 
+ # Default: false + ignore-chan-recv-ok: true + # Optional list of variable names that should be ignored completely. + # Default: [] + ignore-names: + - err + # Optional list of variable declarations that should be ignored completely. + # Entries must be in one of the following forms (see below for examples): + # - for variables, parameters, named return values, method receivers, or type parameters: + # ( can also be a pointer/slice/map/chan/...) + # - for constants: const + # + # Default: [] + ignore-decls: + - i int + - j int + +formatters: + enable: + # - gci + - gofumpt + - goimports + - golines + settings: + gci: + # Section configuration to compare against. + # Section names are case-insensitive and may contain parameters in (). + # The default order of sections is `standard > default > custom > blank > dot > alias > localmodule`. + # If `custom-order` is `true`, it follows the order of `sections` option. + # Default: ["standard", "default"] + sections: + - standard # Standard section: captures all standard packages. + - default # Default section: contains all imports that could not be matched to another section type. + - prefix(github.com/hyp3rd/*) # Custom section: groups all imports with the specified Prefix. + - blank # Blank section: contains all blank imports. This section is not present unless explicitly enabled. + - dot # Dot section: contains all dot imports. This section is not present unless explicitly enabled. + - alias # Alias section: contains all alias imports. This section is not present unless explicitly enabled. + - localmodule # Local module section: contains all local packages. This section is not present unless explicitly enabled. + # Checks that no inline comments are present. + # Default: false + no-inline-comments: true + # Checks that no prefix comments (comment lines above an import) are present. + # Default: false + no-prefix-comments: true + # Enable custom order of sections. 
+ # If `true`, make the section order the same as the order of `sections`. + # Default: false + custom-order: true + # Drops lexical ordering for custom sections. + # Default: false + no-lex-order: true + + gofumpt: + # Module path which contains the source code being formatted. + # Default: "" + module-path: github.com/robfig/cron/v3 + # Choose whether to use the extra rules. + # Default: false + extra-rules: true + + goimports: + # A list of prefixes, which, if set, checks import paths + # with the given prefixes are grouped after 3rd-party packages. + # Default: [] + local-prefixes: + - github.com/robfig/cron/v3 + + golines: + # Target maximum line length. + # Default: 100 + max-len: 140 + # Length of a tabulation. + # Default: 4 + # tab-len: 8 + # Shorten single-line comments. + # Default: false + shorten-comments: true + # Default: true + reformat-tags: true + # Split chained methods on the dots as opposed to the arguments. + # Default: true + chain-split-dots: true + +# output configuration options +output: + # Order to use when sorting results. + # Require `sort-results` to `true`. + # Possible values: `file`, `linter`, and `severity`. + # + # If the severity values are inside the following list, they are ordered in this order: + # 1. error + # 2. warning + # 3. high + # 4. medium + # 5. low + # Either they are sorted alphabetically. + # + # Default: ["file"] + sort-order: + - linter + - severity + - file # filepath, line, and column. + # Show statistics per linter. + # Default: false + show-stats: true + +issues: + # Make issues output unique by line. 
+ # Default: true + uniq-by-line: false diff --git a/.project-settings.env b/.project-settings.env new file mode 100644 index 0000000..34a3d53 --- /dev/null +++ b/.project-settings.env @@ -0,0 +1,5 @@ +GOLANGCI_LINT_VERSION=v2.11.4 +BUF_VERSION=v1.67.0 +GO_VERSION=1.26.2 +GCI_PREFIX=github.com/robfig/cron/v3 +PROTO_ENABLED=false diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..1bdaa94 --- /dev/null +++ b/Makefile @@ -0,0 +1,125 @@ +include .project-settings.env + +GOLANGCI_LINT_VERSION ?= v2.11.4 +BUF_VERSION ?= v1.67.0 +GO_VERSION ?= 1.26.2 +GCI_PREFIX ?= github.com/robfig/cron/v3 +PROTO_ENABLED ?= true + +GOFILES = $(shell find . -type f -name '*.go' -not -path "./pkg/api/*" -not -path "./vendor/*" -not -path "./.gocache/*" -not -path "./.git/*") + +test: + RUN_INTEGRATION_TEST=yes go test -v -timeout 5m -cover ./... + +test-race: + go test -race ./... + +bench: + go test -bench=. -benchtime=3s -benchmem -run=^-memprofile=mem.out ./... + + +update-deps: + go get -u -t ./... && go mod tidy -v && go mod verify + + +prepare-toolchain: + $(call check_command_exists,git) || (echo "git is not present on the system, install it before starting to code." 
&& exit 1) + + $(call check_command_exists,go) || (echo "golang is not present on the system, download and install it at https://go.dev/dl" && exit 1) + + @echo "Installing gci...\n" + $(call check_command_exists,gci) || go install github.com/daixiang0/gci@latest + + @echo "Installing gofumpt...\n" + $(call check_command_exists,gofumpt) || go install mvdan.cc/gofumpt@latest + + @echo "Installing golangci-lint $(GOLANGCI_LINT_VERSION)...\n" + $(call check_command_exists,golangci-lint) || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/HEAD/install.sh | sh -s -- -b "$(go env GOPATH)/bin" $(GOLANGCI_LINT_VERSION) + + @echo "Installing staticcheck...\n" + $(call check_command_exists,staticcheck) || go install honnef.co/go/tools/cmd/staticcheck@latest + + @echo "Installing govulncheck...\n" + $(call check_command_exists,govulncheck) || go install golang.org/x/vuln/cmd/govulncheck@latest + + @echo "Installing gosec...\n" + $(call check_command_exists,gosec) || go install github.com/securego/gosec/v2/cmd/gosec@latest + +update-toolchain: + @echo "Updating gci...\n" + go install github.com/daixiang0/gci@latest + + @echo "Updating gofumpt...\n" + go install mvdan.cc/gofumpt@latest + + @echo "Updating govulncheck...\n" + go install golang.org/x/vuln/cmd/govulncheck@latest + + @echo "Updating gosec...\n" + go install github.com/securego/gosec/v2/cmd/gosec@latest + + @echo "Updating staticcheck...\n" + go install honnef.co/go/tools/cmd/staticcheck@latest + +lint: prepare-toolchain + @echo "Running gci..." + @for file in ${GOFILES}; do \ + gci write -s standard -s default -s blank -s dot -s "prefix($(GCI_PREFIX))" -s localmodule --skip-vendor --skip-generated $$file; \ + done + + @echo "\nRunning gofumpt..." + gofumpt -l -w ${GOFILES} + + @echo "\nRunning staticcheck..." + staticcheck ./... + + @echo "\nRunning golangci-lint $(GOLANGCI_LINT_VERSION)..." + golangci-lint run -v --fix ./... + +vet: + @echo "Running go vet..." 
+ + $(call check_command_exists,shadow) || go install golang.org/x/tools/go/analysis/passes/shadow/cmd/shadow@latest + + @for file in ${GOFILES}; do \ + go vet -vettool=$(shell which shadow) $$file; \ + done + +sec: + @echo "Running govulncheck..." + govulncheck ./... + + @echo "\nRunning gosec..." + gosec -exclude-generated ./... + +all: .PHONY + +# check_command_exists is a helper function that checks if a command exists. +define check_command_exists +@which $(1) > /dev/null 2>&1 || (echo "$(1) command not found" && exit 1) +endef + +ifeq ($(call check_command_exists,$(1)),false) + $(error "$(1) command not found") +endif + +# help prints a list of available targets and their descriptions. +help: + @echo "Available targets:" + @echo + @echo "Development commands:" + @echo " prepare-toolchain\t\tInstall and configure all required development tools" + @echo + @echo "Testing commands:" + @echo " test\t\t\t\tRun all tests in the project" + @echo " test-race\t\t\tRun all tests with the race detector enabled" + @echo + @echo "Code quality commands:" + @echo " lint\t\t\t\tRun all linters (gci, gofumpt, model-tags-check, staticcheck, golangci-lint)" + @echo " update-deps\t\t\tUpdate all dependencies and tidy go.mod" + @echo " vet\t\t\t\tRun go vet with shadow analysis" + @echo " sec\t\t\t\tRun security checks (govulncheck and gosec)" + @echo + @echo "For more information, see the project README." + +.PHONY: update-deps lint test test-race sec diff --git a/README.md b/README.md index 38c4d8a..84d20ed 100644 --- a/README.md +++ b/README.md @@ -6,17 +6,21 @@ Cron V3 has been released! To download the specific tagged release, run: + ```bash go get github.com/robfig/cron/v3@v3.0.0 ``` + Import it in your program as: + ```go import "github.com/robfig/cron/v3" ``` + It requires Go 1.11 or later due to usage of Go Modules. 
Refer to the documentation here: -http://godoc.org/github.com/robfig/cron + The rest of this document describes the the advances in v3 and a list of breaking changes for users that wish to upgrade from an earlier version. @@ -47,7 +51,7 @@ New features: year field (optional in Quartz) is not supported. - Extensible, key/value logging via an interface that complies with - the https://github.com/go-logr/logr project. + the https://github.com/go-logr/logr project. - The new Chain & JobWrapper types allow you to install "interceptors" to add cross-cutting behavior like the following: @@ -65,15 +69,17 @@ It is backwards incompatible with both v1 and v2. These updates are required: UPDATING: To retain the old behavior, construct your Cron with a custom parser: + ```go // Seconds field, required cron.New(cron.WithSeconds()) // Seconds field, optional cron.New(cron.WithParser(cron.NewParser( - cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor, + cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor, ))) ``` + - The Cron type now accepts functional options on construction rather than the previous ad-hoc behavior modification mechanisms (setting a field, calling a setter). @@ -92,16 +98,19 @@ cron.New(cron.WithParser(cron.NewParser( has been removed to accommodate the more general JobWrapper type. UPDATING: To opt into panic recovery and configure the panic logger: + ```go cron.New(cron.WithChain( - cron.Recover(logger), // or use cron.DefaultLogger + cron.Recover(logger), // or use cron.DefaultLogger() )) ``` -- In adding support for https://github.com/go-logr/logr, `cron.WithVerboseLogger` was + +- In adding support for https://github.com/go-logr/logr, `cron.WithVerboseLogger` was removed, since it is duplicative with the leveled logging. UPDATING: Callers should use `WithLogger` and specify a logger that does not discard `Info` logs. 
For convenience, one is provided that wraps `*log.Logger`: + ```go cron.New( cron.WithLogger(cron.VerbosePrintfLogger(logger))) diff --git a/chain.go b/chain.go index 9c087b7..01ea334 100644 --- a/chain.go +++ b/chain.go @@ -1,12 +1,15 @@ package cron import ( + "errors" "fmt" "runtime" "sync" "time" ) +var errPanicValue = errors.New("panic value") + // JobWrapper decorates the given Job with some behavior. type JobWrapper func(Job) Job @@ -24,33 +27,41 @@ func NewChain(c ...JobWrapper) Chain { // Then decorates the given job with all JobWrappers in the chain. // // This: -// NewChain(m1, m2, m3).Then(job) +// +// NewChain(m1, m2, m3).Then(job) +// // is equivalent to: -// m1(m2(m3(job))) +// +// m1(m2(m3(job))) func (c Chain) Then(j Job) Job { for i := range c.wrappers { j = c.wrappers[len(c.wrappers)-i-1](j) } + return j } // Recover panics in wrapped jobs and log them with the provided logger. func Recover(logger Logger) JobWrapper { - return func(j Job) Job { + return func(job Job) Job { return FuncJob(func() { defer func() { - if r := recover(); r != nil { + if recovered := recover(); recovered != nil { const size = 64 << 10 + buf := make([]byte, size) buf = buf[:runtime.Stack(buf, false)] - err, ok := r.(error) + + err, ok := recovered.(error) if !ok { - err = fmt.Errorf("%v", r) + err = fmt.Errorf("%w: %v", errPanicValue, recovered) } + logger.Error(err, "panic", "stack", "...\n"+string(buf)) } }() - j.Run() + + job.Run() }) } } @@ -59,16 +70,20 @@ func Recover(logger Logger) JobWrapper { // previous one is complete. Jobs running after a delay of more than a minute // have the delay logged at Info. 
func DelayIfStillRunning(logger Logger) JobWrapper { - return func(j Job) Job { + return func(job Job) Job { var mu sync.Mutex + return FuncJob(func() { start := time.Now() + mu.Lock() defer mu.Unlock() + if dur := time.Since(start); dur > time.Minute { logger.Info("delay", "duration", dur) } - j.Run() + + job.Run() }) } } @@ -76,14 +91,16 @@ func DelayIfStillRunning(logger Logger) JobWrapper { // SkipIfStillRunning skips an invocation of the Job if a previous invocation is // still running. It logs skips to the given logger at Info level. func SkipIfStillRunning(logger Logger) JobWrapper { - return func(j Job) Job { - var ch = make(chan struct{}, 1) + return func(job Job) Job { + ch := make(chan struct{}, 1) ch <- struct{}{} + return FuncJob(func() { select { case v := <-ch: defer func() { ch <- v }() - j.Run() + + job.Run() default: logger.Info("skip") } diff --git a/chain_test.go b/chain_test.go index ec91097..d97ba1b 100644 --- a/chain_test.go +++ b/chain_test.go @@ -1,7 +1,7 @@ package cron import ( - "io/ioutil" + "io" "log" "reflect" "sync" @@ -9,61 +9,87 @@ import ( "time" ) +const ( + jobCompletionWait = 2 * time.Millisecond + twoJobCompletionWait = 3 * time.Millisecond + delayedJobDuration = 10 * time.Millisecond + waitForFirstJob = 5 * time.Millisecond + waitForDelayedJobs = 25 * time.Millisecond + rapidFireJobRuns = 11 + rapidFireCompletionWait = 200 * time.Millisecond + independentJobsWait = 100 * time.Millisecond +) + func appendingJob(slice *[]int, value int) Job { var m sync.Mutex + return FuncJob(func() { m.Lock() + *slice = append(*slice, value) m.Unlock() }) } func appendingWrapper(slice *[]int, value int) JobWrapper { - return func(j Job) Job { + return func(job Job) Job { return FuncJob(func() { appendingJob(slice, value).Run() - j.Run() + job.Run() }) } } func TestChain(t *testing.T) { - var nums []int + t.Parallel() + var ( + nums []int append1 = appendingWrapper(&nums, 1) append2 = appendingWrapper(&nums, 2) append3 = appendingWrapper(&nums, 
3) append4 = appendingJob(&nums, 4) ) + NewChain(append1, append2, append3).Then(append4).Run() + if !reflect.DeepEqual(nums, []int{1, 2, 3, 4}) { t.Error("unexpected order of calls:", nums) } } func TestChainRecover(t *testing.T) { + t.Parallel() + panickingJob := FuncJob(func() { panic("panickingJob panics") }) t.Run("panic exits job by default", func(t *testing.T) { + t.Parallel() + defer func() { if err := recover(); err == nil { - t.Errorf("panic expected, but none received") + t.Error("panic expected, but none received") } }() + NewChain().Then(panickingJob). Run() }) t.Run("Recovering JobWrapper recovers", func(t *testing.T) { - NewChain(Recover(PrintfLogger(log.New(ioutil.Discard, "", 0)))). + t.Parallel() + + NewChain(Recover(PrintfLogger(log.New(io.Discard, "", 0)))). Then(panickingJob). Run() }) t.Run("composed with the *IfStillRunning wrappers", func(t *testing.T) { - NewChain(Recover(PrintfLogger(log.New(ioutil.Discard, "", 0)))). + t.Parallel() + + NewChain(Recover(PrintfLogger(log.New(io.Discard, "", 0)))). Then(panickingJob). Run() }) @@ -88,155 +114,215 @@ func (j *countJob) Run() { func (j *countJob) Started() int { defer j.m.Unlock() + j.m.Lock() + return j.started } func (j *countJob) Done() int { defer j.m.Unlock() + j.m.Lock() + return j.done } func TestChainDelayIfStillRunning(t *testing.T) { + t.Parallel() t.Run("runs immediately", func(t *testing.T) { - var j countJob - wrappedJob := NewChain(DelayIfStillRunning(DiscardLogger)).Then(&j) + t.Parallel() + + var jobCounter countJob + + wrappedJob := NewChain(DelayIfStillRunning(DiscardLogger())).Then(&jobCounter) go wrappedJob.Run() - time.Sleep(2 * time.Millisecond) // Give the job 2ms to complete. 
- if c := j.Done(); c != 1 { + + time.Sleep(jobCompletionWait) + + if c := jobCounter.Done(); c != 1 { t.Errorf("expected job run once, immediately, got %d", c) } }) t.Run("second run immediate if first done", func(t *testing.T) { - var j countJob - wrappedJob := NewChain(DelayIfStillRunning(DiscardLogger)).Then(&j) + t.Parallel() + + var jobCounter countJob + + wrappedJob := NewChain(DelayIfStillRunning(DiscardLogger())).Then(&jobCounter) + go func() { go wrappedJob.Run() + time.Sleep(time.Millisecond) + go wrappedJob.Run() }() - time.Sleep(3 * time.Millisecond) // Give both jobs 3ms to complete. - if c := j.Done(); c != 2 { + + time.Sleep(twoJobCompletionWait) + + if c := jobCounter.Done(); c != 2 { t.Errorf("expected job run twice, immediately, got %d", c) } }) t.Run("second run delayed if first not done", func(t *testing.T) { - var j countJob - j.delay = 10 * time.Millisecond - wrappedJob := NewChain(DelayIfStillRunning(DiscardLogger)).Then(&j) + t.Parallel() + + var jobCounter countJob + + jobCounter.delay = delayedJobDuration + wrappedJob := NewChain(DelayIfStillRunning(DiscardLogger())).Then(&jobCounter) + go func() { go wrappedJob.Run() + time.Sleep(time.Millisecond) + go wrappedJob.Run() }() // After 5ms, the first job is still in progress, and the second job was // run but should be waiting for it to finish. - time.Sleep(5 * time.Millisecond) - started, done := j.Started(), j.Done() + time.Sleep(waitForFirstJob) + + started, done := jobCounter.Started(), jobCounter.Done() if started != 1 || done != 0 { t.Error("expected first job started, but not finished, got", started, done) } // Verify that the second job completes. 
- time.Sleep(25 * time.Millisecond) - started, done = j.Started(), j.Done() + time.Sleep(waitForDelayedJobs) + + started, done = jobCounter.Started(), jobCounter.Done() if started != 2 || done != 2 { t.Error("expected both jobs done, got", started, done) } }) - } func TestChainSkipIfStillRunning(t *testing.T) { + t.Parallel() - t.Run("runs immediately", func(t *testing.T) { - var j countJob - wrappedJob := NewChain(SkipIfStillRunning(DiscardLogger)).Then(&j) + t.Run("runs immediately", testChainSkipRunsImmediately) + t.Run("second run immediate if first done", testChainSkipSecondRunImmediate) + t.Run("second run skipped if first not done", testChainSkipSecondRunSkipped) + t.Run("skip 10 jobs on rapid fire", testChainSkipRapidFire) + t.Run("different jobs independent", testChainSkipDifferentJobs) +} + +func testChainSkipRunsImmediately(t *testing.T) { + t.Parallel() + + var jobCounter countJob + + wrappedJob := NewChain(SkipIfStillRunning(DiscardLogger())).Then(&jobCounter) + go wrappedJob.Run() + + time.Sleep(jobCompletionWait) + + if c := jobCounter.Done(); c != 1 { + t.Errorf("expected job run once, immediately, got %d", c) + } +} + +func testChainSkipSecondRunImmediate(t *testing.T) { + t.Parallel() + + var jobCounter countJob + + wrappedJob := NewChain(SkipIfStillRunning(DiscardLogger())).Then(&jobCounter) + + go func() { go wrappedJob.Run() - time.Sleep(2 * time.Millisecond) // Give the job 2ms to complete. - if c := j.Done(); c != 1 { - t.Errorf("expected job run once, immediately, got %d", c) - } - }) - t.Run("second run immediate if first done", func(t *testing.T) { - var j countJob - wrappedJob := NewChain(SkipIfStillRunning(DiscardLogger)).Then(&j) - go func() { - go wrappedJob.Run() - time.Sleep(time.Millisecond) - go wrappedJob.Run() - }() - time.Sleep(3 * time.Millisecond) // Give both jobs 3ms to complete. 
- if c := j.Done(); c != 2 { - t.Errorf("expected job run twice, immediately, got %d", c) - } - }) + time.Sleep(time.Millisecond) - t.Run("second run skipped if first not done", func(t *testing.T) { - var j countJob - j.delay = 10 * time.Millisecond - wrappedJob := NewChain(SkipIfStillRunning(DiscardLogger)).Then(&j) - go func() { - go wrappedJob.Run() - time.Sleep(time.Millisecond) - go wrappedJob.Run() - }() + go wrappedJob.Run() + }() - // After 5ms, the first job is still in progress, and the second job was - // aleady skipped. - time.Sleep(5 * time.Millisecond) - started, done := j.Started(), j.Done() - if started != 1 || done != 0 { - t.Error("expected first job started, but not finished, got", started, done) - } + time.Sleep(twoJobCompletionWait) - // Verify that the first job completes and second does not run. - time.Sleep(25 * time.Millisecond) - started, done = j.Started(), j.Done() - if started != 1 || done != 1 { - t.Error("expected second job skipped, got", started, done) - } - }) + if c := jobCounter.Done(); c != 2 { + t.Errorf("expected job run twice, immediately, got %d", c) + } +} - t.Run("skip 10 jobs on rapid fire", func(t *testing.T) { - var j countJob - j.delay = 10 * time.Millisecond - wrappedJob := NewChain(SkipIfStillRunning(DiscardLogger)).Then(&j) - for i := 0; i < 11; i++ { - go wrappedJob.Run() - } - time.Sleep(200 * time.Millisecond) - done := j.Done() - if done != 1 { - t.Error("expected 1 jobs executed, 10 jobs dropped, got", done) - } - }) +func testChainSkipSecondRunSkipped(t *testing.T) { + t.Parallel() - t.Run("different jobs independent", func(t *testing.T) { - var j1, j2 countJob - j1.delay = 10 * time.Millisecond - j2.delay = 10 * time.Millisecond - chain := NewChain(SkipIfStillRunning(DiscardLogger)) - wrappedJob1 := chain.Then(&j1) - wrappedJob2 := chain.Then(&j2) - for i := 0; i < 11; i++ { - go wrappedJob1.Run() - go wrappedJob2.Run() - } - time.Sleep(100 * time.Millisecond) - var ( - done1 = j1.Done() - done2 = j2.Done() - 
) - if done1 != 1 || done2 != 1 { - t.Error("expected both jobs executed once, got", done1, "and", done2) - } - }) + var jobCounter countJob + + jobCounter.delay = delayedJobDuration + wrappedJob := NewChain(SkipIfStillRunning(DiscardLogger())).Then(&jobCounter) + + go func() { + go wrappedJob.Run() + time.Sleep(time.Millisecond) + + go wrappedJob.Run() + }() + + time.Sleep(waitForFirstJob) + + started, done := jobCounter.Started(), jobCounter.Done() + if started != 1 || done != 0 { + t.Error("expected first job started, but not finished, got", started, done) + } + + time.Sleep(waitForDelayedJobs) + + started, done = jobCounter.Started(), jobCounter.Done() + if started != 1 || done != 1 { + t.Error("expected second job skipped, got", started, done) + } +} + +func testChainSkipRapidFire(t *testing.T) { + t.Parallel() + + var jobCounter countJob + + jobCounter.delay = delayedJobDuration + + wrappedJob := NewChain(SkipIfStillRunning(DiscardLogger())).Then(&jobCounter) + for range [rapidFireJobRuns]struct{}{} { + go wrappedJob.Run() + } + + time.Sleep(rapidFireCompletionWait) + + done := jobCounter.Done() + if done != 1 { + t.Error("expected 1 jobs executed, 10 jobs dropped, got", done) + } +} + +func testChainSkipDifferentJobs(t *testing.T) { + t.Parallel() + + var firstJob, secondJob countJob + + firstJob.delay = delayedJobDuration + secondJob.delay = delayedJobDuration + chain := NewChain(SkipIfStillRunning(DiscardLogger())) + wrappedJob1 := chain.Then(&firstJob) + wrappedJob2 := chain.Then(&secondJob) + + for range [rapidFireJobRuns]struct{}{} { + go wrappedJob1.Run() + go wrappedJob2.Run() + } + + time.Sleep(independentJobsWait) + + done1 := firstJob.Done() + + done2 := secondJob.Done() + if done1 != 1 || done2 != 1 { + t.Error("expected both jobs executed once, got", done1, "and", done2) + } } diff --git a/constantdelay.go b/constantdelay.go index cd6e7b1..3f83322 100644 --- a/constantdelay.go +++ b/constantdelay.go @@ -15,6 +15,7 @@ func Every(duration 
time.Duration) ConstantDelaySchedule { if duration < time.Second { duration = time.Second } + return ConstantDelaySchedule{ Delay: duration - time.Duration(duration.Nanoseconds())%time.Second, } diff --git a/constantdelay_test.go b/constantdelay_test.go index f43a58a..81cb0ae 100644 --- a/constantdelay_test.go +++ b/constantdelay_test.go @@ -5,50 +5,68 @@ import ( "time" ) +const ( + quarterHourDelay = 15 * time.Minute + roundingNanosecondDelay = 50 * time.Nanosecond + thirtyFiveMinuteDelay = 35 * time.Minute + fourteenMinuteDelay = 14 * time.Minute + fortyFourMinuteDelay = 44 * time.Minute + twentyFourSecondDelay = 24 * time.Second + twentyFiveHourDelay = 25 * time.Hour + ninetyOneDayDelay = 91 * 24 * time.Hour + twentyFiveMinuteDelay = 25 * time.Minute + fifteenSecondDelay = 15 * time.Second + fifteenMillisecondDelay = 15 * time.Millisecond + nextQuarterHourTimestamp = "Mon Jul 9 15:00 2012" +) + func TestConstantDelayNext(t *testing.T) { + t.Parallel() + tests := []struct { time string delay time.Duration expected string }{ // Simple cases - {"Mon Jul 9 14:45 2012", 15*time.Minute + 50*time.Nanosecond, "Mon Jul 9 15:00 2012"}, - {"Mon Jul 9 14:59 2012", 15 * time.Minute, "Mon Jul 9 15:14 2012"}, - {"Mon Jul 9 14:59:59 2012", 15 * time.Minute, "Mon Jul 9 15:14:59 2012"}, + {"Mon Jul 9 14:45 2012", quarterHourDelay + roundingNanosecondDelay, nextQuarterHourTimestamp}, + {"Mon Jul 9 14:59 2012", quarterHourDelay, "Mon Jul 9 15:14 2012"}, + {"Mon Jul 9 14:59:59 2012", quarterHourDelay, "Mon Jul 9 15:14:59 2012"}, // Wrap around hours - {"Mon Jul 9 15:45 2012", 35 * time.Minute, "Mon Jul 9 16:20 2012"}, + {"Mon Jul 9 15:45 2012", thirtyFiveMinuteDelay, "Mon Jul 9 16:20 2012"}, // Wrap around days - {"Mon Jul 9 23:46 2012", 14 * time.Minute, "Tue Jul 10 00:00 2012"}, - {"Mon Jul 9 23:45 2012", 35 * time.Minute, "Tue Jul 10 00:20 2012"}, - {"Mon Jul 9 23:35:51 2012", 44*time.Minute + 24*time.Second, "Tue Jul 10 00:20:15 2012"}, - {"Mon Jul 9 23:35:51 2012", 
25*time.Hour + 44*time.Minute + 24*time.Second, "Thu Jul 11 01:20:15 2012"}, + {"Mon Jul 9 23:46 2012", fourteenMinuteDelay, "Tue Jul 10 00:00 2012"}, + {"Mon Jul 9 23:45 2012", thirtyFiveMinuteDelay, "Tue Jul 10 00:20 2012"}, + {"Mon Jul 9 23:35:51 2012", fortyFourMinuteDelay + twentyFourSecondDelay, "Tue Jul 10 00:20:15 2012"}, + {"Mon Jul 9 23:35:51 2012", twentyFiveHourDelay + fortyFourMinuteDelay + twentyFourSecondDelay, "Thu Jul 11 01:20:15 2012"}, // Wrap around months - {"Mon Jul 9 23:35 2012", 91*24*time.Hour + 25*time.Minute, "Thu Oct 9 00:00 2012"}, + {"Mon Jul 9 23:35 2012", ninetyOneDayDelay + twentyFiveMinuteDelay, "Thu Oct 9 00:00 2012"}, // Wrap around minute, hour, day, month, and year - {"Mon Dec 31 23:59:45 2012", 15 * time.Second, "Tue Jan 1 00:00:00 2013"}, + {"Mon Dec 31 23:59:45 2012", fifteenSecondDelay, "Tue Jan 1 00:00:00 2013"}, // Round to nearest second on the delay - {"Mon Jul 9 14:45 2012", 15*time.Minute + 50*time.Nanosecond, "Mon Jul 9 15:00 2012"}, + {"Mon Jul 9 14:45 2012", quarterHourDelay + roundingNanosecondDelay, nextQuarterHourTimestamp}, // Round up to 1 second if the duration is less. - {"Mon Jul 9 14:45:00 2012", 15 * time.Millisecond, "Mon Jul 9 14:45:01 2012"}, + {"Mon Jul 9 14:45:00 2012", fifteenMillisecondDelay, "Mon Jul 9 14:45:01 2012"}, // Round to nearest second when calculating the next time. - {"Mon Jul 9 14:45:00.005 2012", 15 * time.Minute, "Mon Jul 9 15:00 2012"}, + {"Mon Jul 9 14:45:00.005 2012", quarterHourDelay, nextQuarterHourTimestamp}, // Round to nearest second for both. 
- {"Mon Jul 9 14:45:00.005 2012", 15*time.Minute + 50*time.Nanosecond, "Mon Jul 9 15:00 2012"}, + {"Mon Jul 9 14:45:00.005 2012", quarterHourDelay + roundingNanosecondDelay, nextQuarterHourTimestamp}, } - for _, c := range tests { - actual := Every(c.delay).Next(getTime(c.time)) - expected := getTime(c.expected) + for _, testCase := range tests { + actual := Every(testCase.delay).Next(getTime(testCase.time)) + + expected := getTime(testCase.expected) if actual != expected { - t.Errorf("%s, \"%s\": (expected) %v != %v (actual)", c.time, c.delay, expected, actual) + t.Errorf("%s, \"%s\": (expected) %v != %v (actual)", testCase.time, testCase.delay, expected, actual) } } } diff --git a/cron.go b/cron.go index c7e9176..3a5ef4b 100644 --- a/cron.go +++ b/cron.go @@ -2,7 +2,8 @@ package cron import ( "context" - "sort" + "fmt" + "slices" "sync" "time" ) @@ -26,7 +27,7 @@ type Cron struct { jobWaiter sync.WaitGroup } -// ScheduleParser is an interface for schedule spec parsers that return a Schedule +// ScheduleParser is an interface for schedule spec parsers that return a Schedule. type ScheduleParser interface { Parse(spec string) (Schedule, error) } @@ -40,10 +41,10 @@ type Job interface { type Schedule interface { // Next returns the next activation time, later than the given time. // Next is invoked initially, and then each time the job is run. - Next(time.Time) time.Time + Next(next time.Time) time.Time } -// EntryID identifies an entry within a Cron instance +// EntryID identifies an entry within a Cron instance. type EntryID int // Entry consists of a schedule and the func to execute on that schedule. @@ -74,44 +75,25 @@ type Entry struct { // Valid returns true if this is not the zero entry. func (e Entry) Valid() bool { return e.ID != 0 } -// byTime is a wrapper for sorting the entry array by time -// (with zero time at the end). 
-type byTime []*Entry - -func (s byTime) Len() int { return len(s) } -func (s byTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s byTime) Less(i, j int) bool { - // Two zero times should return false. - // Otherwise, zero is "greater" than any other time. - // (To sort it at the end of the list.) - if s[i].Next.IsZero() { - return false - } - if s[j].Next.IsZero() { - return true - } - return s[i].Next.Before(s[j].Next) -} - // New returns a new Cron job runner, modified by the given options. // // Available Settings // -// Time Zone -// Description: The time zone in which schedules are interpreted -// Default: time.Local +// Time Zone +// Description: The time zone in which schedules are interpreted +// Default: time.Local // -// Parser -// Description: Parser converts cron spec strings into cron.Schedules. -// Default: Accepts this spec: https://en.wikipedia.org/wiki/Cron +// Parser +// Description: Parser converts cron spec strings into cron.Schedules. +// Default: Accepts this spec: https://en.wikipedia.org/wiki/Cron // -// Chain -// Description: Wrap submitted jobs to customize behavior. -// Default: A chain that recovers panics and logs them to stderr. +// Chain +// Description: Wrap submitted jobs to customize behavior. +// Default: A chain that recovers panics and logs them to stderr. // // See "cron.With*" to modify the default behavior. 
func New(opts ...Option) *Cron { - c := &Cron{ + cronInstance := &Cron{ entries: nil, chain: NewChain(), add: make(chan *Entry), @@ -120,19 +102,23 @@ func New(opts ...Option) *Cron { remove: make(chan EntryID), running: false, runningMu: sync.Mutex{}, - logger: DefaultLogger, + logger: DefaultLogger(), location: time.Local, - parser: standardParser, + parser: NewStandardParser(), } for _, opt := range opts { - opt(c) + opt(cronInstance) } - return c + + return cronInstance } -// FuncJob is a wrapper that turns a func() into a cron.Job +const idleTimerDuration = 100000 * time.Hour + +// FuncJob is a wrapper that turns a func() into a cron.Job. type FuncJob func() +// Run executes the wrapped func. func (f FuncJob) Run() { f() } // AddFunc adds a func to the Cron to be run on the given schedule. @@ -148,8 +134,9 @@ func (c *Cron) AddFunc(spec string, cmd func()) (EntryID, error) { func (c *Cron) AddJob(spec string, cmd Job) (EntryID, error) { schedule, err := c.parser.Parse(spec) if err != nil { - return 0, err + return 0, fmt.Errorf("parse schedule %q: %w", spec, err) } + return c.Schedule(schedule, cmd), nil } @@ -158,7 +145,9 @@ func (c *Cron) AddJob(spec string, cmd Job) (EntryID, error) { func (c *Cron) Schedule(schedule Schedule, cmd Job) EntryID { c.runningMu.Lock() defer c.runningMu.Unlock() + c.nextID++ + entry := &Entry{ ID: c.nextID, Schedule: schedule, @@ -170,6 +159,7 @@ func (c *Cron) Schedule(schedule Schedule, cmd Job) EntryID { } else { c.add <- entry } + return entry.ID } @@ -177,15 +167,18 @@ func (c *Cron) Schedule(schedule Schedule, cmd Job) EntryID { func (c *Cron) Entries() []Entry { c.runningMu.Lock() defer c.runningMu.Unlock() + if c.running { replyChan := make(chan []Entry, 1) c.snapshot <- replyChan + return <-replyChan } + return c.entrySnapshot() } -// Location gets the time zone location +// Location gets the time zone location. 
func (c *Cron) Location() *time.Location { return c.location } @@ -197,6 +190,7 @@ func (c *Cron) Entry(id EntryID) Entry { return entry } } + return Entry{} } @@ -204,6 +198,7 @@ func (c *Cron) Entry(id EntryID) Entry { func (c *Cron) Remove(id EntryID) { c.runningMu.Lock() defer c.runningMu.Unlock() + if c.running { c.remove <- id } else { @@ -215,11 +210,13 @@ func (c *Cron) Remove(id EntryID) { func (c *Cron) Start() { c.runningMu.Lock() defer c.runningMu.Unlock() + if c.running { return } + c.running = true - go c.run() + go c.schedulerLoop() } // Run the cron scheduler, or no-op if already running. @@ -227,120 +224,191 @@ func (c *Cron) Run() { c.runningMu.Lock() if c.running { c.runningMu.Unlock() + return } + c.running = true c.runningMu.Unlock() - c.run() + c.schedulerLoop() +} + +// Stop stops the cron scheduler if it is running; otherwise it does nothing. +// A context is returned so the caller can wait for running jobs to complete. +func (c *Cron) Stop() context.Context { + c.runningMu.Lock() + defer c.runningMu.Unlock() + + if c.running { + c.stop <- struct{}{} + + c.running = false + } + + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + c.jobWaiter.Wait() + cancel() + }() + + return ctx } -// run the scheduler.. this is private just due to the need to synchronize -// access to the 'running' state variable. -func (c *Cron) run() { +// schedulerLoop runs the scheduler. It remains private because access to the +// running state is coordinated by Start and Run. +func (c *Cron) schedulerLoop() { c.logger.Info("start") - // Figure out the next activation times for each entry. 
+ now := c.initializeEntries() + + for { + sortEntriesByNext(c.entries) + + var shouldStop bool + + now, shouldStop = c.processSchedulerEvent(now) + if shouldStop { + return + } + } +} + +func sortEntriesByNext(entries []*Entry) { + slices.SortFunc(entries, compareEntryNext) +} + +func compareEntryNext(left, right *Entry) int { + leftNext := left.Next + rightNext := right.Next + + switch { + case leftNext.IsZero() && rightNext.IsZero(): + return compareEntryID(left, right) + case leftNext.IsZero(): + return 1 + case rightNext.IsZero(): + return -1 + case leftNext.Before(rightNext): + return -1 + case rightNext.Before(leftNext): + return 1 + default: + return compareEntryID(left, right) + } +} + +func compareEntryID(left, right *Entry) int { + if left.ID < right.ID { + return -1 + } + + if left.ID > right.ID { + return 1 + } + + return 0 +} + +func (c *Cron) initializeEntries() time.Time { now := c.now() for _, entry := range c.entries { entry.Next = entry.Schedule.Next(now) c.logger.Info("schedule", "now", now, "entry", entry.ID, "next", entry.Next) } + return now +} + +func (c *Cron) processSchedulerEvent(now time.Time) (time.Time, bool) { + timer := c.newSchedulerTimer(now) + defer timer.Stop() + for { - // Determine the next entry to run. - sort.Sort(byTime(c.entries)) - - var timer *time.Timer - if len(c.entries) == 0 || c.entries[0].Next.IsZero() { - // If there are no entries yet, just sleep - it still handles new entries - // and stop requests. 
- timer = time.NewTimer(100000 * time.Hour) - } else { - timer = time.NewTimer(c.entries[0].Next.Sub(now)) + select { + case firedAt := <-timer.C: + return c.handleTimerFired(firedAt), false + case newEntry := <-c.add: + return c.handleEntryAdded(newEntry), false + case replyChan := <-c.snapshot: + replyChan <- c.entrySnapshot() + case <-c.stop: + c.logger.Info("stop") + + return now, true + case id := <-c.remove: + return c.handleEntryRemoved(id), false } + } +} - for { - select { - case now = <-timer.C: - now = now.In(c.location) - c.logger.Info("wake", "now", now) - - // Run every entry whose next time was less than now - for _, e := range c.entries { - if e.Next.After(now) || e.Next.IsZero() { - break - } - c.startJob(e.WrappedJob) - e.Prev = e.Next - e.Next = e.Schedule.Next(now) - c.logger.Info("run", "now", now, "entry", e.ID, "next", e.Next) - } - - case newEntry := <-c.add: - timer.Stop() - now = c.now() - newEntry.Next = newEntry.Schedule.Next(now) - c.entries = append(c.entries, newEntry) - c.logger.Info("added", "now", now, "entry", newEntry.ID, "next", newEntry.Next) - - case replyChan := <-c.snapshot: - replyChan <- c.entrySnapshot() - continue - - case <-c.stop: - timer.Stop() - c.logger.Info("stop") - return - - case id := <-c.remove: - timer.Stop() - now = c.now() - c.removeEntry(id) - c.logger.Info("removed", "entry", id) - } +func (c *Cron) newSchedulerTimer(now time.Time) *time.Timer { + if len(c.entries) == 0 || c.entries[0].Next.IsZero() { + // If there are no entries yet, just sleep - it still handles new entries + // and stop requests. 
+ return time.NewTimer(idleTimerDuration) + } + + return time.NewTimer(c.entries[0].Next.Sub(now)) +} + +func (c *Cron) handleTimerFired(firedAt time.Time) time.Time { + now := firedAt.In(c.location) + c.logger.Info("wake", "now", now) + c.runDueEntries(now) + return now +} + +func (c *Cron) runDueEntries(now time.Time) { + for _, entry := range c.entries { + if entry.Next.After(now) || entry.Next.IsZero() { break } + + c.startJob(entry.WrappedJob) + entry.Prev = entry.Next + entry.Next = entry.Schedule.Next(now) + c.logger.Info("run", "now", now, "entry", entry.ID, "next", entry.Next) } } +func (c *Cron) handleEntryAdded(newEntry *Entry) time.Time { + now := c.now() + newEntry.Next = newEntry.Schedule.Next(now) + c.entries = append(c.entries, newEntry) + c.logger.Info("added", "now", now, "entry", newEntry.ID, "next", newEntry.Next) + + return now +} + +func (c *Cron) handleEntryRemoved(id EntryID) time.Time { + now := c.now() + c.removeEntry(id) + c.logger.Info("removed", "entry", id) + + return now +} + // startJob runs the given job in a new goroutine. func (c *Cron) startJob(j Job) { - c.jobWaiter.Add(1) - go func() { - defer c.jobWaiter.Done() + c.jobWaiter.Go(func() { j.Run() - }() + }) } -// now returns current time in c location +// now returns current time in c location. func (c *Cron) now() time.Time { return time.Now().In(c.location) } -// Stop stops the cron scheduler if it is running; otherwise it does nothing. -// A context is returned so the caller can wait for running jobs to complete. -func (c *Cron) Stop() context.Context { - c.runningMu.Lock() - defer c.runningMu.Unlock() - if c.running { - c.stop <- struct{}{} - c.running = false - } - ctx, cancel := context.WithCancel(context.Background()) - go func() { - c.jobWaiter.Wait() - cancel() - }() - return ctx -} - // entrySnapshot returns a copy of the current cron entry list. 
func (c *Cron) entrySnapshot() []Entry { - var entries = make([]Entry, len(c.entries)) + entries := make([]Entry, len(c.entries)) for i, e := range c.entries { entries[i] = *e } + return entries } @@ -351,5 +419,6 @@ func (c *Cron) removeEntry(id EntryID) { entries = append(entries, e) } } + c.entries = entries } diff --git a/cron_test.go b/cron_test.go index 36f06bf..5296c15 100644 --- a/cron_test.go +++ b/cron_test.go @@ -2,6 +2,7 @@ package cron import ( "bytes" + "context" "fmt" "log" "strings" @@ -16,21 +17,45 @@ import ( // compensate for a few milliseconds of runtime. const OneSecond = 1*time.Second + 50*time.Millisecond +const ( + everySecondSpec = "* * * * * ?" + everySecondWithSeconds = "* * * * * *" + januaryFirstSpec = "0 0 0 1 1 ?" + decemberThirtyFirstSpec = "0 0 0 31 12 ?" + invalidFebruarySpec = "0 0 0 30 Feb ?" + januaryFirstOffsetSpec = "1 0 0 1 1 ?" + secondsBoundaryThreshold = 58 + + expectedTwoFiringsMessage = "expected job fires 2 times" + wrongJobRetrievedMessage = "wrong job retrieved:" + stopContextDoneMessage = "context was not done immediately" + + slowStopJobDelay = 2 * time.Second + waitForStopCheck = 750 * time.Millisecond + waitForStopCompletion = 1500 * time.Millisecond +) + type syncWriter struct { wr bytes.Buffer m sync.Mutex } -func (sw *syncWriter) Write(data []byte) (n int, err error) { +func (sw *syncWriter) Write(data []byte) (int, error) { sw.m.Lock() - n, err = sw.wr.Write(data) + writtenBytes, err := sw.wr.Write(data) sw.m.Unlock() - return + + if err != nil { + return writtenBytes, fmt.Errorf("write sync buffer: %w", err) + } + + return writtenBytes, nil } func (sw *syncWriter) String() string { sw.m.Lock() defer sw.m.Unlock() + return sw.wr.String() } @@ -39,51 +64,59 @@ func newBufLogger(sw *syncWriter) Logger { } func TestFuncPanicRecovery(t *testing.T) { + t.Parallel() + var buf syncWriter - cron := New(WithParser(secondParser), + + cron := New(WithParser(testParserWithSeconds()), 
WithChain(Recover(newBufLogger(&buf)))) + cron.Start() defer cron.Stop() - cron.AddFunc("* * * * * ?", func() { + + mustAddFunc(t, cron, everySecondSpec, func() { panic("YOLO") }) - select { - case <-time.After(OneSecond): - if !strings.Contains(buf.String(), "YOLO") { - t.Error("expected a panic to be logged, got none") - } - return + time.Sleep(OneSecond) + + if !strings.Contains(buf.String(), "YOLO") { + t.Error("expected a panic to be logged, got none") } } type DummyJob struct{} -func (d DummyJob) Run() { +func (DummyJob) Run() { panic("YOLO") } func TestJobPanicRecovery(t *testing.T) { + t.Parallel() + var job DummyJob var buf syncWriter - cron := New(WithParser(secondParser), + + cron := New(WithParser(testParserWithSeconds()), WithChain(Recover(newBufLogger(&buf)))) + cron.Start() defer cron.Stop() - cron.AddJob("* * * * * ?", job) - select { - case <-time.After(OneSecond): - if !strings.Contains(buf.String(), "YOLO") { - t.Error("expected a panic to be logged, got none") - } - return + mustAddJob(t, cron, everySecondSpec, job) + + time.Sleep(OneSecond) + + if !strings.Contains(buf.String(), "YOLO") { + t.Error("expected a panic to be logged, got none") } } // Start and stop cron with no entries. func TestNoEntries(t *testing.T) { + t.Parallel() + cron := newWithSeconds() cron.Start() @@ -96,13 +129,15 @@ func TestNoEntries(t *testing.T) { // Start, stop, then add an entry. Verify entry doesn't run. func TestStopCausesJobsToNotRun(t *testing.T) { + t.Parallel() + wg := &sync.WaitGroup{} wg.Add(1) cron := newWithSeconds() cron.Start() cron.Stop() - cron.AddFunc("* * * * * ?", func() { wg.Done() }) + mustAddFunc(t, cron, everySecondSpec, func() { wg.Done() }) select { case <-time.After(OneSecond): @@ -114,11 +149,14 @@ func TestStopCausesJobsToNotRun(t *testing.T) { // Add a job, start cron, expect it runs. 
func TestAddBeforeRunning(t *testing.T) { + t.Parallel() + wg := &sync.WaitGroup{} wg.Add(1) cron := newWithSeconds() - cron.AddFunc("* * * * * ?", func() { wg.Done() }) + mustAddFunc(t, cron, everySecondSpec, func() { wg.Done() }) + cron.Start() defer cron.Stop() @@ -132,13 +170,17 @@ func TestAddBeforeRunning(t *testing.T) { // Start cron, add a job, expect it runs. func TestAddWhileRunning(t *testing.T) { + t.Parallel() + wg := &sync.WaitGroup{} wg.Add(1) cron := newWithSeconds() + cron.Start() defer cron.Stop() - cron.AddFunc("* * * * * ?", func() { wg.Done() }) + + mustAddFunc(t, cron, everySecondSpec, func() { wg.Done() }) select { case <-time.After(OneSecond): @@ -147,16 +189,23 @@ func TestAddWhileRunning(t *testing.T) { } } -// Test for #34. Adding a job after calling start results in multiple job invocations +// Test for #34. Adding a job after calling start results in multiple job invocations. func TestAddWhileRunningWithDelay(t *testing.T) { + t.Parallel() + cron := newWithSeconds() + cron.Start() defer cron.Stop() + time.Sleep(5 * time.Second) + var calls int64 - cron.AddFunc("* * * * * *", func() { atomic.AddInt64(&calls, 1) }) + + mustAddFunc(t, cron, everySecondWithSeconds, func() { atomic.AddInt64(&calls, 1) }) <-time.After(OneSecond) + if atomic.LoadInt64(&calls) != 1 { t.Errorf("called %d times, expected 1\n", calls) } @@ -164,12 +213,15 @@ func TestAddWhileRunningWithDelay(t *testing.T) { // Add a job, remove a job, start cron, expect nothing runs. func TestRemoveBeforeRunning(t *testing.T) { + t.Parallel() + wg := &sync.WaitGroup{} wg.Add(1) cron := newWithSeconds() - id, _ := cron.AddFunc("* * * * * ?", func() { wg.Done() }) + id := mustAddFunc(t, cron, everySecondSpec, func() { wg.Done() }) cron.Remove(id) + cron.Start() defer cron.Stop() @@ -183,13 +235,17 @@ func TestRemoveBeforeRunning(t *testing.T) { // Start cron, add a job, remove it, expect it doesn't run. 
func TestRemoveWhileRunning(t *testing.T) { + t.Parallel() + wg := &sync.WaitGroup{} wg.Add(1) cron := newWithSeconds() + cron.Start() defer cron.Stop() - id, _ := cron.AddFunc("* * * * * ?", func() { wg.Done() }) + + id := mustAddFunc(t, cron, everySecondSpec, func() { wg.Done() }) cron.Remove(id) select { @@ -201,19 +257,20 @@ func TestRemoveWhileRunning(t *testing.T) { // Test timing with Entries. func TestSnapshotEntries(t *testing.T) { + t.Parallel() + wg := &sync.WaitGroup{} wg.Add(1) cron := New() - cron.AddFunc("@every 2s", func() { wg.Done() }) + mustAddFunc(t, cron, "@every 2s", func() { wg.Done() }) + cron.Start() defer cron.Stop() // Cron should fire in 2 seconds. After 1 second, call Entries. - select { - case <-time.After(OneSecond): - cron.Entries() - } + time.Sleep(OneSecond) + cron.Entries() // Even though Entries was called, the cron should fire at the 2 second mark. select { @@ -228,19 +285,22 @@ func TestSnapshotEntries(t *testing.T) { // that the immediate entry runs immediately. // Also: Test that multiple jobs run in the same instant. 
func TestMultipleEntries(t *testing.T) { + t.Parallel() + wg := &sync.WaitGroup{} wg.Add(2) cron := newWithSeconds() - cron.AddFunc("0 0 0 1 1 ?", func() {}) - cron.AddFunc("* * * * * ?", func() { wg.Done() }) - id1, _ := cron.AddFunc("* * * * * ?", func() { t.Fatal() }) - id2, _ := cron.AddFunc("* * * * * ?", func() { t.Fatal() }) - cron.AddFunc("0 0 0 31 12 ?", func() {}) - cron.AddFunc("* * * * * ?", func() { wg.Done() }) + mustAddFunc(t, cron, januaryFirstSpec, func() {}) + mustAddFunc(t, cron, everySecondSpec, func() { wg.Done() }) + id1 := mustAddFunc(t, cron, everySecondSpec, func() { t.Fatal() }) + id2 := mustAddFunc(t, cron, everySecondSpec, func() { t.Fatal() }) + mustAddFunc(t, cron, decemberThirtyFirstSpec, func() {}) + mustAddFunc(t, cron, everySecondSpec, func() { wg.Done() }) cron.Remove(id1) cron.Start() + cron.Remove(id2) defer cron.Stop() @@ -253,32 +313,36 @@ func TestMultipleEntries(t *testing.T) { // Test running the same job twice. func TestRunningJobTwice(t *testing.T) { + t.Parallel() + wg := &sync.WaitGroup{} wg.Add(2) cron := newWithSeconds() - cron.AddFunc("0 0 0 1 1 ?", func() {}) - cron.AddFunc("0 0 0 31 12 ?", func() {}) - cron.AddFunc("* * * * * ?", func() { wg.Done() }) + mustAddFunc(t, cron, januaryFirstSpec, func() {}) + mustAddFunc(t, cron, decemberThirtyFirstSpec, func() {}) + mustAddFunc(t, cron, everySecondSpec, func() { wg.Done() }) cron.Start() defer cron.Stop() select { case <-time.After(2 * OneSecond): - t.Error("expected job fires 2 times") + t.Error(expectedTwoFiringsMessage) case <-wait(wg): } } func TestRunningMultipleSchedules(t *testing.T) { + t.Parallel() + wg := &sync.WaitGroup{} wg.Add(2) cron := newWithSeconds() - cron.AddFunc("0 0 0 1 1 ?", func() {}) - cron.AddFunc("0 0 0 31 12 ?", func() {}) - cron.AddFunc("* * * * * ?", func() { wg.Done() }) + mustAddFunc(t, cron, januaryFirstSpec, func() {}) + mustAddFunc(t, cron, decemberThirtyFirstSpec, func() {}) + mustAddFunc(t, cron, everySecondSpec, func() { wg.Done() 
}) cron.Schedule(Every(time.Minute), FuncJob(func() {})) cron.Schedule(Every(time.Second), FuncJob(func() { wg.Done() })) cron.Schedule(Every(time.Hour), FuncJob(func() {})) @@ -288,13 +352,15 @@ func TestRunningMultipleSchedules(t *testing.T) { select { case <-time.After(2 * OneSecond): - t.Error("expected job fires 2 times") + t.Error(expectedTwoFiringsMessage) case <-wait(wg): } } // Test that the cron is run in the local time zone (as opposed to UTC). func TestLocalTimezone(t *testing.T) { + t.Parallel() + wg := &sync.WaitGroup{} wg.Add(2) @@ -302,55 +368,59 @@ func TestLocalTimezone(t *testing.T) { // FIX: Issue #205 // This calculation doesn't work in seconds 58 or 59. // Take the easy way out and sleep. - if now.Second() >= 58 { + if now.Second() >= secondsBoundaryThreshold { time.Sleep(2 * time.Second) + now = time.Now() } + spec := fmt.Sprintf("%d,%d %d %d %d %d ?", now.Second()+1, now.Second()+2, now.Minute(), now.Hour(), now.Day(), now.Month()) cron := newWithSeconds() - cron.AddFunc(spec, func() { wg.Done() }) + mustAddFunc(t, cron, spec, func() { wg.Done() }) + cron.Start() defer cron.Stop() select { case <-time.After(OneSecond * 2): - t.Error("expected job fires 2 times") + t.Error(expectedTwoFiringsMessage) case <-wait(wg): } } // Test that the cron is run in the given time zone (as opposed to local). func TestNonLocalTimezone(t *testing.T) { + t.Parallel() + wg := &sync.WaitGroup{} wg.Add(2) - loc, err := time.LoadLocation("Atlantic/Cape_Verde") - if err != nil { - fmt.Printf("Failed to load time zone Atlantic/Cape_Verde: %+v", err) - t.Fail() - } + loc := mustLoadLocation(t, "Atlantic/Cape_Verde") now := time.Now().In(loc) // FIX: Issue #205 // This calculation doesn't work in seconds 58 or 59. // Take the easy way out and sleep. 
- if now.Second() >= 58 { + if now.Second() >= secondsBoundaryThreshold { time.Sleep(2 * time.Second) + now = time.Now().In(loc) } + spec := fmt.Sprintf("%d,%d %d %d %d %d ?", now.Second()+1, now.Second()+2, now.Minute(), now.Hour(), now.Day(), now.Month()) - cron := New(WithLocation(loc), WithParser(secondParser)) - cron.AddFunc(spec, func() { wg.Done() }) + cron := New(WithLocation(loc), WithParser(testParserWithSeconds())) + mustAddFunc(t, cron, spec, func() { wg.Done() }) + cron.Start() defer cron.Stop() select { case <-time.After(OneSecond * 2): - t.Error("expected job fires 2 times") + t.Error(expectedTwoFiringsMessage) case <-wait(wg): } } @@ -358,6 +428,8 @@ func TestNonLocalTimezone(t *testing.T) { // Test that calling stop before start silently returns without // blocking the stop channel. func TestStopWithoutStart(t *testing.T) { + t.Parallel() + cron := New() cron.Stop() } @@ -371,29 +443,35 @@ func (t testJob) Run() { t.wg.Done() } -// Test that adding an invalid job spec returns an error +// Test that adding an invalid job spec returns an error. func TestInvalidJobSpec(t *testing.T) { + t.Parallel() + cron := New() + _, err := cron.AddJob("this will not parse", nil) if err == nil { - t.Errorf("expected an error with invalid spec, got nil") + t.Error("expected an error with invalid spec, got nil") } } -// Test blocking run method behaves as Start() +// Test blocking run method behaves as Start(). func TestBlockingRun(t *testing.T) { + t.Parallel() + wg := &sync.WaitGroup{} wg.Add(1) cron := newWithSeconds() - cron.AddFunc("* * * * * ?", func() { wg.Done() }) + mustAddFunc(t, cron, everySecondSpec, func() { wg.Done() }) - var unblockChan = make(chan struct{}) + unblockChan := make(chan struct{}) go func() { cron.Run() close(unblockChan) }() + defer cron.Stop() select { @@ -405,12 +483,14 @@ func TestBlockingRun(t *testing.T) { } } -// Test that double-running is a no-op +// Test that double-running is a no-op. 
func TestStartNoop(t *testing.T) { - var tickChan = make(chan struct{}, 2) + t.Parallel() + + tickChan := make(chan struct{}, 2) cron := newWithSeconds() - cron.AddFunc("* * * * * ?", func() { + mustAddFunc(t, cron, everySecondSpec, func() { tickChan <- struct{}{} }) @@ -434,23 +514,26 @@ func TestStartNoop(t *testing.T) { // Simple test using Runnables. func TestJob(t *testing.T) { + t.Parallel() + wg := &sync.WaitGroup{} wg.Add(1) cron := newWithSeconds() - cron.AddJob("0 0 0 30 Feb ?", testJob{wg, "job0"}) - cron.AddJob("0 0 0 1 1 ?", testJob{wg, "job1"}) - job2, _ := cron.AddJob("* * * * * ?", testJob{wg, "job2"}) - cron.AddJob("1 0 0 1 1 ?", testJob{wg, "job3"}) + mustAddJob(t, cron, invalidFebruarySpec, testJob{wg, "job0"}) + mustAddJob(t, cron, januaryFirstSpec, testJob{wg, "job1"}) + job2 := mustAddJob(t, cron, everySecondSpec, testJob{wg, "job2"}) + mustAddJob(t, cron, januaryFirstOffsetSpec, testJob{wg, "job3"}) cron.Schedule(Every(5*time.Second+5*time.Nanosecond), testJob{wg, "job4"}) job5 := cron.Schedule(Every(5*time.Minute), testJob{wg, "job5"}) // Test getting an Entry pre-Start. - if actualName := cron.Entry(job2).Job.(testJob).name; actualName != "job2" { - t.Error("wrong job retrieved:", actualName) + if actualName := requireTestJobName(t, cron, job2); actualName != "job2" { + t.Error(wrongJobRetrievedMessage, actualName) } - if actualName := cron.Entry(job5).Job.(testJob).name; actualName != "job5" { - t.Error("wrong job retrieved:", actualName) + + if actualName := requireTestJobName(t, cron, job5); actualName != "job5" { + t.Error(wrongJobRetrievedMessage, actualName) } cron.Start() @@ -465,9 +548,11 @@ func TestJob(t *testing.T) { // Ensure the entries are in the right order. 
expecteds := []string{"job2", "job4", "job5", "job1", "job3", "job0"} - var actuals []string - for _, entry := range cron.Entries() { - actuals = append(actuals, entry.Job.(testJob).name) + entries := cron.Entries() + + actuals := make([]string, 0, len(entries)) + for _, entry := range entries { + actuals = append(actuals, requireType[testJob](t, entry.Job).name) } for i, expected := range expecteds { @@ -477,19 +562,25 @@ func TestJob(t *testing.T) { } // Test getting Entries. - if actualName := cron.Entry(job2).Job.(testJob).name; actualName != "job2" { - t.Error("wrong job retrieved:", actualName) + if actualName := requireTestJobName(t, cron, job2); actualName != "job2" { + t.Error(wrongJobRetrievedMessage, actualName) } - if actualName := cron.Entry(job5).Job.(testJob).name; actualName != "job5" { - t.Error("wrong job retrieved:", actualName) + + if actualName := requireTestJobName(t, cron, job5); actualName != "job5" { + t.Error(wrongJobRetrievedMessage, actualName) } } // Issue #206 // Ensure that the next run of a job after removing an entry is accurate. func TestScheduleAfterRemoval(t *testing.T) { - var wg1 sync.WaitGroup - var wg2 sync.WaitGroup + t.Parallel() + + var ( + wg1 sync.WaitGroup + wg2 sync.WaitGroup + ) + wg1.Add(1) wg2.Add(1) @@ -497,27 +588,33 @@ func TestScheduleAfterRemoval(t *testing.T) { // 750ms later. Correct behavior would be to still run the job again in // 250ms, but the bug would cause it to run instead 1s later. 
- var calls int - var mu sync.Mutex + var ( + calls int + mu sync.Mutex + ) cron := newWithSeconds() hourJob := cron.Schedule(Every(time.Hour), FuncJob(func() {})) cron.Schedule(Every(time.Second), FuncJob(func() { mu.Lock() defer mu.Unlock() + switch calls { case 0: wg1.Done() + calls++ case 1: time.Sleep(750 * time.Millisecond) cron.Remove(hourJob) + calls++ case 2: calls++ + wg2.Done() - case 3: - panic("unexpected 3rd call") + default: + panic("unexpected extra call") } })) @@ -530,173 +627,213 @@ func TestScheduleAfterRemoval(t *testing.T) { select { case <-time.After(2 * OneSecond): - t.Error("expected job fires 2 times") + t.Error(expectedTwoFiringsMessage) case <-wait(&wg2): } } type ZeroSchedule struct{} -func (*ZeroSchedule) Next(time.Time) time.Time { +// Next always returns the zero time, which is never. +func (*ZeroSchedule) Next(_ time.Time) time.Time { return time.Time{} } -// Tests that job without time does not run +// Tests that job without time does not run. func TestJobWithZeroTimeDoesNotRun(t *testing.T) { + t.Parallel() + cron := newWithSeconds() + var calls int64 - cron.AddFunc("* * * * * *", func() { atomic.AddInt64(&calls, 1) }) + + mustAddFunc(t, cron, everySecondWithSeconds, func() { atomic.AddInt64(&calls, 1) }) cron.Schedule(new(ZeroSchedule), FuncJob(func() { t.Error("expected zero task will not run") })) + cron.Start() defer cron.Stop() + <-time.After(OneSecond) + if atomic.LoadInt64(&calls) != 1 { t.Errorf("called %d times, expected 1\n", calls) } } func TestStopAndWait(t *testing.T) { - t.Run("nothing running, returns immediately", func(t *testing.T) { - cron := newWithSeconds() - cron.Start() - ctx := cron.Stop() - select { - case <-ctx.Done(): - case <-time.After(time.Millisecond): - t.Error("context was not done immediately") - } - }) - - t.Run("repeated calls to Stop", func(t *testing.T) { - cron := newWithSeconds() - cron.Start() - _ = cron.Stop() - time.Sleep(time.Millisecond) - ctx := cron.Stop() - select { - case 
<-ctx.Done(): - case <-time.After(time.Millisecond): - t.Error("context was not done immediately") - } - }) + t.Parallel() - t.Run("a couple fast jobs added, still returns immediately", func(t *testing.T) { - cron := newWithSeconds() - cron.AddFunc("* * * * * *", func() {}) - cron.Start() - cron.AddFunc("* * * * * *", func() {}) - cron.AddFunc("* * * * * *", func() {}) - cron.AddFunc("* * * * * *", func() {}) - time.Sleep(time.Second) - ctx := cron.Stop() - select { - case <-ctx.Done(): - case <-time.After(time.Millisecond): - t.Error("context was not done immediately") - } - }) - - t.Run("a couple fast jobs and a slow job added, waits for slow job", func(t *testing.T) { - cron := newWithSeconds() - cron.AddFunc("* * * * * *", func() {}) - cron.Start() - cron.AddFunc("* * * * * *", func() { time.Sleep(2 * time.Second) }) - cron.AddFunc("* * * * * *", func() {}) - time.Sleep(time.Second) - - ctx := cron.Stop() - - // Verify that it is not done for at least 750ms - select { - case <-ctx.Done(): - t.Error("context was done too quickly immediately") - case <-time.After(750 * time.Millisecond): - // expected, because the job sleeping for 1 second is still running - } - - // Verify that it IS done in the next 500ms (giving 250ms buffer) - select { - case <-ctx.Done(): - // expected - case <-time.After(1500 * time.Millisecond): - t.Error("context not done after job should have completed") - } - }) - - t.Run("repeated calls to stop, waiting for completion and after", func(t *testing.T) { - cron := newWithSeconds() - cron.AddFunc("* * * * * *", func() {}) - cron.AddFunc("* * * * * *", func() { time.Sleep(2 * time.Second) }) - cron.Start() - cron.AddFunc("* * * * * *", func() {}) - time.Sleep(time.Second) - ctx := cron.Stop() - ctx2 := cron.Stop() - - // Verify that it is not done for at least 1500ms - select { - case <-ctx.Done(): - t.Error("context was done too quickly immediately") - case <-ctx2.Done(): - t.Error("context2 was done too quickly immediately") - case 
<-time.After(1500 * time.Millisecond): - // expected, because the job sleeping for 2 seconds is still running - } - - // Verify that it IS done in the next 1s (giving 500ms buffer) - select { - case <-ctx.Done(): - // expected - case <-time.After(time.Second): - t.Error("context not done after job should have completed") - } - - // Verify that ctx2 is also done. - select { - case <-ctx2.Done(): - // expected - case <-time.After(time.Millisecond): - t.Error("context2 not done even though context1 is") - } - - // Verify that a new context retrieved from stop is immediately done. - ctx3 := cron.Stop() - select { - case <-ctx3.Done(): - // expected - case <-time.After(time.Millisecond): - t.Error("context not done even when cron Stop is completed") - } - - }) + t.Run("nothing running, returns immediately", testStopAndWaitNothingRunning) + t.Run("repeated calls to Stop", testStopAndWaitRepeatedCalls) + t.Run("a couple fast jobs added, still returns immediately", testStopAndWaitFastJobs) + t.Run("a couple fast jobs and a slow job added, waits for slow job", testStopAndWaitSlowJob) + t.Run("repeated calls to stop, waiting for completion and after", testStopAndWaitRepeatedWhileWaiting) } func TestMultiThreadedStartAndStop(t *testing.T) { + t.Parallel() + cron := New() go cron.Run() + time.Sleep(2 * time.Millisecond) cron.Stop() } func wait(wg *sync.WaitGroup) chan bool { ch := make(chan bool) + go func() { wg.Wait() + ch <- true }() + return ch } func stop(cron *Cron) chan bool { ch := make(chan bool) + go func() { cron.Stop() + ch <- true }() + return ch } // newWithSeconds returns a Cron with the seconds field enabled. 
func newWithSeconds() *Cron { - return New(WithParser(secondParser), WithChain()) + return New(WithParser(testParserWithSeconds()), WithChain()) +} + +func requireTestJobName(t *testing.T, cron *Cron, id EntryID) string { + t.Helper() + + return requireType[testJob](t, cron.Entry(id).Job).name +} + +func testStopAndWaitNothingRunning(t *testing.T) { + t.Parallel() + + cron := newWithSeconds() + cron.Start() + + requireContextDoneWithin(cron.Stop(), t, time.Millisecond, stopContextDoneMessage) +} + +func testStopAndWaitRepeatedCalls(t *testing.T) { + t.Parallel() + + cron := newWithSeconds() + cron.Start() + _ = cron.Stop() + + time.Sleep(time.Millisecond) + + requireContextDoneWithin(cron.Stop(), t, time.Millisecond, stopContextDoneMessage) +} + +func testStopAndWaitFastJobs(t *testing.T) { + t.Parallel() + + cron := newWithSeconds() + mustAddFunc(t, cron, everySecondWithSeconds, func() {}) + cron.Start() + mustAddFunc(t, cron, everySecondWithSeconds, func() {}) + mustAddFunc(t, cron, everySecondWithSeconds, func() {}) + mustAddFunc(t, cron, everySecondWithSeconds, func() {}) + time.Sleep(time.Second) + + requireContextDoneWithin(cron.Stop(), t, time.Millisecond, stopContextDoneMessage) +} + +func testStopAndWaitSlowJob(t *testing.T) { + t.Parallel() + + slowJobStarted := make(chan struct{}, 1) + cron := newWithSeconds() + mustAddFunc(t, cron, everySecondWithSeconds, func() {}) + cron.Start() + mustAddFunc(t, cron, everySecondWithSeconds, func() { + signalJobStarted(slowJobStarted) + time.Sleep(slowStopJobDelay) + }) + mustAddFunc(t, cron, everySecondWithSeconds, func() {}) + + waitForJobStarted(t, slowJobStarted) + + ctx := cron.Stop() + requireContextPendingFor(ctx, t, waitForStopCheck, "context was done too quickly immediately") + requireContextDoneWithin(ctx, t, waitForStopCompletion, "context not done after job should have completed") +} + +func testStopAndWaitRepeatedWhileWaiting(t *testing.T) { + t.Parallel() + + slowJobStarted := make(chan struct{}, 1) + 
cron := newWithSeconds() + mustAddFunc(t, cron, everySecondWithSeconds, func() {}) + mustAddFunc(t, cron, everySecondWithSeconds, func() { + signalJobStarted(slowJobStarted) + time.Sleep(slowStopJobDelay) + }) + cron.Start() + mustAddFunc(t, cron, everySecondWithSeconds, func() {}) + + waitForJobStarted(t, slowJobStarted) + + ctx := cron.Stop() + ctx2 := cron.Stop() + + select { + case <-ctx.Done(): + t.Error("context was done too quickly immediately") + case <-ctx2.Done(): + t.Error("context2 was done too quickly immediately") + case <-time.After(waitForStopCompletion): + } + + requireContextDoneWithin(ctx, t, time.Second, "context not done after job should have completed") + requireContextDoneWithin(ctx2, t, time.Millisecond, "context2 not done even though context1 is") + requireContextDoneWithin(cron.Stop(), t, time.Millisecond, "context not done even when cron Stop is completed") +} + +func requireContextDoneWithin(ctx context.Context, t *testing.T, timeout time.Duration, message string) { + t.Helper() + + select { + case <-ctx.Done(): + case <-time.After(timeout): + t.Error(message) + } +} + +func requireContextPendingFor(ctx context.Context, t *testing.T, duration time.Duration, message string) { + t.Helper() + + select { + case <-ctx.Done(): + t.Error(message) + case <-time.After(duration): + } +} + +func waitForJobStarted(t *testing.T, started <-chan struct{}) { + t.Helper() + + select { + case <-started: + case <-time.After(OneSecond): + t.Fatal("slow job did not start in time") + } +} + +func signalJobStarted(started chan<- struct{}) { + select { + case started <- struct{}{}: + default: + } } diff --git a/doc.go b/doc.go index fa5d08b..b9d00f5 100644 --- a/doc.go +++ b/doc.go @@ -1,7 +1,7 @@ /* Package cron implements a cron spec parser and job runner. -Installation +# Installation To download the specific tagged release, run: @@ -13,7 +13,7 @@ Import it in your program as: It requires Go 1.11 or later due to usage of Go Modules. 
-Usage +# Usage Callers may register Funcs to be invoked on a given schedule. Cron will run them in their own goroutines. @@ -36,7 +36,7 @@ them in their own goroutines. .. c.Stop() // Stop the scheduler (does not stop any jobs already running). -CRON Expression Format +# CRON Expression Format A cron expression represents a set of times, using 5 space-separated fields. @@ -54,7 +54,7 @@ Month and Day-of-week field values are case insensitive. "SUN", "Sun", and The specific interpretation of the format is based on the Cron Wikipedia page: https://en.wikipedia.org/wiki/Cron -Alternative Formats +# Alternative Formats Alternative Cron expression formats support other fields like seconds. You can implement that by creating a custom Parser as follows. @@ -73,7 +73,7 @@ parser you saw earlier, except that its seconds field is REQUIRED: That emulates Quartz, the most popular alternative Cron schedule format: http://www.quartz-scheduler.org/documentation/quartz-2.x/tutorials/crontrigger.html -Special Characters +# Special Characters Asterisk ( * ) @@ -105,7 +105,7 @@ Question mark ( ? ) Question mark may be used instead of '*' for leaving either day-of-month or day-of-week blank. -Predefined schedules +# Predefined schedules You may use one of several pre-defined schedules in place of a cron expression. @@ -117,12 +117,12 @@ You may use one of several pre-defined schedules in place of a cron expression. @daily (or @midnight) | Run once a day, midnight | 0 0 * * * @hourly | Run once an hour, beginning of hour | 0 * * * * -Intervals +# Intervals You may also schedule a job to execute at fixed intervals, starting at the time it's added or cron is run. This is supported by formatting the cron spec like this: - @every + @every where "duration" is a string accepted by time.ParseDuration (http://golang.org/pkg/time/#ParseDuration). @@ -134,13 +134,13 @@ Note: The interval does not take the job runtime into account. 
For example, if a job takes 3 minutes to run, and it is scheduled to run every 5 minutes, it will have only 2 minutes of idle time between each run. -Time zones +# Time zones By default, all interpretation and scheduling is done in the machine's local time zone (time.Local). You can specify a different time zone on construction: - cron.New( - cron.WithLocation(time.UTC)) + cron.New( + cron.WithLocation(time.UTC)) Individual cron schedules may also override the time zone they are to be interpreted in by providing an additional space-separated field at the beginning @@ -169,7 +169,7 @@ The prefix "TZ=(TIME ZONE)" is also supported for legacy compatibility. Be aware that jobs scheduled during daylight-savings leap-ahead transitions will not be run! -Job Wrappers +# Job Wrappers A Cron runner may be configured with a chain of job wrappers to add cross-cutting functionality to all submitted jobs. For example, they may be used @@ -192,7 +192,7 @@ Install wrappers for individual jobs by explicitly wrapping them: cron.SkipIfStillRunning(logger), ).Then(job) -Thread safety +# Thread safety Since the Cron service runs concurrently with the calling code, some amount of care must be taken to ensure proper synchronization. @@ -200,7 +200,7 @@ care must be taken to ensure proper synchronization. All cron methods are designed to be correctly synchronized as long as the caller ensures that invocations have a clear happens-before ordering between them. -Logging +# Logging Cron defines a Logger interface that is a subset of the one defined in github.com/go-logr/logr. It has two logging levels (Info and Error), and @@ -216,16 +216,15 @@ Activate it with a one-off logger as follows: cron.WithLogger( cron.VerbosePrintfLogger(log.New(os.Stdout, "cron: ", log.LstdFlags)))) - -Implementation +# Implementation Cron entries are stored in an array, sorted by their next activation time. Cron sleeps until the next job is due to be run. 
Upon waking: - - it runs each entry that is active on that second - - it calculates the next run times for the jobs that were run - - it re-sorts the array of entries by next activation time. - - it goes to sleep until the soonest job. + - it runs each entry that is active on that second + - it calculates the next run times for the jobs that were run + - it re-sorts the array of entries by next activation time. + - it goes to sleep until the soonest job. */ package cron diff --git a/go.mod b/go.mod index 8c95bf4..0b45362 100644 --- a/go.mod +++ b/go.mod @@ -1,3 +1,3 @@ module github.com/robfig/cron/v3 -go 1.12 +go 1.26.2 diff --git a/helpers_test.go b/helpers_test.go new file mode 100644 index 0000000..0f4c4e4 --- /dev/null +++ b/helpers_test.go @@ -0,0 +1,58 @@ +package cron + +import ( + "testing" + "time" +) + +func testParserWithSeconds() Parser { + return NewParser(Second | Minute | Hour | Dom | Month | DowOptional | Descriptor) +} + +func mustAddFunc(t *testing.T, cron *Cron, spec string, cmd func()) EntryID { + t.Helper() + + entryID, err := cron.AddFunc(spec, cmd) + if err != nil { + t.Fatalf("add func %q: %v", spec, err) + } + + return entryID +} + +func mustAddJob(t *testing.T, cron *Cron, spec string, job Job) EntryID { + t.Helper() + + entryID, err := cron.AddJob(spec, job) + if err != nil { + t.Fatalf("add job %q: %v", spec, err) + } + + return entryID +} + +func mustLoadLocation(t *testing.T, name string) *time.Location { + t.Helper() + + location, err := time.LoadLocation(name) + if err != nil { + t.Fatalf("load location %q: %v", name, err) + } + + return location +} + +func requireType[T any](t *testing.T, value any) T { + t.Helper() + + typedValue, ok := value.(T) + if !ok { + var zeroValue T + + t.Fatalf("unexpected type %T", value) + + return zeroValue + } + + return typedValue +} diff --git a/logger.go b/logger.go index b4efcc0..4974655 100644 --- a/logger.go +++ b/logger.go @@ -1,7 +1,7 @@ package cron import ( - "io/ioutil" + "io" "log" "os" 
"strings" @@ -9,51 +9,59 @@ import ( ) // DefaultLogger is used by Cron if none is specified. -var DefaultLogger Logger = PrintfLogger(log.New(os.Stdout, "cron: ", log.LstdFlags)) +func DefaultLogger() Logger { + return PrintfLogger(log.New(os.Stdout, "cron: ", log.LstdFlags)) +} // DiscardLogger can be used by callers to discard all log messages. -var DiscardLogger Logger = PrintfLogger(log.New(ioutil.Discard, "", 0)) +func DiscardLogger() Logger { + return PrintfLogger(log.New(io.Discard, "", 0)) +} // Logger is the interface used in this package for logging, so that any backend // can be plugged in. It is a subset of the github.com/go-logr/logr interface. type Logger interface { // Info logs routine messages about cron's operation. - Info(msg string, keysAndValues ...interface{}) + Info(msg string, keysAndValues ...any) // Error logs an error condition. - Error(err error, msg string, keysAndValues ...interface{}) + Error(err error, msg string, keysAndValues ...any) } // PrintfLogger wraps a Printf-based logger (such as the standard library "log") // into an implementation of the Logger interface which logs errors only. -func PrintfLogger(l interface{ Printf(string, ...interface{}) }) Logger { +func PrintfLogger(l interface{ Printf(_ string, _ ...any) }) Logger { return printfLogger{l, false} } // VerbosePrintfLogger wraps a Printf-based logger (such as the standard library // "log") into an implementation of the Logger interface which logs everything. -func VerbosePrintfLogger(l interface{ Printf(string, ...interface{}) }) Logger { +func VerbosePrintfLogger(l interface{ Printf(_ string, _ ...any) }) Logger { return printfLogger{l, true} } type printfLogger struct { - logger interface{ Printf(string, ...interface{}) } + logger interface{ Printf(_ string, _ ...any) } logInfo bool } -func (pl printfLogger) Info(msg string, keysAndValues ...interface{}) { +const errorLogFields = 2 + +// Info logs routine messages about cron's operation. 
+func (pl printfLogger) Info(msg string, keysAndValues ...any) { if pl.logInfo { keysAndValues = formatTimes(keysAndValues) pl.logger.Printf( formatString(len(keysAndValues)), - append([]interface{}{msg}, keysAndValues...)...) + append([]any{msg}, keysAndValues...)...) } } -func (pl printfLogger) Error(err error, msg string, keysAndValues ...interface{}) { +// Error logs an error condition. +func (pl printfLogger) Error(err error, msg string, keysAndValues ...any) { keysAndValues = formatTimes(keysAndValues) pl.logger.Printf( - formatString(len(keysAndValues)+2), - append([]interface{}{msg, "error", err}, keysAndValues...)...) + formatString(len(keysAndValues)+errorLogFields), + append([]any{msg, "error", err}, keysAndValues...)...) } // formatString returns a logfmt-like format string for the number of @@ -61,26 +69,33 @@ func (pl printfLogger) Error(err error, msg string, keysAndValues ...interface{} func formatString(numKeysAndValues int) string { var sb strings.Builder sb.WriteString("%s") + if numKeysAndValues > 0 { sb.WriteString(", ") } - for i := 0; i < numKeysAndValues/2; i++ { + + for i := range numKeysAndValues / 2 { if i > 0 { sb.WriteString(", ") } + sb.WriteString("%v=%v") } + return sb.String() } // formatTimes formats any time.Time values as RFC3339. 
-func formatTimes(keysAndValues []interface{}) []interface{} { - var formattedArgs []interface{} +func formatTimes(keysAndValues []any) []any { + formattedArgs := make([]any, 0, len(keysAndValues)) + for _, arg := range keysAndValues { if t, ok := arg.(time.Time); ok { arg = t.Format(time.RFC3339) } + formattedArgs = append(formattedArgs, arg) } + return formattedArgs } diff --git a/option_test.go b/option_test.go index 8aef168..5739d54 100644 --- a/option_test.go +++ b/option_test.go @@ -8,6 +8,8 @@ import ( ) func TestWithLocation(t *testing.T) { + t.Parallel() + c := New(WithLocation(time.UTC)) if c.location != time.UTC { t.Errorf("expected UTC, got %v", c.location) @@ -15,7 +17,10 @@ func TestWithLocation(t *testing.T) { } func TestWithParser(t *testing.T) { - var parser = NewParser(Dow) + t.Parallel() + + parser := NewParser(Dow) + c := New(WithParser(parser)) if c.parser != parser { t.Error("expected provided parser") @@ -23,17 +28,22 @@ func TestWithParser(t *testing.T) { } func TestWithVerboseLogger(t *testing.T) { + t.Parallel() + var buf syncWriter - var logger = log.New(&buf, "", log.LstdFlags) - c := New(WithLogger(VerbosePrintfLogger(logger))) - if c.logger.(printfLogger).logger != logger { + + logger := log.New(&buf, "", log.LstdFlags) + + cron := New(WithLogger(VerbosePrintfLogger(logger))) + if requireType[printfLogger](t, cron.logger).logger != logger { t.Error("expected provided logger") } - c.AddFunc("@every 1s", func() {}) - c.Start() + mustAddFunc(t, cron, "@every 1s", func() {}) + cron.Start() time.Sleep(OneSecond) - c.Stop() + cron.Stop() + out := buf.String() if !strings.Contains(out, "schedule,") || !strings.Contains(out, "run,") { diff --git a/parser.go b/parser.go index 8da6547..1046118 100644 --- a/parser.go +++ b/parser.go @@ -1,6 +1,7 @@ package cron import ( + "errors" "fmt" "math" "strconv" @@ -8,43 +9,77 @@ import ( "time" ) -// Configuration options for creating a parser. 
Most options specify which +var ( + errEmptySpec = errors.New("empty spec string") + errDescriptorNotAllowed = errors.New("parser does not accept descriptors") + errMultipleOptionals = errors.New("multiple optionals may not be configured") + errUnexpectedFieldCount = errors.New("unexpected field count") + errUnknownOptional = errors.New("unknown optional field") + errTooManySlashes = errors.New("too many slashes") + errTooManyHyphens = errors.New("too many hyphens") + errRangeBelowMinimum = errors.New("beginning of range below minimum") + errRangeAboveMaximum = errors.New("end of range above maximum") + errRangeStartBeyondEnd = errors.New("beginning of range beyond end of range") + errRangeStepMustBePositive = errors.New("step of range should be a positive number") + errNegativeNumber = errors.New("negative number not allowed") + errUnrecognizedDescriptor = errors.New("unrecognized descriptor") +) + +const ( + splitPairCount = 2 + wrappedErrorWithValueFormat = "%w: %s" +) + +// ParseOption represents configuration options for creating a parser. Most options specify which // fields should be included, while others enable features. If a field is not // included the parser will assume a default value. These options do not change // the order fields are parse in. type ParseOption int const ( - Second ParseOption = 1 << iota // Seconds field, default 0 - SecondOptional // Optional seconds field, default 0 - Minute // Minutes field, default 0 - Hour // Hours field, default 0 - Dom // Day of month field, default * - Month // Month field, default * - Dow // Day of week field, default * - DowOptional // Optional day of week field, default * - Descriptor // Allow descriptors such as @monthly, @weekly, etc. + // Second field, default 0. + Second ParseOption = 1 << iota + // SecondOptional seconds field, default 0. + SecondOptional + // Minute field, default 0. + Minute + // Hour field, default 0. + Hour + // Dom - Day of month field, default *. 
+ Dom + // Month field, default *. + Month + // Dow - Day of week field, default *. + Dow + // DowOptional - Optional day of week field, default *. + DowOptional + // Descriptor - Allow descriptors such as @monthly, @weekly, etc. + Descriptor ) -var places = []ParseOption{ - Second, - Minute, - Hour, - Dom, - Month, - Dow, +func places() []ParseOption { + return []ParseOption{ + Second, + Minute, + Hour, + Dom, + Month, + Dow, + } } -var defaults = []string{ - "0", - "0", - "0", - "*", - "*", - "*", +func defaults() []string { + return []string{ + "0", + "0", + "0", + "*", + "*", + "*", + } } -// A custom Parser that can be configured. +// Parser that can be configured. type Parser struct { options ParseOption } @@ -56,100 +91,74 @@ type Parser struct { // // Examples // -// // Standard parser without descriptors -// specParser := NewParser(Minute | Hour | Dom | Month | Dow) -// sched, err := specParser.Parse("0 0 15 */3 *") -// -// // Same as above, just excludes time fields -// specParser := NewParser(Dom | Month | Dow) -// sched, err := specParser.Parse("15 */3 *") +// // Standard parser without descriptors +// specParser := NewParser(Minute | Hour | Dom | Month | Dow) +// sched, err := specParser.Parse("0 0 15 */3 *") // -// // Same as above, just makes Dow optional -// specParser := NewParser(Dom | Month | DowOptional) -// sched, err := specParser.Parse("15 */3") +// // Same as above, just excludes time fields +// specParser := NewParser(Dom | Month | Dow) +// sched, err := specParser.Parse("15 */3 *") // +// // Same as above, just makes Dow optional +// specParser := NewParser(Dom | Month | DowOptional) +// sched, err := specParser.Parse("15 */3") func NewParser(options ParseOption) Parser { optionals := 0 if options&DowOptional > 0 { optionals++ } + if options&SecondOptional > 0 { optionals++ } + if optionals > 1 { panic("multiple optionals may not be configured") } + return Parser{options} } +// NewStandardParser returns a Parser configured to parse 
standard 5-field crontab specs. +func NewStandardParser() Parser { + return NewParser( + Minute | Hour | Dom | Month | Dow | Descriptor, + ) +} + // Parse returns a new crontab schedule representing the given spec. // It returns a descriptive error if the spec is not valid. // It accepts crontab specs and features configured by NewParser. func (p Parser) Parse(spec string) (Schedule, error) { if len(spec) == 0 { - return nil, fmt.Errorf("empty spec string") + return nil, errEmptySpec } - // Extract timezone if present - var loc = time.Local - if strings.HasPrefix(spec, "TZ=") || strings.HasPrefix(spec, "CRON_TZ=") { - var err error - i := strings.Index(spec, " ") - eq := strings.Index(spec, "=") - if loc, err = time.LoadLocation(spec[eq+1 : i]); err != nil { - return nil, fmt.Errorf("provided bad location %s: %v", spec[eq+1:i], err) - } - spec = strings.TrimSpace(spec[i:]) + spec, loc, err := extractLocation(spec) + if err != nil { + return nil, err } // Handle named schedules (descriptors), if configured if strings.HasPrefix(spec, "@") { if p.options&Descriptor == 0 { - return nil, fmt.Errorf("parser does not accept descriptors: %v", spec) + return nil, fmt.Errorf(wrappedErrorWithValueFormat, errDescriptorNotAllowed, spec) } + return parseDescriptor(spec, loc) } - // Split on whitespace. 
- fields := strings.Fields(spec) - - // Validate & fill in any omitted or optional fields - var err error - fields, err = normalizeFields(fields, p.options) + fields, err := normalizeFields(strings.Fields(spec), p.options) if err != nil { return nil, err } - field := func(field string, r bounds) uint64 { - if err != nil { - return 0 - } - var bits uint64 - bits, err = getField(field, r) - return bits - } - - var ( - second = field(fields[0], seconds) - minute = field(fields[1], minutes) - hour = field(fields[2], hours) - dayofmonth = field(fields[3], dom) - month = field(fields[4], months) - dayofweek = field(fields[5], dow) - ) + schedule, err := parseScheduleFields(fields, loc) if err != nil { return nil, err } - return &SpecSchedule{ - Second: second, - Minute: minute, - Hour: hour, - Dom: dayofmonth, - Month: month, - Dow: dayofweek, - Location: loc, - }, nil + return schedule, nil } // normalizeFields takes a subset set of the time fields and returns the full set @@ -158,65 +167,25 @@ func (p Parser) Parse(spec string) (Schedule, error) { // As part of performing this function, it also validates that the provided // fields are compatible with the configured options. 
func normalizeFields(fields []string, options ParseOption) ([]string, error) { - // Validate optionals & add their field to options - optionals := 0 - if options&SecondOptional > 0 { - options |= Second - optionals++ - } - if options&DowOptional > 0 { - options |= Dow - optionals++ - } - if optionals > 1 { - return nil, fmt.Errorf("multiple optionals may not be configured") + normalizedOptions, optionalCount, err := normalizeOptions(options) + if err != nil { + return nil, err } - // Figure out how many fields we need - max := 0 - for _, place := range places { - if options&place > 0 { - max++ - } - } - min := max - optionals + fieldCounts := fieldCountBounds(normalizedOptions, optionalCount) - // Validate number of fields - if count := len(fields); count < min || count > max { - if min == max { - return nil, fmt.Errorf("expected exactly %d fields, found %d: %s", min, count, fields) - } - return nil, fmt.Errorf("expected %d to %d fields, found %d: %s", min, max, count, fields) - } - - // Populate the optional field if not provided - if min < max && len(fields) == min { - switch { - case options&DowOptional > 0: - fields = append(fields, defaults[5]) // TODO: improve access to default - case options&SecondOptional > 0: - fields = append([]string{defaults[0]}, fields...) 
- default: - return nil, fmt.Errorf("unknown optional field") - } + err = validateFieldCount(fields, fieldCounts.min, fieldCounts.max) + if err != nil { + return nil, err } - // Populate all fields not part of options with their defaults - n := 0 - expandedFields := make([]string, len(places)) - copy(expandedFields, defaults) - for i, place := range places { - if options&place > 0 { - expandedFields[i] = fields[n] - n++ - } + fields, err = populateOptionalField(fields, normalizedOptions, fieldCounts.min, fieldCounts.max) + if err != nil { + return nil, err } - return expandedFields, nil -} -var standardParser = NewParser( - Minute | Hour | Dom | Month | Dow | Descriptor, -) + return expandFields(fields, normalizedOptions), nil +} // ParseStandard returns a new crontab schedule representing the given // standardSpec (https://en.wikipedia.org/wiki/Cron). It requires 5 entries @@ -227,7 +196,7 @@ var standardParser = NewParser( // - Standard crontab specs, e.g. "* * * * ?" // - Descriptors, e.g. "@midnight", "@every 1h30m" func ParseStandard(standardSpec string) (Schedule, error) { - return standardParser.Parse(standardSpec) + return NewStandardParser().Parse(standardSpec) } // getField returns an Int with the bits set representing all of the times that @@ -235,86 +204,280 @@ func ParseStandard(standardSpec string) (Schedule, error) { // list of "ranges". func getField(field string, r bounds) (uint64, error) { var bits uint64 + ranges := strings.FieldsFunc(field, func(r rune) bool { return r == ',' }) for _, expr := range ranges { bit, err := getRange(expr, r) if err != nil { return bits, err } + bits |= bit } + return bits, nil } // getRange returns the bits indicated by the given expression: -// number | number "-" number [ "/" number ] +// +// number | number "-" number [ "/" number ] +// // or error parsing range. 
-func getRange(expr string, r bounds) (uint64, error) { - var ( - start, end, step uint - rangeAndStep = strings.Split(expr, "/") - lowAndHigh = strings.Split(rangeAndStep[0], "-") - singleDigit = len(lowAndHigh) == 1 - err error - ) +func getRange(expr string, valueBounds bounds) (uint64, error) { + stepResult, err := parseStep(expr) + if err != nil { + return 0, err + } - var extra uint64 - if lowAndHigh[0] == "*" || lowAndHigh[0] == "?" { - start = r.min - end = r.max - extra = starBit - } else { - start, err = parseIntOrName(lowAndHigh[0], r.names) - if err != nil { - return 0, err + parsedRange, singleValue, err := parseRangeExpr(stepResult.rangeExpr, valueBounds, expr) + if err != nil { + return 0, err + } + + if stepResult.hasStep && singleValue { + parsedRange.end = valueBounds.max + } + + if stepResult.hasStep && stepResult.step > 1 { + parsedRange.extra = 0 + } + + err = validateRange(parsedRange, stepResult.step, valueBounds, expr) + if err != nil { + return 0, err + } + + return getBits(parsedRange.start, parsedRange.end, stepResult.step) | parsedRange.extra, nil +} + +func normalizeOptions(options ParseOption) (ParseOption, int, error) { + optionalCount := 0 + + if options&SecondOptional > 0 { + options |= Second + optionalCount++ + } + + if options&DowOptional > 0 { + options |= Dow + optionalCount++ + } + + if optionalCount > 1 { + return 0, 0, errMultipleOptionals + } + + return options, optionalCount, nil +} + +type fieldCountRange struct { + min int + max int +} + +type stepParseResult struct { + rangeExpr string + step uint + hasStep bool +} + +func fieldCountBounds(options ParseOption, optionalCount int) fieldCountRange { + maxFields := 0 + + for _, place := range places() { + if options&place > 0 { + maxFields++ } - switch len(lowAndHigh) { - case 1: - end = start - case 2: - end, err = parseIntOrName(lowAndHigh[1], r.names) - if err != nil { - return 0, err - } - default: - return 0, fmt.Errorf("too many hyphens: %s", expr) + } + + return 
fieldCountRange{ + min: maxFields - optionalCount, + max: maxFields, + } +} + +func validateFieldCount(fields []string, minFields, maxFields int) error { + count := len(fields) + if count >= minFields && count <= maxFields { + return nil + } + + if minFields == maxFields { + return fmt.Errorf("%w: expected exactly %d fields, found %d: %v", errUnexpectedFieldCount, minFields, count, fields) + } + + return fmt.Errorf("%w: expected %d to %d fields, found %d: %v", errUnexpectedFieldCount, minFields, maxFields, count, fields) +} + +func extractLocation(spec string) (string, *time.Location, error) { + loc := time.Local + + if !strings.HasPrefix(spec, "TZ=") && !strings.HasPrefix(spec, "CRON_TZ=") { + return spec, loc, nil + } + + i := strings.Index(spec, " ") + eq := strings.Index(spec, "=") + + loc, err := time.LoadLocation(spec[eq+1 : i]) + if err != nil { + return "", nil, fmt.Errorf("provided bad location %s: %w", spec[eq+1:i], err) + } + + return strings.TrimSpace(spec[i:]), loc, nil +} + +func parseScheduleFields(fields []string, loc *time.Location) (*SpecSchedule, error) { + second, err := getField(fields[0], secondBounds()) + if err != nil { + return nil, err + } + + minute, err := getField(fields[1], minuteBounds()) + if err != nil { + return nil, err + } + + hour, err := getField(fields[2], hourBounds()) + if err != nil { + return nil, err + } + + dayofmonth, err := getField(fields[3], dayOfMonthBounds()) + if err != nil { + return nil, err + } + + month, err := getField(fields[4], monthBounds()) + if err != nil { + return nil, err + } + + dayofweek, err := getField(fields[5], dayOfWeekBounds()) + if err != nil { + return nil, err + } + + return &SpecSchedule{ + Second: second, + Minute: minute, + Hour: hour, + Dom: dayofmonth, + Month: month, + Dow: dayofweek, + Location: loc, + }, nil +} + +func populateOptionalField(fields []string, options ParseOption, minFields, maxFields int) ([]string, error) { + if minFields == maxFields || len(fields) != minFields { + 
return fields, nil + } + + defaultFields := defaults() + + switch { + case options&DowOptional > 0: + return append(fields, defaultFields[5]), nil + case options&SecondOptional > 0: + return append([]string{defaultFields[0]}, fields...), nil + default: + return nil, errUnknownOptional + } +} + +func expandFields(fields []string, options ParseOption) []string { + fieldIndex := 0 + expandedFields := make([]string, len(places())) + copy(expandedFields, defaults()) + + for i, place := range places() { + if options&place > 0 { + expandedFields[i] = fields[fieldIndex] + fieldIndex++ } } + return expandedFields +} + +type rangeBits struct { + start uint + end uint + extra uint64 +} + +func parseStep(expr string) (stepParseResult, error) { + rangeAndStep := strings.Split(expr, "/") switch len(rangeAndStep) { case 1: - step = 1 - case 2: - step, err = mustParseInt(rangeAndStep[1]) + return stepParseResult{rangeExpr: rangeAndStep[0], step: 1}, nil + case splitPairCount: + step, err := mustParseInt(rangeAndStep[1]) if err != nil { - return 0, err + return stepParseResult{}, err } - // Special handling: "N/step" means "N-max/step". - if singleDigit { - end = r.max - } - if step > 1 { - extra = 0 + return stepParseResult{ + rangeExpr: rangeAndStep[0], + step: step, + hasStep: true, + }, nil + default: + return stepParseResult{}, fmt.Errorf(wrappedErrorWithValueFormat, errTooManySlashes, expr) + } +} + +func parseRangeExpr(rangeExpr string, valueBounds bounds, originalExpr string) (rangeBits, bool, error) { + lowAndHigh := strings.Split(rangeExpr, "-") + singleValue := len(lowAndHigh) == 1 + + if lowAndHigh[0] == "*" || lowAndHigh[0] == "?" 
{ + return rangeBits{ + start: valueBounds.min, + end: valueBounds.max, + extra: starBit, + }, singleValue, nil + } + + start, err := parseIntOrName(lowAndHigh[0], valueBounds.names) + if err != nil { + return rangeBits{}, false, err + } + + switch len(lowAndHigh) { + case 1: + return rangeBits{start: start, end: start}, true, nil + case splitPairCount: + end, err := parseIntOrName(lowAndHigh[1], valueBounds.names) + if err != nil { + return rangeBits{}, false, err } + + return rangeBits{start: start, end: end}, false, nil default: - return 0, fmt.Errorf("too many slashes: %s", expr) + return rangeBits{}, false, fmt.Errorf(wrappedErrorWithValueFormat, errTooManyHyphens, originalExpr) } +} - if start < r.min { - return 0, fmt.Errorf("beginning of range (%d) below minimum (%d): %s", start, r.min, expr) +func validateRange(parsedRange rangeBits, step uint, valueBounds bounds, expr string) error { + if parsedRange.start < valueBounds.min { + return fmt.Errorf("%w: (%d) below minimum (%d): %s", errRangeBelowMinimum, parsedRange.start, valueBounds.min, expr) } - if end > r.max { - return 0, fmt.Errorf("end of range (%d) above maximum (%d): %s", end, r.max, expr) + + if parsedRange.end > valueBounds.max { + return fmt.Errorf("%w: (%d) above maximum (%d): %s", errRangeAboveMaximum, parsedRange.end, valueBounds.max, expr) } - if start > end { - return 0, fmt.Errorf("beginning of range (%d) beyond end of range (%d): %s", start, end, expr) + + if parsedRange.start > parsedRange.end { + return fmt.Errorf("%w: (%d) beyond end of range (%d): %s", errRangeStartBeyondEnd, parsedRange.start, parsedRange.end, expr) } + if step == 0 { - return 0, fmt.Errorf("step of range should be a positive number: %s", expr) + return fmt.Errorf(wrappedErrorWithValueFormat, errRangeStepMustBePositive, expr) } - return getBits(start, end, step) | extra, nil + return nil } // parseIntOrName returns the (possibly-named) integer contained in expr. 
@@ -324,6 +487,7 @@ func parseIntOrName(expr string, names map[string]uint) (uint, error) { return namedInt, nil } } + return mustParseInt(expr) } @@ -331,104 +495,113 @@ func parseIntOrName(expr string, names map[string]uint) (uint, error) { func mustParseInt(expr string) (uint, error) { num, err := strconv.Atoi(expr) if err != nil { - return 0, fmt.Errorf("failed to parse int from %s: %s", expr, err) + return 0, fmt.Errorf("failed to parse int from %s: %w", expr, err) } + if num < 0 { - return 0, fmt.Errorf("negative number (%d) not allowed: %s", num, expr) + return 0, fmt.Errorf("%w: (%d): %s", errNegativeNumber, num, expr) } return uint(num), nil } -// getBits sets all bits in the range [min, max], modulo the given step size. -func getBits(min, max, step uint) uint64 { +// getBits sets all bits in the range [lower, upper], modulo the given step size. +func getBits(lower, upper, step uint) uint64 { var bits uint64 // If step is 1, use shifts. if step == 1 { - return ^(math.MaxUint64 << (max + 1)) & (math.MaxUint64 << min) + return ^(math.MaxUint64 << (upper + 1)) & (math.MaxUint64 << lower) } // Else, use a simple loop. - for i := min; i <= max; i += step { + for i := lower; i <= upper; i += step { bits |= 1 << i } + return bits } -// all returns all bits within the given bounds. (plus the star bit) +// all returns all bits within the given bounds. (plus the star bit). func all(r bounds) uint64 { return getBits(r.min, r.max, 1) | starBit } // parseDescriptor returns a predefined schedule for the expression, or error if none matches. 
func parseDescriptor(descriptor string, loc *time.Location) (Schedule, error) { + secondRange := secondBounds() + minuteRange := minuteBounds() + hourRange := hourBounds() + domRange := dayOfMonthBounds() + monthRange := monthBounds() + dowRange := dayOfWeekBounds() + switch descriptor { case "@yearly", "@annually": return &SpecSchedule{ - Second: 1 << seconds.min, - Minute: 1 << minutes.min, - Hour: 1 << hours.min, - Dom: 1 << dom.min, - Month: 1 << months.min, - Dow: all(dow), + Second: 1 << secondRange.min, + Minute: 1 << minuteRange.min, + Hour: 1 << hourRange.min, + Dom: 1 << domRange.min, + Month: 1 << monthRange.min, + Dow: all(dowRange), Location: loc, }, nil case "@monthly": return &SpecSchedule{ - Second: 1 << seconds.min, - Minute: 1 << minutes.min, - Hour: 1 << hours.min, - Dom: 1 << dom.min, - Month: all(months), - Dow: all(dow), + Second: 1 << secondRange.min, + Minute: 1 << minuteRange.min, + Hour: 1 << hourRange.min, + Dom: 1 << domRange.min, + Month: all(monthRange), + Dow: all(dowRange), Location: loc, }, nil case "@weekly": return &SpecSchedule{ - Second: 1 << seconds.min, - Minute: 1 << minutes.min, - Hour: 1 << hours.min, - Dom: all(dom), - Month: all(months), - Dow: 1 << dow.min, + Second: 1 << secondRange.min, + Minute: 1 << minuteRange.min, + Hour: 1 << hourRange.min, + Dom: all(domRange), + Month: all(monthRange), + Dow: 1 << dowRange.min, Location: loc, }, nil case "@daily", "@midnight": return &SpecSchedule{ - Second: 1 << seconds.min, - Minute: 1 << minutes.min, - Hour: 1 << hours.min, - Dom: all(dom), - Month: all(months), - Dow: all(dow), + Second: 1 << secondRange.min, + Minute: 1 << minuteRange.min, + Hour: 1 << hourRange.min, + Dom: all(domRange), + Month: all(monthRange), + Dow: all(dowRange), Location: loc, }, nil case "@hourly": return &SpecSchedule{ - Second: 1 << seconds.min, - Minute: 1 << minutes.min, - Hour: all(hours), - Dom: all(dom), - Month: all(months), - Dow: all(dow), + Second: 1 << secondRange.min, + Minute: 1 << 
minuteRange.min, + Hour: all(hourRange), + Dom: all(domRange), + Month: all(monthRange), + Dow: all(dowRange), Location: loc, }, nil - } const every = "@every " if strings.HasPrefix(descriptor, every) { duration, err := time.ParseDuration(descriptor[len(every):]) if err != nil { - return nil, fmt.Errorf("failed to parse duration %s: %s", descriptor, err) + return nil, fmt.Errorf("failed to parse duration %s: %w", descriptor, err) } + return Every(duration), nil } - return nil, fmt.Errorf("unrecognized descriptor: %s", descriptor) + return nil, fmt.Errorf(wrappedErrorWithValueFormat, errUnrecognizedDescriptor, descriptor) } diff --git a/parser_test.go b/parser_test.go index 41c8c52..918ec4e 100644 --- a/parser_test.go +++ b/parser_test.go @@ -7,9 +7,40 @@ import ( "time" ) -var secondParser = NewParser(Second | Minute | Hour | Dom | Month | DowOptional | Descriptor) +const ( + minValueZero uint = 0 + minValueOne uint = 1 + maxValueThree uint = 3 + maxValueFour uint = 4 + valueFive uint = 5 + valueSix uint = 6 + maxValueSeven uint = 7 + + fieldValueZero = "0" + fieldValueFive = "5" + fieldValueFifteen = "15" + fieldValueWildcard = "*" + + fiveMinuteDelay = 5 * time.Minute + + failedToParseIntText = "failed to parse int from" + unexpectedErrorText = "%s => unexpected error %v" + + bitMaskZeroToFiftyNine uint64 = 0xfffffffffffffff + bitMaskZeroToTwentyFour uint64 = 0xffffff + bitMaskOneToThirtyOne uint64 = 0xfffffffe + bitMaskOneToTwelve uint64 = 0x1ffe + bitMaskZeroToSix uint64 = 0x7f + + singleBitZero uint64 = 0x1 + singleBitOne uint64 = 0x2 + alternatingBitsToFive uint64 = 0x2a + alternatingBitsToFour uint64 = 0xa +) func TestRange(t *testing.T) { + t.Parallel() + zero := uint64(0) ranges := []struct { expr string @@ -17,120 +48,143 @@ func TestRange(t *testing.T) { expected uint64 err string }{ - {"5", 0, 7, 1 << 5, ""}, - {"0", 0, 7, 1 << 0, ""}, - {"7", 0, 7, 1 << 7, ""}, - - {"5-5", 0, 7, 1 << 5, ""}, - {"5-6", 0, 7, 1<<5 | 1<<6, ""}, - {"5-7", 0, 7, 1<<5 | 1<<6 
| 1<<7, ""}, - - {"5-6/2", 0, 7, 1 << 5, ""}, - {"5-7/2", 0, 7, 1<<5 | 1<<7, ""}, - {"5-7/1", 0, 7, 1<<5 | 1<<6 | 1<<7, ""}, - - {"*", 1, 3, 1<<1 | 1<<2 | 1<<3 | starBit, ""}, - {"*/2", 1, 3, 1<<1 | 1<<3, ""}, - - {"5--5", 0, 0, zero, "too many hyphens"}, - {"jan-x", 0, 0, zero, "failed to parse int from"}, - {"2-x", 1, 5, zero, "failed to parse int from"}, - {"*/-12", 0, 0, zero, "negative number"}, - {"*//2", 0, 0, zero, "too many slashes"}, - {"1", 3, 5, zero, "below minimum"}, - {"6", 3, 5, zero, "above maximum"}, - {"5-3", 3, 5, zero, "beyond end of range"}, - {"*/0", 0, 0, zero, "should be a positive number"}, + {fieldValueFive, minValueZero, maxValueSeven, 1 << valueFive, ""}, + {fieldValueZero, minValueZero, maxValueSeven, 1 << minValueZero, ""}, + {"7", minValueZero, maxValueSeven, 1 << maxValueSeven, ""}, + + {"5-5", minValueZero, maxValueSeven, 1 << valueFive, ""}, + {"5-6", minValueZero, maxValueSeven, 1< expected %v, got %v", c.expr, c.err, err) + for _, testCase := range ranges { + actual, err := getRange(testCase.expr, bounds{testCase.min, testCase.max, nil}) + if len(testCase.err) != 0 && (err == nil || !strings.Contains(err.Error(), testCase.err)) { + t.Errorf("%s => expected %v, got %v", testCase.expr, testCase.err, err) } - if len(c.err) == 0 && err != nil { - t.Errorf("%s => unexpected error %v", c.expr, err) + + if len(testCase.err) == 0 && err != nil { + t.Errorf(unexpectedErrorText, testCase.expr, err) } - if actual != c.expected { - t.Errorf("%s => expected %d, got %d", c.expr, c.expected, actual) + + if actual != testCase.expected { + t.Errorf("%s => expected %d, got %d", testCase.expr, testCase.expected, actual) } } } func TestField(t *testing.T) { + t.Parallel() + fields := []struct { expr string min, max uint expected uint64 }{ - {"5", 1, 7, 1 << 5}, - {"5,6", 1, 7, 1<<5 | 1<<6}, - {"5,6,7", 1, 7, 1<<5 | 1<<6 | 1<<7}, - {"1,5-7/2,3", 1, 7, 1<<1 | 1<<5 | 1<<7 | 1<<3}, + {fieldValueFive, minValueOne, maxValueSeven, 1 << valueFive}, + 
{"5,6", minValueOne, maxValueSeven, 1< expected %d, got %d", c.expr, c.expected, actual) + for _, testCase := range fields { + actual, err := getField(testCase.expr, bounds{testCase.min, testCase.max, nil}) + if err != nil { + t.Errorf(unexpectedErrorText, testCase.expr, err) + } + + if actual != testCase.expected { + t.Errorf("%s => expected %d, got %d", testCase.expr, testCase.expected, actual) } } } func TestAll(t *testing.T) { + t.Parallel() + + minuteRange := minuteBounds() + hourRange := hourBounds() + domRange := dayOfMonthBounds() + monthRange := monthBounds() + dowRange := dayOfWeekBounds() + allBits := []struct { r bounds expected uint64 }{ - {minutes, 0xfffffffffffffff}, // 0-59: 60 ones - {hours, 0xffffff}, // 0-23: 24 ones - {dom, 0xfffffffe}, // 1-31: 31 ones, 1 zero - {months, 0x1ffe}, // 1-12: 12 ones, 1 zero - {dow, 0x7f}, // 0-6: 7 ones + {minuteRange, bitMaskZeroToFiftyNine}, + {hourRange, bitMaskZeroToTwentyFour}, + {domRange, bitMaskOneToThirtyOne}, + {monthRange, bitMaskOneToTwelve}, + {dowRange, bitMaskZeroToSix}, } - for _, c := range allBits { - actual := all(c.r) // all() adds the starBit, so compensate for that.. 
- if c.expected|starBit != actual { + for _, testCase := range allBits { + actual := all(testCase.r) + if testCase.expected|starBit != actual { t.Errorf("%d-%d/%d => expected %b, got %b", - c.r.min, c.r.max, 1, c.expected|starBit, actual) + testCase.r.min, testCase.r.max, 1, testCase.expected|starBit, actual) } } } func TestBits(t *testing.T) { + t.Parallel() + bits := []struct { min, max, step uint expected uint64 }{ - {0, 0, 1, 0x1}, - {1, 1, 1, 0x2}, - {1, 5, 2, 0x2a}, // 101010 - {1, 4, 2, 0xa}, // 1010 + {minValueZero, minValueZero, minValueOne, singleBitZero}, + {minValueOne, minValueOne, minValueOne, singleBitOne}, + {minValueOne, valueFive, 2, alternatingBitsToFive}, + {minValueOne, maxValueFour, 2, alternatingBitsToFour}, } - for _, c := range bits { - actual := getBits(c.min, c.max, c.step) - if c.expected != actual { + for _, testCase := range bits { + actual := getBits(testCase.min, testCase.max, testCase.step) + if testCase.expected != actual { t.Errorf("%d-%d/%d => expected %b, got %b", - c.min, c.max, c.step, c.expected, actual) + testCase.min, testCase.max, testCase.step, testCase.expected, actual) } } } func TestParseScheduleErrors(t *testing.T) { - var tests = []struct{ expr, err string }{ - {"* 5 j * * *", "failed to parse int from"}, + t.Parallel() + + tests := []struct{ expr, err string }{ + {"* 5 j * * *", failedToParseIntText}, {"@every Xm", "failed to parse duration"}, {"@unrecognized", "unrecognized descriptor"}, {"* * * *", "expected 5 to 6 fields"}, {"", "empty spec string"}, } - for _, c := range tests { - actual, err := secondParser.Parse(c.expr) - if err == nil || !strings.Contains(err.Error(), c.err) { - t.Errorf("%s => expected %v, got %v", c.expr, c.err, err) + parserWithSeconds := testParserWithSeconds() + + for _, testCase := range tests { + actual, err := parserWithSeconds.Parse(testCase.expr) + if err == nil || !strings.Contains(err.Error(), testCase.err) { + t.Errorf("%s => expected %v, got %v", testCase.expr, testCase.err, 
err) } + if actual != nil { t.Errorf("expected nil schedule on error, got %v", actual) } @@ -138,50 +192,57 @@ func TestParseScheduleErrors(t *testing.T) { } func TestParseSchedule(t *testing.T) { - tokyo, _ := time.LoadLocation("Asia/Tokyo") + t.Parallel() + + tokyo := mustLoadLocation(t, "Asia/Tokyo") + parserWithSeconds := testParserWithSeconds() + entries := []struct { parser Parser expr string expected Schedule }{ - {secondParser, "0 5 * * * *", every5min(time.Local)}, - {standardParser, "5 * * * *", every5min(time.Local)}, - {secondParser, "CRON_TZ=UTC 0 5 * * * *", every5min(time.UTC)}, - {standardParser, "CRON_TZ=UTC 5 * * * *", every5min(time.UTC)}, - {secondParser, "CRON_TZ=Asia/Tokyo 0 5 * * * *", every5min(tokyo)}, - {secondParser, "@every 5m", ConstantDelaySchedule{5 * time.Minute}}, - {secondParser, "@midnight", midnight(time.Local)}, - {secondParser, "TZ=UTC @midnight", midnight(time.UTC)}, - {secondParser, "TZ=Asia/Tokyo @midnight", midnight(tokyo)}, - {secondParser, "@yearly", annual(time.Local)}, - {secondParser, "@annually", annual(time.Local)}, + {parserWithSeconds, "0 5 * * * *", every5min(time.Local)}, + {NewStandardParser(), "5 * * * *", every5min(time.Local)}, + {parserWithSeconds, "CRON_TZ=UTC 0 5 * * * *", every5min(time.UTC)}, + {NewStandardParser(), "CRON_TZ=UTC 5 * * * *", every5min(time.UTC)}, + {parserWithSeconds, "CRON_TZ=Asia/Tokyo 0 5 * * * *", every5min(tokyo)}, + {parserWithSeconds, "@every 5m", ConstantDelaySchedule{fiveMinuteDelay}}, + {parserWithSeconds, "@midnight", midnight(time.Local)}, + {parserWithSeconds, "TZ=UTC @midnight", midnight(time.UTC)}, + {parserWithSeconds, "TZ=Asia/Tokyo @midnight", midnight(tokyo)}, + {parserWithSeconds, "@yearly", annual(time.Local)}, + {parserWithSeconds, "@annually", annual(time.Local)}, { - parser: secondParser, + parser: parserWithSeconds, expr: "* 5 * * * *", expected: &SpecSchedule{ - Second: all(seconds), - Minute: 1 << 5, - Hour: all(hours), - Dom: all(dom), - Month: all(months), - 
Dow: all(dow), + Second: all(secondBounds()), + Minute: 1 << valueFive, + Hour: all(hourBounds()), + Dom: all(dayOfMonthBounds()), + Month: all(monthBounds()), + Dow: all(dayOfWeekBounds()), Location: time.Local, }, }, } - for _, c := range entries { - actual, err := c.parser.Parse(c.expr) + for _, testCase := range entries { + actual, err := testCase.parser.Parse(testCase.expr) if err != nil { - t.Errorf("%s => unexpected error %v", c.expr, err) + t.Errorf(unexpectedErrorText, testCase.expr, err) } - if !reflect.DeepEqual(actual, c.expected) { - t.Errorf("%s => expected %b, got %b", c.expr, c.expected, actual) + + if !reflect.DeepEqual(actual, testCase.expected) { + t.Errorf("%s => expected %b, got %b", testCase.expr, testCase.expected, actual) } } } func TestOptionalSecondSchedule(t *testing.T) { + t.Parallel() + parser := NewParser(SecondOptional | Minute | Hour | Dom | Month | Dow | Descriptor) entries := []struct { expr string @@ -192,18 +253,21 @@ func TestOptionalSecondSchedule(t *testing.T) { {"5 * * * *", every5min(time.Local)}, } - for _, c := range entries { - actual, err := parser.Parse(c.expr) + for _, testCase := range entries { + actual, err := parser.Parse(testCase.expr) if err != nil { - t.Errorf("%s => unexpected error %v", c.expr, err) + t.Errorf(unexpectedErrorText, testCase.expr, err) } - if !reflect.DeepEqual(actual, c.expected) { - t.Errorf("%s => expected %b, got %b", c.expr, c.expected, actual) + + if !reflect.DeepEqual(actual, testCase.expected) { + t.Errorf("%s => expected %b, got %b", testCase.expr, testCase.expected, actual) } } } func TestNormalizeFields(t *testing.T) { + t.Parallel() + tests := []struct { name string input []string @@ -212,62 +276,67 @@ func TestNormalizeFields(t *testing.T) { }{ { "AllFields_NoOptional", - []string{"0", "5", "*", "*", "*", "*"}, + []string{fieldValueZero, fieldValueFive, fieldValueWildcard, fieldValueWildcard, fieldValueWildcard, fieldValueWildcard}, Second | Minute | Hour | Dom | Month | Dow | 
Descriptor, - []string{"0", "5", "*", "*", "*", "*"}, + []string{fieldValueZero, fieldValueFive, fieldValueWildcard, fieldValueWildcard, fieldValueWildcard, fieldValueWildcard}, }, { "AllFields_SecondOptional_Provided", - []string{"0", "5", "*", "*", "*", "*"}, + []string{fieldValueZero, fieldValueFive, fieldValueWildcard, fieldValueWildcard, fieldValueWildcard, fieldValueWildcard}, SecondOptional | Minute | Hour | Dom | Month | Dow | Descriptor, - []string{"0", "5", "*", "*", "*", "*"}, + []string{fieldValueZero, fieldValueFive, fieldValueWildcard, fieldValueWildcard, fieldValueWildcard, fieldValueWildcard}, }, { "AllFields_SecondOptional_NotProvided", - []string{"5", "*", "*", "*", "*"}, + []string{fieldValueFive, fieldValueWildcard, fieldValueWildcard, fieldValueWildcard, fieldValueWildcard}, SecondOptional | Minute | Hour | Dom | Month | Dow | Descriptor, - []string{"0", "5", "*", "*", "*", "*"}, + []string{fieldValueZero, fieldValueFive, fieldValueWildcard, fieldValueWildcard, fieldValueWildcard, fieldValueWildcard}, }, { "SubsetFields_NoOptional", - []string{"5", "15", "*"}, + []string{fieldValueFive, fieldValueFifteen, fieldValueWildcard}, Hour | Dom | Month, - []string{"0", "0", "5", "15", "*", "*"}, + []string{fieldValueZero, fieldValueZero, fieldValueFive, fieldValueFifteen, fieldValueWildcard, fieldValueWildcard}, }, { "SubsetFields_DowOptional_Provided", - []string{"5", "15", "*", "4"}, + []string{fieldValueFive, fieldValueFifteen, fieldValueWildcard, "4"}, Hour | Dom | Month | DowOptional, - []string{"0", "0", "5", "15", "*", "4"}, + []string{fieldValueZero, fieldValueZero, fieldValueFive, fieldValueFifteen, fieldValueWildcard, "4"}, }, { "SubsetFields_DowOptional_NotProvided", - []string{"5", "15", "*"}, + []string{fieldValueFive, fieldValueFifteen, fieldValueWildcard}, Hour | Dom | Month | DowOptional, - []string{"0", "0", "5", "15", "*", "*"}, + []string{fieldValueZero, fieldValueZero, fieldValueFive, fieldValueFifteen, fieldValueWildcard, 
fieldValueWildcard}, }, { "SubsetFields_SecondOptional_NotProvided", - []string{"5", "15", "*"}, + []string{fieldValueFive, fieldValueFifteen, fieldValueWildcard}, SecondOptional | Hour | Dom | Month, - []string{"0", "0", "5", "15", "*", "*"}, + []string{fieldValueZero, fieldValueZero, fieldValueFive, fieldValueFifteen, fieldValueWildcard, fieldValueWildcard}, }, } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - actual, err := normalizeFields(test.input, test.options) + for _, testCase := range tests { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + actual, err := normalizeFields(testCase.input, testCase.options) if err != nil { t.Errorf("unexpected error: %v", err) } - if !reflect.DeepEqual(actual, test.expected) { - t.Errorf("expected %v, got %v", test.expected, actual) + + if !reflect.DeepEqual(actual, testCase.expected) { + t.Errorf("expected %v, got %v", testCase.expected, actual) } }) } } func TestNormalizeFields_Errors(t *testing.T) { + t.Parallel() + tests := []struct { name string input []string @@ -276,7 +345,7 @@ func TestNormalizeFields_Errors(t *testing.T) { }{ { "TwoOptionals", - []string{"0", "5", "*", "*", "*", "*"}, + []string{fieldValueZero, fieldValueFive, fieldValueWildcard, fieldValueWildcard, fieldValueWildcard, fieldValueWildcard}, SecondOptional | Minute | Hour | Dom | Month | DowOptional, "", }, @@ -299,36 +368,49 @@ func TestNormalizeFields_Errors(t *testing.T) { "", }, } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - actual, err := normalizeFields(test.input, test.options) + for _, testCase := range tests { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + actual, err := normalizeFields(testCase.input, testCase.options) if err == nil { t.Errorf("expected an error, got none. 
results: %v", actual) } - if !strings.Contains(err.Error(), test.err) { - t.Errorf("expected error %q, got %q", test.err, err.Error()) + + if !strings.Contains(err.Error(), testCase.err) { + t.Errorf("expected error %q, got %q", testCase.err, err.Error()) } }) } } func TestStandardSpecSchedule(t *testing.T) { + t.Parallel() + entries := []struct { expr string expected Schedule err string }{ { - expr: "5 * * * *", - expected: &SpecSchedule{1 << seconds.min, 1 << 5, all(hours), all(dom), all(months), all(dow), time.Local}, + expr: "5 * * * *", + expected: &SpecSchedule{ + 1 << secondBounds().min, + 1 << valueFive, + all(hourBounds()), + all(dayOfMonthBounds()), + all(monthBounds()), + all(dayOfWeekBounds()), + time.Local, + }, }, { expr: "@every 5m", - expected: ConstantDelaySchedule{time.Duration(5) * time.Minute}, + expected: ConstantDelaySchedule{fiveMinuteDelay}, }, { expr: "5 j * * *", - err: "failed to parse int from", + err: failedToParseIntText, }, { expr: "* * * *", @@ -336,22 +418,27 @@ func TestStandardSpecSchedule(t *testing.T) { }, } - for _, c := range entries { - actual, err := ParseStandard(c.expr) - if len(c.err) != 0 && (err == nil || !strings.Contains(err.Error(), c.err)) { - t.Errorf("%s => expected %v, got %v", c.expr, c.err, err) + for _, testCase := range entries { + actual, err := ParseStandard(testCase.expr) + if len(testCase.err) != 0 && (err == nil || !strings.Contains(err.Error(), testCase.err)) { + t.Errorf("%s => expected %v, got %v", testCase.expr, testCase.err, err) } - if len(c.err) == 0 && err != nil { - t.Errorf("%s => unexpected error %v", c.expr, err) + + if len(testCase.err) == 0 && err != nil { + t.Errorf(unexpectedErrorText, testCase.expr, err) } - if !reflect.DeepEqual(actual, c.expected) { - t.Errorf("%s => expected %b, got %b", c.expr, c.expected, actual) + + if !reflect.DeepEqual(actual, testCase.expected) { + t.Errorf("%s => expected %b, got %b", testCase.expr, testCase.expected, actual) } } } func TestNoDescriptorParser(t 
*testing.T) { + t.Parallel() + parser := NewParser(Minute | Hour) + _, err := parser.Parse("@every 1m") if err == nil { t.Error("expected an error, got none") @@ -359,25 +446,41 @@ func TestNoDescriptorParser(t *testing.T) { } func every5min(loc *time.Location) *SpecSchedule { - return &SpecSchedule{1 << 0, 1 << 5, all(hours), all(dom), all(months), all(dow), loc} + return &SpecSchedule{ + 1 << secondBounds().min, + 1 << valueFive, + all(hourBounds()), + all(dayOfMonthBounds()), + all(monthBounds()), + all(dayOfWeekBounds()), + loc, + } } func every5min5s(loc *time.Location) *SpecSchedule { - return &SpecSchedule{1 << 5, 1 << 5, all(hours), all(dom), all(months), all(dow), loc} + return &SpecSchedule{ + 1 << valueFive, + 1 << valueFive, + all(hourBounds()), + all(dayOfMonthBounds()), + all(monthBounds()), + all(dayOfWeekBounds()), + loc, + } } func midnight(loc *time.Location) *SpecSchedule { - return &SpecSchedule{1, 1, 1, all(dom), all(months), all(dow), loc} + return &SpecSchedule{1, 1, 1, all(dayOfMonthBounds()), all(monthBounds()), all(dayOfWeekBounds()), loc} } func annual(loc *time.Location) *SpecSchedule { return &SpecSchedule{ - Second: 1 << seconds.min, - Minute: 1 << minutes.min, - Hour: 1 << hours.min, - Dom: 1 << dom.min, - Month: 1 << months.min, - Dow: all(dow), + Second: 1 << secondBounds().min, + Minute: 1 << minuteBounds().min, + Hour: 1 << hourBounds().min, + Dom: 1 << dayOfMonthBounds().min, + Month: 1 << monthBounds().min, + Dow: all(dayOfWeekBounds()), Location: loc, } } diff --git a/spec.go b/spec.go index fa1e241..1563cb7 100644 --- a/spec.go +++ b/spec.go @@ -17,172 +17,341 @@ type bounds struct { names map[string]uint } -// The bounds for each field. 
-var ( - seconds = bounds{0, 59, nil} - minutes = bounds{0, 59, nil} - hours = bounds{0, 23, nil} - dom = bounds{1, 31, nil} - months = bounds{1, 12, map[string]uint{ - "jan": 1, - "feb": 2, - "mar": 3, - "apr": 4, - "may": 5, - "jun": 6, - "jul": 7, - "aug": 8, - "sep": 9, - "oct": 10, - "nov": 11, - "dec": 12, - }} - dow = bounds{0, 6, map[string]uint{ - "sun": 0, - "mon": 1, - "tue": 2, - "wed": 3, - "thu": 4, - "fri": 5, - "sat": 6, - }} -) - const ( // Set the top bit if a star was included in the expression. starBit = 1 << 63 + + middayHour = 12 + hoursPerDay = 24 + yearSearchSpan = 5 + + minSecondValue uint = 0 + maxSecondValue uint = 59 + minMinuteValue uint = 0 + maxMinuteValue uint = 59 + minHourValue uint = 0 + maxHourValue uint = 23 + minDayOfMonthValue uint = 1 + maxDayOfMonthValue uint = 31 + minMonthValue uint = 1 + maxMonthValue uint = 12 + minDayOfWeekValue uint = 0 + maxDayOfWeekValue uint = 6 + + monthJanuaryValue uint = 1 + monthFebruaryValue uint = 2 + monthMarchValue uint = 3 + monthAprilValue uint = 4 + monthMayValue uint = 5 + monthJuneValue uint = 6 + monthJulyValue uint = 7 + monthAugustValue uint = 8 + monthSeptemberValue uint = 9 + monthOctoberValue uint = 10 + monthNovemberValue uint = 11 + monthDecemberValue uint = 12 + + weekdaySundayValue uint = 0 + weekdayMondayValue uint = 1 + weekdayTuesdayValue uint = 2 + weekdayWednesdayValue uint = 3 + weekdayThursdayValue uint = 4 + weekdayFridayValue uint = 5 + weekdaySaturdayValue uint = 6 ) +func secondBounds() bounds { + return bounds{minSecondValue, maxSecondValue, nil} +} + +func minuteBounds() bounds { + return bounds{minMinuteValue, maxMinuteValue, nil} +} + +func hourBounds() bounds { + return bounds{minHourValue, maxHourValue, nil} +} + +func dayOfMonthBounds() bounds { + return bounds{minDayOfMonthValue, maxDayOfMonthValue, nil} +} + +func monthBounds() bounds { + return bounds{minMonthValue, maxMonthValue, map[string]uint{ + "jan": monthJanuaryValue, + "feb": monthFebruaryValue, + 
"mar": monthMarchValue, + "apr": monthAprilValue, + "may": monthMayValue, + "jun": monthJuneValue, + "jul": monthJulyValue, + "aug": monthAugustValue, + "sep": monthSeptemberValue, + "oct": monthOctoberValue, + "nov": monthNovemberValue, + "dec": monthDecemberValue, + }} +} + +func dayOfWeekBounds() bounds { + return bounds{minDayOfWeekValue, maxDayOfWeekValue, map[string]uint{ + "sun": weekdaySundayValue, + "mon": weekdayMondayValue, + "tue": weekdayTuesdayValue, + "wed": weekdayWednesdayValue, + "thu": weekdayThursdayValue, + "fri": weekdayFridayValue, + "sat": weekdaySaturdayValue, + }} +} + // Next returns the next time this schedule is activated, greater than the given -// time. If no time can be found to satisfy the schedule, return the zero time. -func (s *SpecSchedule) Next(t time.Time) time.Time { - // General approach - // - // For Month, Day, Hour, Minute, Second: - // Check if the time value matches. If yes, continue to the next field. - // If the field doesn't match the schedule, then increment the field until it matches. - // While incrementing the field, a wrap-around brings it back to the beginning - // of the field list (since it is necessary to re-verify previous field - // values) - - // Convert the given time into the schedule's timezone, if one is specified. - // Save the original timezone so we can convert back after we find a time. - // Note that schedules without a time zone specified (time.Local) are treated - // as local to the time provided. - origLocation := t.Location() - loc := s.Location - if loc == time.Local { - loc = t.Location() - } - if s.Location != time.Local { - t = t.In(s.Location) +// time. If no time can be found to satisfy the schedule, return the zero time. +func (s *SpecSchedule) Next(candidate time.Time) time.Time { + origLocation := candidate.Location() + + nextActivation, ok := s.nextActivation(candidate) + if !ok { + return time.Time{} } - // Start at the earliest possible time (the upcoming second). 
- t = t.Add(1*time.Second - time.Duration(t.Nanosecond())*time.Nanosecond) + return nextActivation.In(origLocation) +} - // This flag indicates whether a field has been incremented. - added := false +type nextActivationState struct { + schedule *SpecSchedule + added bool + location *time.Location + yearLimit int +} - // If no time is found within five years, return zero. - yearLimit := t.Year() + 5 +func (s *SpecSchedule) nextActivation(candidate time.Time) (time.Time, bool) { + candidate, location := s.prepareNext(candidate) -WRAP: - if t.Year() > yearLimit { - return time.Time{} + state := nextActivationState{ + schedule: s, + location: location, + yearLimit: candidate.Year() + yearSearchSpan, } - // Find the first applicable month. - // If it's this month, then do nothing. - for 1< 12 { - t = t.Add(time.Duration(24-t.Hour()) * time.Hour) - } else { - t = t.Add(time.Duration(-t.Hour()) * time.Hour) - } - } + return time.Time{}, false +} - if t.Day() == 1 { - goto WRAP - } +func (state *nextActivationState) advance(candidate time.Time) (time.Time, bool) { + steps := [...]func(time.Time) (time.Time, bool){ + func(candidate time.Time) (time.Time, bool) { + return state.schedule.advanceMonth(candidate, &state.added, state.location) + }, + func(candidate time.Time) (time.Time, bool) { + return state.schedule.advanceDay(candidate, &state.added, state.location) + }, + func(candidate time.Time) (time.Time, bool) { + return state.schedule.advanceHour(candidate, &state.added, state.location) + }, + func(candidate time.Time) (time.Time, bool) { + return state.schedule.advanceMinute(candidate, &state.added) + }, + func(candidate time.Time) (time.Time, bool) { + return state.schedule.advanceSecond(candidate, &state.added) + }, } - for 1< middayHour { + return candidate.Add(time.Duration(hoursPerDay-candidate.Hour()) * time.Hour) + } + + return candidate.Add(time.Duration(-candidate.Hour()) * time.Hour) } // dayMatches returns true if the schedule's day-of-week and 
day-of-month // restrictions are satisfied by the given time. -func dayMatches(s *SpecSchedule, t time.Time) bool { +func dayMatches(s *SpecSchedule, candidate time.Time) bool { var ( - domMatch bool = 1< 0 - dowMatch bool = 1< 0 + domMatch = hasBit(s.Dom, candidate.Day()) + dowMatch = hasBit(s.Dow, int(candidate.Weekday())) ) if s.Dom&starBit > 0 || s.Dow&starBit > 0 { return domMatch && dowMatch } + return domMatch || dowMatch } + +func hasBit(bitset uint64, position int) bool { + return uint64(1)< 0 +} diff --git a/spec_test.go b/spec_test.go index 1b8a503..16a8894 100644 --- a/spec_test.go +++ b/spec_test.go @@ -6,7 +6,39 @@ import ( "time" ) +const ( + sundayJulyFifteenthMidnight = "Sun Jul 15 00:00 2012" + julyNinthThreePM = "Mon Jul 9 15:00 2012" + julyNinthLateNight = "Mon Jul 9 23:35 2012" + julyNinthLateNightSeconds = "Mon Jul 9 23:35:51 2012" + + newYorkSpringMidnight = "2012-03-11T00:00:00-0500" + newYorkSpringOneAM = "2012-03-11T01:00:00-0500" + newYorkSpringThreeAM = "2012-03-11T03:00:00-0400" + newYorkSpringFourAM = "2012-03-11T04:00:00-0400" + + newYorkHourlySpec = "TZ=America/New_York 0 0 * * * ?" + newYorkCronHourlySpec = "CRON_TZ=America/New_York 0 0 * * * ?" + newYorkOneAMSpec = "TZ=America/New_York 0 0 1 * * ?" 
+ + newYorkFallMidnight = "2012-11-04T00:00:00-0400" + newYorkFallOneAMEDT = "2012-11-04T01:00:00-0400" + newYorkFallOneAMEST = "2012-11-04T01:00:00-0500" + newYorkFallTwoAMEST = "2012-11-04T02:00:00-0500" + + inputNewYorkFallMidnight = "TZ=America/New_York 2012-11-04T00:00:00-0400" + kolkataExpectedTime = "2016-01-03T14:14:00+0530" +) + +type nextRunTestCase struct { + time string + spec string + expected string +} + func TestActivation(t *testing.T) { + t.Parallel() + tests := []struct { time, spec string expected bool @@ -32,9 +64,9 @@ func TestActivation(t *testing.T) { {"Mon Jul 16 08:30 2012", "30 08 15 Jul ?", false}, // Predefined schedules - {"Mon Jul 9 15:00 2012", "@hourly", true}, + {julyNinthThreePM, "@hourly", true}, {"Mon Jul 9 15:04 2012", "@hourly", false}, - {"Mon Jul 9 15:00 2012", "@daily", false}, + {julyNinthThreePM, "@daily", false}, {"Mon Jul 9 00:00 2012", "@daily", true}, {"Mon Jul 9 00:00 2012", "@weekly", false}, {"Sun Jul 8 00:00 2012", "@weekly", true}, @@ -44,162 +76,64 @@ func TestActivation(t *testing.T) { // Test interaction of DOW and DOM. // If both are restricted, then only one needs to match. - {"Sun Jul 15 00:00 2012", "* * 1,15 * Sun", true}, + {sundayJulyFifteenthMidnight, "* * 1,15 * Sun", true}, {"Fri Jun 15 00:00 2012", "* * 1,15 * Sun", true}, {"Wed Aug 1 00:00 2012", "* * 1,15 * Sun", true}, - {"Sun Jul 15 00:00 2012", "* * */10 * Sun", true}, // verifies #70 + {sundayJulyFifteenthMidnight, "* * */10 * Sun", true}, // verifies #70 // However, if one has a star, then both need to match. 
- {"Sun Jul 15 00:00 2012", "* * * * Mon", false}, + {sundayJulyFifteenthMidnight, "* * * * Mon", false}, {"Mon Jul 9 00:00 2012", "* * 1,15 * *", false}, - {"Sun Jul 15 00:00 2012", "* * 1,15 * *", true}, - {"Sun Jul 15 00:00 2012", "* * */2 * Sun", true}, + {sundayJulyFifteenthMidnight, "* * 1,15 * *", true}, + {sundayJulyFifteenthMidnight, "* * */2 * Sun", true}, } - for _, test := range tests { - sched, err := ParseStandard(test.spec) + for _, testCase := range tests { + sched, err := ParseStandard(testCase.spec) if err != nil { t.Error(err) + continue } - actual := sched.Next(getTime(test.time).Add(-1 * time.Second)) - expected := getTime(test.time) - if test.expected && expected != actual || !test.expected && expected == actual { + + actual := sched.Next(getTime(testCase.time).Add(-1 * time.Second)) + + expected := getTime(testCase.time) + if testCase.expected && expected != actual || !testCase.expected && expected.Equal(actual) { t.Errorf("Fail evaluating %s on %s: (expected) %s != %s (actual)", - test.spec, test.time, expected, actual) + testCase.spec, testCase.time, expected, actual) } } } func TestNext(t *testing.T) { - runs := []struct { - time, spec string - expected string - }{ - // Simple cases - {"Mon Jul 9 14:45 2012", "0 0/15 * * * *", "Mon Jul 9 15:00 2012"}, - {"Mon Jul 9 14:59 2012", "0 0/15 * * * *", "Mon Jul 9 15:00 2012"}, - {"Mon Jul 9 14:59:59 2012", "0 0/15 * * * *", "Mon Jul 9 15:00 2012"}, - - // Wrap around hours - {"Mon Jul 9 15:45 2012", "0 20-35/15 * * * *", "Mon Jul 9 16:20 2012"}, - - // Wrap around days - {"Mon Jul 9 23:46 2012", "0 */15 * * * *", "Tue Jul 10 00:00 2012"}, - {"Mon Jul 9 23:45 2012", "0 20-35/15 * * * *", "Tue Jul 10 00:20 2012"}, - {"Mon Jul 9 23:35:51 2012", "15/35 20-35/15 * * * *", "Tue Jul 10 00:20:15 2012"}, - {"Mon Jul 9 23:35:51 2012", "15/35 20-35/15 1/2 * * *", "Tue Jul 10 01:20:15 2012"}, - {"Mon Jul 9 23:35:51 2012", "15/35 20-35/15 10-12 * * *", "Tue Jul 10 10:20:15 2012"}, - - {"Mon Jul 9 23:35:51 
2012", "15/35 20-35/15 1/2 */2 * *", "Thu Jul 11 01:20:15 2012"}, - {"Mon Jul 9 23:35:51 2012", "15/35 20-35/15 * 9-20 * *", "Wed Jul 10 00:20:15 2012"}, - {"Mon Jul 9 23:35:51 2012", "15/35 20-35/15 * 9-20 Jul *", "Wed Jul 10 00:20:15 2012"}, - - // Wrap around months - {"Mon Jul 9 23:35 2012", "0 0 0 9 Apr-Oct ?", "Thu Aug 9 00:00 2012"}, - {"Mon Jul 9 23:35 2012", "0 0 0 */5 Apr,Aug,Oct Mon", "Tue Aug 1 00:00 2012"}, - {"Mon Jul 9 23:35 2012", "0 0 0 */5 Oct Mon", "Mon Oct 1 00:00 2012"}, - - // Wrap around years - {"Mon Jul 9 23:35 2012", "0 0 0 * Feb Mon", "Mon Feb 4 00:00 2013"}, - {"Mon Jul 9 23:35 2012", "0 0 0 * Feb Mon/2", "Fri Feb 1 00:00 2013"}, - - // Wrap around minute, hour, day, month, and year - {"Mon Dec 31 23:59:45 2012", "0 * * * * *", "Tue Jan 1 00:00:00 2013"}, - - // Leap year - {"Mon Jul 9 23:35 2012", "0 0 0 29 Feb ?", "Mon Feb 29 00:00 2016"}, - - // Daylight savings time 2am EST (-5) -> 3am EDT (-4) - {"2012-03-11T00:00:00-0500", "TZ=America/New_York 0 30 2 11 Mar ?", "2013-03-11T02:30:00-0400"}, - - // hourly job - {"2012-03-11T00:00:00-0500", "TZ=America/New_York 0 0 * * * ?", "2012-03-11T01:00:00-0500"}, - {"2012-03-11T01:00:00-0500", "TZ=America/New_York 0 0 * * * ?", "2012-03-11T03:00:00-0400"}, - {"2012-03-11T03:00:00-0400", "TZ=America/New_York 0 0 * * * ?", "2012-03-11T04:00:00-0400"}, - {"2012-03-11T04:00:00-0400", "TZ=America/New_York 0 0 * * * ?", "2012-03-11T05:00:00-0400"}, - - // hourly job using CRON_TZ - {"2012-03-11T00:00:00-0500", "CRON_TZ=America/New_York 0 0 * * * ?", "2012-03-11T01:00:00-0500"}, - {"2012-03-11T01:00:00-0500", "CRON_TZ=America/New_York 0 0 * * * ?", "2012-03-11T03:00:00-0400"}, - {"2012-03-11T03:00:00-0400", "CRON_TZ=America/New_York 0 0 * * * ?", "2012-03-11T04:00:00-0400"}, - {"2012-03-11T04:00:00-0400", "CRON_TZ=America/New_York 0 0 * * * ?", "2012-03-11T05:00:00-0400"}, - - // 1am nightly job - {"2012-03-11T00:00:00-0500", "TZ=America/New_York 0 0 1 * * ?", "2012-03-11T01:00:00-0500"}, - 
{"2012-03-11T01:00:00-0500", "TZ=America/New_York 0 0 1 * * ?", "2012-03-12T01:00:00-0400"}, - - // 2am nightly job (skipped) - {"2012-03-11T00:00:00-0500", "TZ=America/New_York 0 0 2 * * ?", "2012-03-12T02:00:00-0400"}, - - // Daylight savings time 2am EDT (-4) => 1am EST (-5) - {"2012-11-04T00:00:00-0400", "TZ=America/New_York 0 30 2 04 Nov ?", "2012-11-04T02:30:00-0500"}, - {"2012-11-04T01:45:00-0400", "TZ=America/New_York 0 30 1 04 Nov ?", "2012-11-04T01:30:00-0500"}, - - // hourly job - {"2012-11-04T00:00:00-0400", "TZ=America/New_York 0 0 * * * ?", "2012-11-04T01:00:00-0400"}, - {"2012-11-04T01:00:00-0400", "TZ=America/New_York 0 0 * * * ?", "2012-11-04T01:00:00-0500"}, - {"2012-11-04T01:00:00-0500", "TZ=America/New_York 0 0 * * * ?", "2012-11-04T02:00:00-0500"}, - - // 1am nightly job (runs twice) - {"2012-11-04T00:00:00-0400", "TZ=America/New_York 0 0 1 * * ?", "2012-11-04T01:00:00-0400"}, - {"2012-11-04T01:00:00-0400", "TZ=America/New_York 0 0 1 * * ?", "2012-11-04T01:00:00-0500"}, - {"2012-11-04T01:00:00-0500", "TZ=America/New_York 0 0 1 * * ?", "2012-11-05T01:00:00-0500"}, - - // 2am nightly job - {"2012-11-04T00:00:00-0400", "TZ=America/New_York 0 0 2 * * ?", "2012-11-04T02:00:00-0500"}, - {"2012-11-04T02:00:00-0500", "TZ=America/New_York 0 0 2 * * ?", "2012-11-05T02:00:00-0500"}, - - // 3am nightly job - {"2012-11-04T00:00:00-0400", "TZ=America/New_York 0 0 3 * * ?", "2012-11-04T03:00:00-0500"}, - {"2012-11-04T03:00:00-0500", "TZ=America/New_York 0 0 3 * * ?", "2012-11-05T03:00:00-0500"}, - - // hourly job - {"TZ=America/New_York 2012-11-04T00:00:00-0400", "0 0 * * * ?", "2012-11-04T01:00:00-0400"}, - {"TZ=America/New_York 2012-11-04T01:00:00-0400", "0 0 * * * ?", "2012-11-04T01:00:00-0500"}, - {"TZ=America/New_York 2012-11-04T01:00:00-0500", "0 0 * * * ?", "2012-11-04T02:00:00-0500"}, - - // 1am nightly job (runs twice) - {"TZ=America/New_York 2012-11-04T00:00:00-0400", "0 0 1 * * ?", "2012-11-04T01:00:00-0400"}, - {"TZ=America/New_York 
2012-11-04T01:00:00-0400", "0 0 1 * * ?", "2012-11-04T01:00:00-0500"}, - {"TZ=America/New_York 2012-11-04T01:00:00-0500", "0 0 1 * * ?", "2012-11-05T01:00:00-0500"}, - - // 2am nightly job - {"TZ=America/New_York 2012-11-04T00:00:00-0400", "0 0 2 * * ?", "2012-11-04T02:00:00-0500"}, - {"TZ=America/New_York 2012-11-04T02:00:00-0500", "0 0 2 * * ?", "2012-11-05T02:00:00-0500"}, - - // 3am nightly job - {"TZ=America/New_York 2012-11-04T00:00:00-0400", "0 0 3 * * ?", "2012-11-04T03:00:00-0500"}, - {"TZ=America/New_York 2012-11-04T03:00:00-0500", "0 0 3 * * ?", "2012-11-05T03:00:00-0500"}, - - // Unsatisfiable - {"Mon Jul 9 23:35 2012", "0 0 0 30 Feb ?", ""}, - {"Mon Jul 9 23:35 2012", "0 0 0 31 Apr ?", ""}, - - // Monthly job - {"TZ=America/New_York 2012-11-04T00:00:00-0400", "0 0 3 3 * ?", "2012-12-03T03:00:00-0500"}, - - // Test the scenario of DST resulting in midnight not being a valid time. - // https://github.com/robfig/cron/issues/157 - {"2018-10-17T05:00:00-0400", "TZ=America/Sao_Paulo 0 0 9 10 * ?", "2018-11-10T06:00:00-0500"}, - {"2018-02-14T05:00:00-0500", "TZ=America/Sao_Paulo 0 0 9 22 * ?", "2018-02-22T07:00:00-0500"}, - } - - for _, c := range runs { - sched, err := secondParser.Parse(c.spec) - if err != nil { - t.Error(err) - continue - } - actual := sched.Next(getTime(c.time)) - expected := getTime(c.expected) - if !actual.Equal(expected) { - t.Errorf("%s, \"%s\": (expected) %v != %v (actual)", c.time, c.spec, expected, actual) - } - } + t.Parallel() + + t.Run("basic and wrap", func(t *testing.T) { + t.Parallel() + assertNextRunCases(t, nextRunBasicAndWrapCases()) + }) + t.Run("spring forward", func(t *testing.T) { + t.Parallel() + assertNextRunCases(t, nextRunSpringForwardCases()) + }) + t.Run("fall back with tz spec", func(t *testing.T) { + t.Parallel() + assertNextRunCases(t, nextRunFallBackSpecCases()) + }) + t.Run("fall back with tz input", func(t *testing.T) { + t.Parallel() + assertNextRunCases(t, nextRunFallBackInputCases()) + }) + t.Run("edge 
cases", func(t *testing.T) { + t.Parallel() + assertNextRunCases(t, nextRunEdgeCases()) + }) } func TestErrors(t *testing.T) { + t.Parallel() + invalidSpecs := []string{ "xyz", "60 0 * * *", @@ -219,55 +153,67 @@ func getTime(value string) time.Time { return time.Time{} } - var location = time.Local + location := time.Local + if strings.HasPrefix(value, "TZ=") { parts := strings.Fields(value) + loc, err := time.LoadLocation(parts[0][len("TZ="):]) if err != nil { panic("could not parse location:" + err.Error()) } + location = loc value = parts[1] } - var layouts = []string{ + layouts := []string{ "Mon Jan 2 15:04 2006", "Mon Jan 2 15:04:05 2006", } for _, layout := range layouts { - if t, err := time.ParseInLocation(layout, value, location); err == nil { - return t + parsedTime, err := time.ParseInLocation(layout, value, location) + if err == nil { + return parsedTime } } - if t, err := time.ParseInLocation("2006-01-02T15:04:05-0700", value, location); err == nil { - return t + + parsedTime, err := time.ParseInLocation("2006-01-02T15:04:05-0700", value, location) + if err == nil { + return parsedTime } + panic("could not parse time value " + value) } func TestNextWithTz(t *testing.T) { + t.Parallel() + runs := []struct { time, spec string expected string }{ // Failing tests - {"2016-01-03T13:09:03+0530", "14 14 * * *", "2016-01-03T14:14:00+0530"}, - {"2016-01-03T04:09:03+0530", "14 14 * * ?", "2016-01-03T14:14:00+0530"}, + {"2016-01-03T13:09:03+0530", "14 14 * * *", kolkataExpectedTime}, + {"2016-01-03T04:09:03+0530", "14 14 * * ?", kolkataExpectedTime}, // Passing tests - {"2016-01-03T14:09:03+0530", "14 14 * * *", "2016-01-03T14:14:00+0530"}, - {"2016-01-03T14:00:00+0530", "14 14 * * ?", "2016-01-03T14:14:00+0530"}, + {"2016-01-03T14:09:03+0530", "14 14 * * *", kolkataExpectedTime}, + {"2016-01-03T14:00:00+0530", "14 14 * * ?", kolkataExpectedTime}, } - for _, c := range runs { - sched, err := ParseStandard(c.spec) + for _, testCase := range runs { + sched, err := 
ParseStandard(testCase.spec) if err != nil { t.Error(err) + continue } - actual := sched.Next(getTimeTZ(c.time)) - expected := getTimeTZ(c.expected) + + actual := sched.Next(getTimeTZ(testCase.time)) + + expected := getTimeTZ(testCase.expected) if !actual.Equal(expected) { - t.Errorf("%s, \"%s\": (expected) %v != %v (actual)", c.time, c.spec, expected, actual) + t.Errorf("%s, \"%s\": (expected) %v != %v (actual)", testCase.time, testCase.spec, expected, actual) } } } @@ -276,25 +222,133 @@ func getTimeTZ(value string) time.Time { if value == "" { return time.Time{} } - t, err := time.Parse("Mon Jan 2 15:04 2006", value) + + parsedTime, err := time.Parse("Mon Jan 2 15:04 2006", value) if err != nil { - t, err = time.Parse("Mon Jan 2 15:04:05 2006", value) + parsedTime, err = time.Parse("Mon Jan 2 15:04:05 2006", value) if err != nil { - t, err = time.Parse("2006-01-02T15:04:05-0700", value) + parsedTime, err = time.Parse("2006-01-02T15:04:05-0700", value) if err != nil { panic(err) } } } - return t + return parsedTime } // https://github.com/robfig/cron/issues/144 func TestSlash0NoHang(t *testing.T) { + t.Parallel() + schedule := "TZ=America/New_York 15/0 * * * *" + _, err := ParseStandard(schedule) if err == nil { t.Error("expected an error on 0 increment") } } + +func assertNextRunCases(t *testing.T, testCases []nextRunTestCase) { + t.Helper() + + parserWithSeconds := testParserWithSeconds() + for _, testCase := range testCases { + sched, err := parserWithSeconds.Parse(testCase.spec) + if err != nil { + t.Error(err) + + continue + } + + actual := sched.Next(getTime(testCase.time)) + + expected := getTime(testCase.expected) + if !actual.Equal(expected) { + t.Errorf("%s, \"%s\": (expected) %v != %v (actual)", testCase.time, testCase.spec, expected, actual) + } + } +} + +func nextRunBasicAndWrapCases() []nextRunTestCase { + return []nextRunTestCase{ + {"Mon Jul 9 14:45 2012", "0 0/15 * * * *", julyNinthThreePM}, + {"Mon Jul 9 14:59 2012", "0 0/15 * * * *", 
julyNinthThreePM}, + {"Mon Jul 9 14:59:59 2012", "0 0/15 * * * *", julyNinthThreePM}, + {"Mon Jul 9 15:45 2012", "0 20-35/15 * * * *", "Mon Jul 9 16:20 2012"}, + {"Mon Jul 9 23:46 2012", "0 */15 * * * *", "Tue Jul 10 00:00 2012"}, + {"Mon Jul 9 23:45 2012", "0 20-35/15 * * * *", "Tue Jul 10 00:20 2012"}, + {julyNinthLateNightSeconds, "15/35 20-35/15 * * * *", "Tue Jul 10 00:20:15 2012"}, + {julyNinthLateNightSeconds, "15/35 20-35/15 1/2 * * *", "Tue Jul 10 01:20:15 2012"}, + {julyNinthLateNightSeconds, "15/35 20-35/15 10-12 * * *", "Tue Jul 10 10:20:15 2012"}, + {julyNinthLateNightSeconds, "15/35 20-35/15 1/2 */2 * *", "Thu Jul 11 01:20:15 2012"}, + {julyNinthLateNightSeconds, "15/35 20-35/15 * 9-20 * *", "Wed Jul 10 00:20:15 2012"}, + {julyNinthLateNightSeconds, "15/35 20-35/15 * 9-20 Jul *", "Wed Jul 10 00:20:15 2012"}, + {julyNinthLateNight, "0 0 0 9 Apr-Oct ?", "Thu Aug 9 00:00 2012"}, + {julyNinthLateNight, "0 0 0 */5 Apr,Aug,Oct Mon", "Tue Aug 1 00:00 2012"}, + {julyNinthLateNight, "0 0 0 */5 Oct Mon", "Mon Oct 1 00:00 2012"}, + {julyNinthLateNight, "0 0 0 * Feb Mon", "Mon Feb 4 00:00 2013"}, + {julyNinthLateNight, "0 0 0 * Feb Mon/2", "Fri Feb 1 00:00 2013"}, + {"Mon Dec 31 23:59:45 2012", "0 * * * * *", "Tue Jan 1 00:00:00 2013"}, + {julyNinthLateNight, "0 0 0 29 Feb ?", "Mon Feb 29 00:00 2016"}, + } +} + +func nextRunSpringForwardCases() []nextRunTestCase { + return []nextRunTestCase{ + {newYorkSpringMidnight, "TZ=America/New_York 0 30 2 11 Mar ?", "2013-03-11T02:30:00-0400"}, + {newYorkSpringMidnight, newYorkHourlySpec, newYorkSpringOneAM}, + {newYorkSpringOneAM, newYorkHourlySpec, newYorkSpringThreeAM}, + {newYorkSpringThreeAM, newYorkHourlySpec, newYorkSpringFourAM}, + {newYorkSpringFourAM, newYorkHourlySpec, "2012-03-11T05:00:00-0400"}, + {newYorkSpringMidnight, newYorkCronHourlySpec, newYorkSpringOneAM}, + {newYorkSpringOneAM, newYorkCronHourlySpec, newYorkSpringThreeAM}, + {newYorkSpringThreeAM, newYorkCronHourlySpec, newYorkSpringFourAM}, + 
{newYorkSpringFourAM, newYorkCronHourlySpec, "2012-03-11T05:00:00-0400"}, + {newYorkSpringMidnight, newYorkOneAMSpec, newYorkSpringOneAM}, + {newYorkSpringOneAM, newYorkOneAMSpec, "2012-03-12T01:00:00-0400"}, + {newYorkSpringMidnight, "TZ=America/New_York 0 0 2 * * ?", "2012-03-12T02:00:00-0400"}, + } +} + +func nextRunFallBackSpecCases() []nextRunTestCase { + return []nextRunTestCase{ + {newYorkFallMidnight, "TZ=America/New_York 0 30 2 04 Nov ?", "2012-11-04T02:30:00-0500"}, + {"2012-11-04T01:45:00-0400", "TZ=America/New_York 0 30 1 04 Nov ?", "2012-11-04T01:30:00-0500"}, + {newYorkFallMidnight, newYorkHourlySpec, newYorkFallOneAMEDT}, + {newYorkFallOneAMEDT, newYorkHourlySpec, newYorkFallOneAMEST}, + {newYorkFallOneAMEST, newYorkHourlySpec, newYorkFallTwoAMEST}, + {newYorkFallMidnight, newYorkOneAMSpec, newYorkFallOneAMEDT}, + {newYorkFallOneAMEDT, newYorkOneAMSpec, newYorkFallOneAMEST}, + {newYorkFallOneAMEST, newYorkOneAMSpec, "2012-11-05T01:00:00-0500"}, + {newYorkFallMidnight, "TZ=America/New_York 0 0 2 * * ?", newYorkFallTwoAMEST}, + {newYorkFallTwoAMEST, "TZ=America/New_York 0 0 2 * * ?", "2012-11-05T02:00:00-0500"}, + {newYorkFallMidnight, "TZ=America/New_York 0 0 3 * * ?", "2012-11-04T03:00:00-0500"}, + {"2012-11-04T03:00:00-0500", "TZ=America/New_York 0 0 3 * * ?", "2012-11-05T03:00:00-0500"}, + } +} + +func nextRunFallBackInputCases() []nextRunTestCase { + return []nextRunTestCase{ + {inputNewYorkFallMidnight, "0 0 * * * ?", "2012-11-04T01:00:00-0400"}, + {"TZ=America/New_York 2012-11-04T01:00:00-0400", "0 0 * * * ?", newYorkFallOneAMEST}, + {"TZ=America/New_York 2012-11-04T01:00:00-0500", "0 0 * * * ?", newYorkFallTwoAMEST}, + {inputNewYorkFallMidnight, "0 0 1 * * ?", "2012-11-04T01:00:00-0400"}, + {"TZ=America/New_York 2012-11-04T01:00:00-0400", "0 0 1 * * ?", newYorkFallOneAMEST}, + {"TZ=America/New_York 2012-11-04T01:00:00-0500", "0 0 1 * * ?", "2012-11-05T01:00:00-0500"}, + {inputNewYorkFallMidnight, "0 0 2 * * ?", newYorkFallTwoAMEST}, + 
{"TZ=America/New_York 2012-11-04T02:00:00-0500", "0 0 2 * * ?", "2012-11-05T02:00:00-0500"}, + {inputNewYorkFallMidnight, "0 0 3 * * ?", "2012-11-04T03:00:00-0500"}, + {"TZ=America/New_York 2012-11-04T03:00:00-0500", "0 0 3 * * ?", "2012-11-05T03:00:00-0500"}, + } +} + +func nextRunEdgeCases() []nextRunTestCase { + return []nextRunTestCase{ + {julyNinthLateNight, "0 0 0 30 Feb ?", ""}, + {julyNinthLateNight, "0 0 0 31 Apr ?", ""}, + {"TZ=America/New_York 2012-11-04T00:00:00-0400", "0 0 3 3 * ?", "2012-12-03T03:00:00-0500"}, + {"2018-10-17T05:00:00-0400", "TZ=America/Sao_Paulo 0 0 9 10 * ?", "2018-11-10T06:00:00-0500"}, + {"2018-02-14T05:00:00-0500", "TZ=America/Sao_Paulo 0 0 9 22 * ?", "2018-02-22T07:00:00-0500"}, + } +} From 648055936ce2f93951e36809e9ea3b0eb098c623 Mon Sep 17 00:00:00 2001 From: "F." Date: Fri, 10 Apr 2026 09:03:28 +0200 Subject: [PATCH 2/2] feat!: launch v4 with context.Context, log/slog, and testable Clock MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit BREAKING CHANGE: This is a full API modernization for the v4 release. 
- Rename module to github.com/hyp3rd/cron/v4 - Job.Run now takes context.Context and returns error - Start(ctx), Run(ctx), Stop(ctx) all accept context.Context - Replace custom Logger interface with *slog.Logger - Rename ScheduleParser → Parser, Parser → SpecParser - Introduce Clock/Timer interfaces with WithClock option - Rewrite tests to use fakeClock for deterministic scheduling - Add CHANGELOG.md and MIGRATION.md - Rewrite README.md and doc.go for v4 API - Delete .travis.yml (CI is GitHub Actions) --- .github/workflows/go.yml | 2 +- .github/workflows/lint.yml | 2 +- .golangci.yaml | 4 +- .project-settings.env | 2 +- .travis.yml | 1 - CHANGELOG.md | 48 +++ LICENSE | 2 +- MIGRATION.md | 156 ++++++++ Makefile | 2 +- README.md | 193 +++++----- chain.go | 43 ++- chain_test.go | 75 ++-- clock.go | 44 +++ clock_fake_test.go | 132 +++++++ cron.go | 297 +++++++++----- cron_test.go | 769 +++++++++++++++++++++---------------- doc.go | 82 ++-- go.mod | 2 +- helpers_test.go | 7 +- logger.go | 102 +---- option.go | 15 +- option_test.go | 31 +- parser.go | 26 +- parser_test.go | 4 +- 24 files changed, 1300 insertions(+), 741 deletions(-) delete mode 100644 .travis.yml create mode 100644 CHANGELOG.md create mode 100644 MIGRATION.md create mode 100644 clock.go create mode 100644 clock_fake_test.go diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index 7df844c..a450059 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -20,7 +20,7 @@ jobs: source .project-settings.env set +a echo "go_version=${GO_VERSION}" >> "$GITHUB_OUTPUT" - echo "gci_prefix=${GCI_PREFIX:-github.com/robfig/cron/v3}" >> "$GITHUB_OUTPUT" + echo "gci_prefix=${GCI_PREFIX:-github.com/hyp3rd/cron/v4}" >> "$GITHUB_OUTPUT" echo "golangci_lint_version=${GOLANGCI_LINT_VERSION}" >> "$GITHUB_OUTPUT" echo "proto_enabled=${PROTO_ENABLED:-false}" >> "$GITHUB_OUTPUT" diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index a314b65..8e54adb 100644 --- 
a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -18,7 +18,7 @@ jobs: source .project-settings.env set +a echo "go_version=${GO_VERSION}" >> "$GITHUB_OUTPUT" - echo "gci_prefix=${GCI_PREFIX:-github.com/robfig/cron/v3}" >> "$GITHUB_OUTPUT" + echo "gci_prefix=${GCI_PREFIX:-github.com/hyp3rd/cron/v4}" >> "$GITHUB_OUTPUT" echo "golangci_lint_version=${GOLANGCI_LINT_VERSION}" >> "$GITHUB_OUTPUT" echo "proto_enabled=${PROTO_ENABLED:-true}" >> "$GITHUB_OUTPUT" - name: Setup Go diff --git a/.golangci.yaml b/.golangci.yaml index bffe783..1fa4fcb 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -247,7 +247,7 @@ formatters: gofumpt: # Module path which contains the source code being formatted. # Default: "" - module-path: github.com/robfig/cron/v3 + module-path: github.com/hyp3rd/cron/v4 # Choose whether to use the extra rules. # Default: false extra-rules: true @@ -257,7 +257,7 @@ formatters: # with the given prefixes are grouped after 3rd-party packages. # Default: [] local-prefixes: - - github.com/robfig/cron/v3 + - github.com/hyp3rd/cron/v4 golines: # Target maximum line length. diff --git a/.project-settings.env b/.project-settings.env index 34a3d53..4cd633e 100644 --- a/.project-settings.env +++ b/.project-settings.env @@ -1,5 +1,5 @@ GOLANGCI_LINT_VERSION=v2.11.4 BUF_VERSION=v1.67.0 GO_VERSION=1.26.2 -GCI_PREFIX=github.com/robfig/cron/v3 +GCI_PREFIX=github.com/hyp3rd/cron/v4 PROTO_ENABLED=false diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 4f2ee4d..0000000 --- a/.travis.yml +++ /dev/null @@ -1 +0,0 @@ -language: go diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..e653cbe --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,48 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+
+## [4.0.0] - Unreleased
+
+### Added
+
+- `context.Context` threading throughout the public API:
+  - `Job.Run(ctx context.Context) error` — jobs receive a cancellable context
+    and return errors.
+  - `Start(ctx context.Context)` — binds the scheduler to the caller's context.
+  - `Run(ctx context.Context) error` — blocking variant; returns
+    `ErrAlreadyRunning` if a scheduler is already active.
+  - `Stop(ctx context.Context) error` — cancels the scheduler, waits for
+    in-flight jobs bounded by `ctx`.
+- `Clock` interface (`Clock`, `Timer`) and `WithClock` option for deterministic
+  testing without `time.Sleep`.
+- `ErrAlreadyRunning` sentinel error returned by `Run` when called twice.
+- `ErrPanic` sentinel error returned by `Recover` when a job panics, usable with
+  `errors.Is`.
+- `DiscardLogger()` convenience for silencing all scheduler output.
+
+### Changed
+
+- **Module path**: `github.com/hyp3rd/cron/v4` (was `github.com/robfig/cron/v3`).
+- **Logging**: replaced custom `Logger` interface with `*slog.Logger`
+  (`log/slog`). Default level is `slog.LevelWarn`.
+- **Parser naming**: the `ScheduleParser` interface is now `Parser`; the concrete
+  `Parser` struct is now `SpecParser`; `NewParser()` is now `NewSpecParser()`.
+- `Entry.WrappedJob` is now unexported (`wrappedJob`).
+- `FuncJob` signature: `func(context.Context) error` (was `func()`).
+- Minimum Go version: **1.26**.
+
+### Removed
+
+- `Logger`, `PrintfLogger`, `VerbosePrintfLogger` — use `*slog.Logger` directly.
+- `ScheduleParser` interface name — use `Parser`.
+- `NewParser` constructor — use `NewSpecParser`.
+- `.travis.yml` — CI is on GitHub Actions.
+
+### Migration
+
+See [MIGRATION.md](MIGRATION.md) for a step-by-step upgrade guide with
+before/after code snippets.
diff --git a/LICENSE b/LICENSE
index 3a0f627..1077c18 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,4 +1,5 @@
 Copyright (C) 2012 Rob Figueiredo
+Copyright (C) 2026 Francesco Cosentino
 All Rights Reserved.
MIT LICENSE diff --git a/MIGRATION.md b/MIGRATION.md new file mode 100644 index 0000000..3993e67 --- /dev/null +++ b/MIGRATION.md @@ -0,0 +1,156 @@ +# Migrating from robfig/cron/v3 to hyp3rd/cron/v4 + +This guide covers every breaking change between `github.com/robfig/cron/v3` and +`github.com/hyp3rd/cron/v4`. + +--- + +## 1. Import path + +```diff +- import "github.com/robfig/cron/v3" ++ import "github.com/hyp3rd/cron/v4" +``` + +```sh +go get github.com/hyp3rd/cron/v4@latest +``` + +## 2. Job interface — `context.Context` and `error` return + +Jobs now receive a context (cancelled on scheduler stop) and return an error. + +```diff + type Job interface { +- Run() ++ Run(ctx context.Context) error + } +``` + +`FuncJob` follows the same change: + +```diff +- type FuncJob func() ++ type FuncJob func(ctx context.Context) error +``` + +Update `AddFunc` call sites accordingly: + +```diff +- c.AddFunc("@hourly", func() { fmt.Println("tick") }) ++ c.AddFunc("@hourly", func(ctx context.Context) error { ++ fmt.Println("tick") ++ return nil ++ }) +``` + +## 3. Start / Run / Stop — context-driven lifecycle + +### Start + +```diff +- c.Start() ++ c.Start(ctx) +``` + +The scheduler exits when `ctx` is cancelled. + +### Run (blocking) + +```diff +- c.Run() ++ err := c.Run(ctx) +``` + +Returns `cron.ErrAlreadyRunning` if a scheduler is already active. + +### Stop + +```diff +- ctx := c.Stop() // old: returned a context +- <-ctx.Done() ++ err := c.Stop(ctx) // new: accepts a deadline context +``` + +`Stop` cancels the scheduler and waits for in-flight jobs, bounded by `ctx`. It +returns `ctx.Err()` if the deadline elapses before all jobs finish. + +## 4. Logger — `log/slog` replaces custom interface + +The custom `Logger` interface, `PrintfLogger`, and `VerbosePrintfLogger` are +removed. Cron now uses `*slog.Logger` directly. 
+ +```diff +- cron.New(cron.WithLogger( +- cron.VerbosePrintfLogger(log.New(os.Stdout, "cron: ", log.LstdFlags)))) ++ cron.New(cron.WithLogger( ++ slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug})))) +``` + +The default logger writes to stdout at `slog.LevelWarn`, keeping the scheduler +quiet unless you opt in to verbose logging. + +## 5. Parser rename — `ScheduleParser` -> `Parser`, struct `Parser` -> `SpecParser` + +| v3 name | v4 name | +|--------------------|------------------| +| `ScheduleParser` | `Parser` | +| `Parser` (struct) | `SpecParser` | +| `NewParser(...)` | `NewSpecParser(...)` | + +`NewStandardParser()` is unchanged. + +```diff +- p := cron.NewParser(cron.Second | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow) ++ p := cron.NewSpecParser(cron.Second | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow) +``` + +## 6. Clock interface (new) + +A `Clock` interface enables deterministic testing. The default is +`cron.SystemClock()`. Inject a custom clock via `WithClock`: + +```go +c := cron.New(cron.WithClock(myClock)) +``` + +## 7. Entry.WrappedJob is unexported + +`Entry.WrappedJob` is now `Entry.wrappedJob` (unexported). Use `Entry.Job` to +access the user-supplied job. + +## 8. Recover wrapper returns ErrPanic + +`Recover` now returns a wrapped `cron.ErrPanic` error instead of silently +swallowing panics: + +```go +err := job.Run(ctx) +if errors.Is(err, cron.ErrPanic) { + // handle recovered panic +} +``` + +## 9. Removed symbols + +| Removed | Replacement | +|--------------------------|----------------------------| +| `Logger` (interface) | `*slog.Logger` | +| `PrintfLogger` | `slog.New(slog.NewTextHandler(...))` | +| `VerbosePrintfLogger` | set `slog.LevelDebug` | +| `ScheduleParser` | `Parser` (interface) | +| `Parser` (struct) | `SpecParser` | +| `NewParser` | `NewSpecParser` | +| `Entry.WrappedJob` | unexported | + +--- + +## Quick migration checklist + +1. 
Update import path to `github.com/hyp3rd/cron/v4`. +2. Add `context.Context` parameter and `error` return to all `Job` implementations and `FuncJob` / `AddFunc` closures. +3. Pass a `context.Context` to `Start`, `Run`, and `Stop`. +4. Replace `Logger`/`PrintfLogger`/`VerbosePrintfLogger` with `*slog.Logger`. +5. Rename `NewParser` calls to `NewSpecParser`. +6. Replace `Entry.WrappedJob` usage with `Entry.Job`. +7. Handle `error` returns from `Run` and `Stop`. diff --git a/Makefile b/Makefile index 1bdaa94..7e8b86c 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,7 @@ include .project-settings.env GOLANGCI_LINT_VERSION ?= v2.11.4 BUF_VERSION ?= v1.67.0 GO_VERSION ?= 1.26.2 -GCI_PREFIX ?= github.com/robfig/cron/v3 +GCI_PREFIX ?= github.com/hyp3rd/cron/v4 PROTO_ENABLED ?= true GOFILES = $(shell find . -type f -name '*.go' -not -path "./pkg/api/*" -not -path "./vendor/*" -not -path "./.gocache/*" -not -path "./.git/*") diff --git a/README.md b/README.md index 84d20ed..ed5c305 100644 --- a/README.md +++ b/README.md @@ -1,134 +1,137 @@ -[![GoDoc](http://godoc.org/github.com/robfig/cron?status.png)](http://godoc.org/github.com/robfig/cron) -[![Build Status](https://travis-ci.org/robfig/cron.svg?branch=master)](https://travis-ci.org/robfig/cron) - # cron -Cron V3 has been released! +[![Go Reference](https://pkg.go.dev/badge/github.com/hyp3rd/cron/v4.svg)](https://pkg.go.dev/github.com/hyp3rd/cron/v4) +[![CI](https://github.com/hyp3rd/cron/actions/workflows/go.yml/badge.svg)](https://github.com/hyp3rd/cron/actions/workflows/go.yml) +[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](LICENSE) -To download the specific tagged release, run: +A fast, well-tested cron expression parser and job scheduler for Go. -```bash -go get github.com/robfig/cron/v3@v3.0.0 -``` +This is a modernized fork of the abandoned +[`robfig/cron/v3`](https://github.com/robfig/cron). 
It ships an idiomatic +Go 1.26+ API with `context.Context`, `log/slog`, and a testable `Clock` +interface. -Import it in your program as: +## Install -```go -import "github.com/robfig/cron/v3" +```bash +go get github.com/hyp3rd/cron/v4@latest ``` -It requires Go 1.11 or later due to usage of Go Modules. +## Quick start -Refer to the documentation here: - - -The rest of this document describes the the advances in v3 and a list of -breaking changes for users that wish to upgrade from an earlier version. - -## Upgrading to v3 (June 2019) +```go +package main -cron v3 is a major upgrade to the library that addresses all outstanding bugs, -feature requests, and rough edges. It is based on a merge of master which -contains various fixes to issues found over the years and the v2 branch which -contains some backwards-incompatible features like the ability to remove cron -jobs. In addition, v3 adds support for Go Modules, cleans up rough edges like -the timezone support, and fixes a number of bugs. +import ( + "context" + "fmt" + "os" + "os/signal" -New features: + "github.com/hyp3rd/cron/v4" +) -- Support for Go modules. Callers must now import this library as - `github.com/robfig/cron/v3`, instead of `gopkg.in/...` +func main() { + c := cron.New() -- Fixed bugs: - - 0f01e6b parser: fix combining of Dow and Dom (#70) - - dbf3220 adjust times when rolling the clock forward to handle non-existent midnight (#157) - - eeecf15 spec_test.go: ensure an error is returned on 0 increment (#144) - - 70971dc cron.Entries(): update request for snapshot to include a reply channel (#97) - - 1cba5e6 cron: fix: removing a job causes the next scheduled job to run too late (#206) + c.AddFunc("@every 5s", func(ctx context.Context) error { + fmt.Println("tick") -- Standard cron spec parsing by default (first field is "minute"), with an easy - way to opt into the seconds field (quartz-compatible). Although, note that the - year field (optional in Quartz) is not supported. 
+ return nil + }) -- Extensible, key/value logging via an interface that complies with - the project. + ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt) + defer stop() -- The new Chain & JobWrapper types allow you to install "interceptors" to add - cross-cutting behavior like the following: - - Recover any panics from jobs - - Delay a job's execution if the previous run hasn't completed yet - - Skip a job's execution if the previous run hasn't completed yet - - Log each job's invocations - - Notification when jobs are completed + c.Start(ctx) -It is backwards incompatible with both v1 and v2. These updates are required: + <-ctx.Done() + c.Stop(context.Background()) +} +``` -- The v1 branch accepted an optional seconds field at the beginning of the cron - spec. This is non-standard and has led to a lot of confusion. The new default - parser conforms to the standard as described by [the Cron wikipedia page]. +## Features + +- **Standard 5-field cron expressions** (minute, hour, dom, month, dow) plus + optional seconds via `WithSeconds()`. +- **`context.Context` throughout** — `Start`, `Run`, `Stop`, and every `Job` + receive a context for cancellation and deadlines. +- **`log/slog` logging** — structured, leveled logging out of the box. Default + level is `slog.LevelWarn` to keep the scheduler quiet. +- **`Clock` interface** — inject a fake clock via `WithClock` for deterministic, + zero-`time.Sleep` tests. +- **Job wrappers** — `Recover`, `SkipIfStillRunning`, `DelayIfStillRunning`, + and custom `JobWrapper` chains. +- **Thread-safe** — add, remove, and inspect entries while the scheduler is + running. 
+ +## Cron expressions + +| Field | Allowed values | Special characters | +|---|---|---| +| Minutes | 0-59 | `*` `/` `,` `-` | +| Hours | 0-23 | `*` `/` `,` `-` | +| Day of month | 1-31 | `*` `/` `,` `-` `?` | +| Month | 1-12 or JAN-DEC | `*` `/` `,` `-` | +| Day of week | 0-6 or SUN-SAT | `*` `/` `,` `-` `?` | + +### Predefined schedules + +| Entry | Equivalent | +|------------ |----------- | +| `@yearly` | `0 0 1 1 *` | +| `@monthly` | `0 0 1 * *` | +| `@weekly` | `0 0 * * 0` | +| `@daily` | `0 0 * * *` | +| `@hourly` | `0 * * * *` | + +### Intervals + +```text +@every 1h30m +``` - UPDATING: To retain the old behavior, construct your Cron with a custom - parser: +### Time zones ```go -// Seconds field, required -cron.New(cron.WithSeconds()) - -// Seconds field, optional -cron.New(cron.WithParser(cron.NewParser( - cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor, -))) +cron.New(cron.WithLocation(time.UTC)) +// or per-schedule: +c.AddFunc("CRON_TZ=Asia/Tokyo 0 6 * * ?", myJob) ``` -- The Cron type now accepts functional options on construction rather than the - previous ad-hoc behavior modification mechanisms (setting a field, calling a setter). - - UPDATING: Code that sets Cron.ErrorLogger or calls Cron.SetLocation must be - updated to provide those values on construction. +## Job wrappers / Chain -- CRON_TZ is now the recommended way to specify the timezone of a single - schedule, which is sanctioned by the specification. The legacy "TZ=" prefix - will continue to be supported since it is unambiguous and easy to do so. - - UPDATING: No update is required. +```go +c := cron.New(cron.WithChain( + cron.Recover(logger), + cron.SkipIfStillRunning(logger), +)) +``` -- By default, cron will no longer recover panics in jobs that it runs. - Recovering can be surprising (see issue #192) and seems to be at odds with - typical behavior of libraries. 
Relatedly, the `cron.WithPanicLogger` option - has been removed to accommodate the more general JobWrapper type. +Or per-job: - UPDATING: To opt into panic recovery and configure the panic logger: - ```go -cron.New(cron.WithChain( - cron.Recover(logger), // or use cron.DefaultLogger() -)) +wrapped := cron.NewChain(cron.Recover(logger)).Then(myJob) ``` -- In adding support for , `cron.WithVerboseLogger` was - removed, since it is duplicative with the leveled logging. +## Testing with a fake clock - UPDATING: Callers should use `WithLogger` and specify a logger that does not - discard `Info` logs. For convenience, one is provided that wraps `*log.Logger`: +The `Clock` interface lets you drive the scheduler deterministically: ```go -cron.New( - cron.WithLogger(cron.VerbosePrintfLogger(logger))) +c := cron.New(cron.WithClock(fakeClock)) ``` -### Background - Cron spec format +See `clock.go` for the interface definition. + +## Migration from robfig/cron/v3 -There are two cron spec formats in common usage: +See [MIGRATION.md](MIGRATION.md) for a step-by-step upgrade guide. -- The "standard" cron format, described on [the Cron wikipedia page] and used by - the cron Linux system utility. +## Changelog -- The cron format used by [the Quartz Scheduler], commonly used for scheduled - jobs in Java software +See [CHANGELOG.md](CHANGELOG.md). -[the Cron wikipedia page]: https://en.wikipedia.org/wiki/Cron -[the Quartz Scheduler]: http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/tutorial-lesson-06.html +## License -The original version of this package included an optional "seconds" field, which -made it incompatible with both of these formats. Now, the "standard" format is -the default format accepted, and the Quartz format is opt-in. +MIT - see [LICENSE](LICENSE). 
diff --git a/chain.go b/chain.go index 01ea334..b697ebb 100644 --- a/chain.go +++ b/chain.go @@ -1,14 +1,18 @@ package cron import ( + "context" "errors" "fmt" + "log/slog" "runtime" "sync" "time" ) -var errPanicValue = errors.New("panic value") +// ErrPanic wraps a value recovered from a panicking job by [Recover]. Callers +// can use [errors.Is] to detect a recovered panic. +var ErrPanic = errors.New("cron: job panicked") // JobWrapper decorates the given Job with some behavior. type JobWrapper func(Job) Job @@ -21,7 +25,7 @@ type Chain struct { // NewChain returns a Chain consisting of the given JobWrappers. func NewChain(c ...JobWrapper) Chain { - return Chain{c} + return Chain{wrappers: c} } // Then decorates the given job with all JobWrappers in the chain. @@ -41,10 +45,12 @@ func (c Chain) Then(j Job) Job { return j } -// Recover panics in wrapped jobs and log them with the provided logger. -func Recover(logger Logger) JobWrapper { +// Recover converts panics in the wrapped job into a logged error. It is the +// recommended way to prevent a panicking job from tearing down the scheduler +// goroutine. 
+func Recover(logger *slog.Logger) JobWrapper { return func(job Job) Job { - return FuncJob(func() { + return FuncJob(func(ctx context.Context) (err error) { defer func() { if recovered := recover(); recovered != nil { const size = 64 << 10 @@ -52,16 +58,19 @@ func Recover(logger Logger) JobWrapper { buf := make([]byte, size) buf = buf[:runtime.Stack(buf, false)] - err, ok := recovered.(error) - if !ok { - err = fmt.Errorf("%w: %v", errPanicValue, recovered) + var panicErr error + if asErr, ok := recovered.(error); ok { + panicErr = fmt.Errorf("%w: %w", ErrPanic, asErr) + } else { + panicErr = fmt.Errorf("%w: %v", ErrPanic, recovered) } - logger.Error(err, "panic", "stack", "...\n"+string(buf)) + logger.Error("panic", "err", panicErr, "stack", "...\n"+string(buf)) + err = panicErr } }() - job.Run() + return job.Run(ctx) }) } } @@ -69,11 +78,11 @@ func Recover(logger Logger) JobWrapper { // DelayIfStillRunning serializes jobs, delaying subsequent runs until the // previous one is complete. Jobs running after a delay of more than a minute // have the delay logged at Info. -func DelayIfStillRunning(logger Logger) JobWrapper { +func DelayIfStillRunning(logger *slog.Logger) JobWrapper { return func(job Job) Job { var mu sync.Mutex - return FuncJob(func() { + return FuncJob(func(ctx context.Context) error { start := time.Now() mu.Lock() @@ -83,26 +92,28 @@ func DelayIfStillRunning(logger Logger) JobWrapper { logger.Info("delay", "duration", dur) } - job.Run() + return job.Run(ctx) }) } } // SkipIfStillRunning skips an invocation of the Job if a previous invocation is // still running. It logs skips to the given logger at Info level. 
-func SkipIfStillRunning(logger Logger) JobWrapper { +func SkipIfStillRunning(logger *slog.Logger) JobWrapper { return func(job Job) Job { ch := make(chan struct{}, 1) ch <- struct{}{} - return FuncJob(func() { + return FuncJob(func(ctx context.Context) error { select { case v := <-ch: defer func() { ch <- v }() - job.Run() + return job.Run(ctx) default: logger.Info("skip") + + return nil } }) } diff --git a/chain_test.go b/chain_test.go index d97ba1b..9f096f9 100644 --- a/chain_test.go +++ b/chain_test.go @@ -1,8 +1,8 @@ package cron import ( - "io" - "log" + "context" + "errors" "reflect" "sync" "testing" @@ -23,19 +23,22 @@ const ( func appendingJob(slice *[]int, value int) Job { var m sync.Mutex - return FuncJob(func() { + return FuncJob(func(_ context.Context) error { m.Lock() *slice = append(*slice, value) m.Unlock() + + return nil }) } func appendingWrapper(slice *[]int, value int) JobWrapper { return func(job Job) Job { - return FuncJob(func() { - appendingJob(slice, value).Run() - job.Run() + return FuncJob(func(ctx context.Context) error { + _ = appendingJob(slice, value).Run(ctx) //nolint:errcheck // test helper + + return job.Run(ctx) }) } } @@ -51,7 +54,10 @@ func TestChain(t *testing.T) { append4 = appendingJob(&nums, 4) ) - NewChain(append1, append2, append3).Then(append4).Run() + err := NewChain(append1, append2, append3).Then(append4).Run(context.Background()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } if !reflect.DeepEqual(nums, []int{1, 2, 3, 4}) { t.Error("unexpected order of calls:", nums) @@ -61,7 +67,7 @@ func TestChain(t *testing.T) { func TestChainRecover(t *testing.T) { t.Parallel() - panickingJob := FuncJob(func() { + panickingJob := FuncJob(func(_ context.Context) error { panic("panickingJob panics") }) @@ -74,24 +80,27 @@ func TestChainRecover(t *testing.T) { } }() - NewChain().Then(panickingJob). 
- Run() + _ = NewChain().Then(panickingJob).Run(context.Background()) //nolint:errcheck // panics before returning }) - t.Run("Recovering JobWrapper recovers", func(t *testing.T) { + t.Run("Recovering JobWrapper recovers and returns ErrPanic", func(t *testing.T) { t.Parallel() - NewChain(Recover(PrintfLogger(log.New(io.Discard, "", 0)))). + err := NewChain(Recover(DiscardLogger())). Then(panickingJob). - Run() + Run(context.Background()) + if !errors.Is(err, ErrPanic) { + t.Errorf("expected ErrPanic, got %v", err) + } }) t.Run("composed with the *IfStillRunning wrappers", func(t *testing.T) { t.Parallel() - NewChain(Recover(PrintfLogger(log.New(io.Discard, "", 0)))). + //nolint:errcheck // testing recovery, error not relevant + _ = NewChain(Recover(DiscardLogger())). Then(panickingJob). - Run() + Run(context.Background()) }) } @@ -102,7 +111,7 @@ type countJob struct { delay time.Duration } -func (j *countJob) Run() { +func (j *countJob) Run(_ context.Context) error { j.m.Lock() j.started++ j.m.Unlock() @@ -110,6 +119,8 @@ func (j *countJob) Run() { j.m.Lock() j.done++ j.m.Unlock() + + return nil } func (j *countJob) Started() int { @@ -128,6 +139,12 @@ func (j *countJob) Done() int { return j.done } +func runAsync(job Job) { + go func() { + _ = job.Run(context.Background()) //nolint:errcheck // fire-and-forget test helper + }() +} + func TestChainDelayIfStillRunning(t *testing.T) { t.Parallel() @@ -137,7 +154,7 @@ func TestChainDelayIfStillRunning(t *testing.T) { var jobCounter countJob wrappedJob := NewChain(DelayIfStillRunning(DiscardLogger())).Then(&jobCounter) - go wrappedJob.Run() + runAsync(wrappedJob) time.Sleep(jobCompletionWait) @@ -154,11 +171,11 @@ func TestChainDelayIfStillRunning(t *testing.T) { wrappedJob := NewChain(DelayIfStillRunning(DiscardLogger())).Then(&jobCounter) go func() { - go wrappedJob.Run() + runAsync(wrappedJob) time.Sleep(time.Millisecond) - go wrappedJob.Run() + runAsync(wrappedJob) }() time.Sleep(twoJobCompletionWait) @@ -177,11 
+194,11 @@ func TestChainDelayIfStillRunning(t *testing.T) { wrappedJob := NewChain(DelayIfStillRunning(DiscardLogger())).Then(&jobCounter) go func() { - go wrappedJob.Run() + runAsync(wrappedJob) time.Sleep(time.Millisecond) - go wrappedJob.Run() + runAsync(wrappedJob) }() // After 5ms, the first job is still in progress, and the second job was @@ -219,7 +236,7 @@ func testChainSkipRunsImmediately(t *testing.T) { var jobCounter countJob wrappedJob := NewChain(SkipIfStillRunning(DiscardLogger())).Then(&jobCounter) - go wrappedJob.Run() + runAsync(wrappedJob) time.Sleep(jobCompletionWait) @@ -236,11 +253,11 @@ func testChainSkipSecondRunImmediate(t *testing.T) { wrappedJob := NewChain(SkipIfStillRunning(DiscardLogger())).Then(&jobCounter) go func() { - go wrappedJob.Run() + runAsync(wrappedJob) time.Sleep(time.Millisecond) - go wrappedJob.Run() + runAsync(wrappedJob) }() time.Sleep(twoJobCompletionWait) @@ -259,11 +276,11 @@ func testChainSkipSecondRunSkipped(t *testing.T) { wrappedJob := NewChain(SkipIfStillRunning(DiscardLogger())).Then(&jobCounter) go func() { - go wrappedJob.Run() + runAsync(wrappedJob) time.Sleep(time.Millisecond) - go wrappedJob.Run() + runAsync(wrappedJob) }() time.Sleep(waitForFirstJob) @@ -290,7 +307,7 @@ func testChainSkipRapidFire(t *testing.T) { wrappedJob := NewChain(SkipIfStillRunning(DiscardLogger())).Then(&jobCounter) for range [rapidFireJobRuns]struct{}{} { - go wrappedJob.Run() + runAsync(wrappedJob) } time.Sleep(rapidFireCompletionWait) @@ -313,8 +330,8 @@ func testChainSkipDifferentJobs(t *testing.T) { wrappedJob2 := chain.Then(&secondJob) for range [rapidFireJobRuns]struct{}{} { - go wrappedJob1.Run() - go wrappedJob2.Run() + runAsync(wrappedJob1) + runAsync(wrappedJob2) } time.Sleep(independentJobsWait) diff --git a/clock.go b/clock.go new file mode 100644 index 0000000..8dc9c8d --- /dev/null +++ b/clock.go @@ -0,0 +1,44 @@ +package cron + +import "time" + +// Clock abstracts the wall clock so that Cron can be driven by a fake 
clock in +// tests. The default clock returned by [SystemClock] delegates to the standard +// library [time] package. +type Clock interface { + // Now returns the current time. + Now() time.Time + // NewTimer creates a timer that fires after the given duration. The timer + // must be stopped by the caller to release resources. + NewTimer(d time.Duration) Timer +} + +// Timer is the subset of [*time.Timer] required by the scheduler loop. It +// exists so that fake clocks can drive the scheduler deterministically. +type Timer interface { + // C returns the channel on which the fire event is delivered. + C() <-chan time.Time + // Stop prevents the timer from firing. It returns true if the call stops + // the timer, or false if the timer has already expired or been stopped. + Stop() bool +} + +// SystemClock returns a [Clock] backed by [time.Now] and [time.NewTimer]. It is +// the default clock used by [New] when no [WithClock] option is supplied. +func SystemClock() Clock { return systemClock{} } + +type systemClock struct{} + +func (systemClock) Now() time.Time { return time.Now() } + +func (systemClock) NewTimer(d time.Duration) Timer { + return &systemTimer{Timer: time.NewTimer(d)} +} + +type systemTimer struct { + *time.Timer +} + +func (t *systemTimer) C() <-chan time.Time { return t.Timer.C } + +func (t *systemTimer) Stop() bool { return t.Timer.Stop() } diff --git a/clock_fake_test.go b/clock_fake_test.go new file mode 100644 index 0000000..03d7785 --- /dev/null +++ b/clock_fake_test.go @@ -0,0 +1,132 @@ +package cron + +import ( + "sync" + "time" +) + +// fakeClock is a deterministic Clock for testing. Callers advance time +// explicitly via Advance; pending timers fire synchronously during the +// advance, unblocking the scheduler loop without any real-time waits. +type fakeClock struct { + mu sync.Mutex + cur time.Time + timers []*fakeTimer +} + +// newFakeClock returns a fakeClock anchored at the given time. 
+func newFakeClock(t time.Time) *fakeClock { + return &fakeClock{cur: t} +} + +// Now returns the current fake time. +func (fc *fakeClock) Now() time.Time { + fc.mu.Lock() + defer fc.mu.Unlock() + + return fc.cur +} + +// NewTimer creates a timer that fires when the clock advances past now+dur. If +// dur <= 0, the timer fires immediately (buffered send). +func (fc *fakeClock) NewTimer(dur time.Duration) Timer { + fc.mu.Lock() + defer fc.mu.Unlock() + + ft := &fakeTimer{ + clock: fc, + ch: make(chan time.Time, 1), + deadline: fc.cur.Add(dur), + } + + if dur <= 0 { + ft.ch <- fc.cur + + ft.fired = true + } else { + fc.timers = append(fc.timers, ft) + } + + return ft +} + +// Advance moves the clock forward by dur and fires all timers whose deadlines +// have been reached. Timers fire in deadline order. +func (fc *fakeClock) Advance(dur time.Duration) { + fc.mu.Lock() + defer fc.mu.Unlock() + + fc.cur = fc.cur.Add(dur) + fc.fireTimersLocked() +} + +// BlockUntilTimers spins briefly until at least count active (non-stopped, +// non-fired) timers are registered. This synchronizes with the scheduler +// goroutine, which creates a timer at the top of each loop iteration. +func (fc *fakeClock) BlockUntilTimers(count int) { + for { + fc.mu.Lock() + + active := 0 + + for _, ft := range fc.timers { + if !ft.stopped && !ft.fired { + active++ + } + } + + fc.mu.Unlock() + + if active >= count { + return + } + + // Yield to the scheduler goroutine. + time.Sleep(time.Millisecond) + } +} + +func (fc *fakeClock) fireTimersLocked() { + remaining := fc.timers[:0] + + for _, ft := range fc.timers { + if ft.stopped { + continue + } + + if !fc.cur.Before(ft.deadline) { + ft.ch <- fc.cur + + ft.fired = true + } else { + remaining = append(remaining, ft) + } + } + + fc.timers = remaining +} + +// fakeTimer implements the Timer interface for fakeClock. Stop acquires the +// parent clock's mutex to avoid data races with BlockUntilTimers. 
+type fakeTimer struct { + clock *fakeClock + ch chan time.Time + deadline time.Time + fired bool + stopped bool +} + +func (ft *fakeTimer) C() <-chan time.Time { return ft.ch } + +func (ft *fakeTimer) Stop() bool { + ft.clock.mu.Lock() + defer ft.clock.mu.Unlock() + + if ft.fired || ft.stopped { + return false + } + + ft.stopped = true + + return true +} diff --git a/cron.go b/cron.go index 3a5ef4b..172e806 100644 --- a/cron.go +++ b/cron.go @@ -2,39 +2,54 @@ package cron import ( "context" + "errors" "fmt" + "log/slog" "slices" "sync" + "sync/atomic" "time" ) -// Cron keeps track of any number of entries, invoking the associated func as +// ErrAlreadyRunning is returned by [Cron.Run] when the scheduler is already +// running in another goroutine. +var ErrAlreadyRunning = errors.New("cron: already running") + +// Cron keeps track of any number of entries, invoking the associated job as // specified by the schedule. It may be started, stopped, and the entries may // be inspected while running. type Cron struct { - entries []*Entry - chain Chain - stop chan struct{} - add chan *Entry - remove chan EntryID - snapshot chan chan []Entry - running bool - logger Logger - runningMu sync.Mutex - location *time.Location - parser ScheduleParser - nextID EntryID - jobWaiter sync.WaitGroup -} - -// ScheduleParser is an interface for schedule spec parsers that return a Schedule. -type ScheduleParser interface { + entries []*Entry + chain Chain + add chan *Entry + remove chan EntryID + snapshot chan chan []Entry + running atomic.Bool + logger *slog.Logger + runningMu sync.Mutex + location *time.Location + parser Parser + clock Clock + nextID EntryID + jobWaiter sync.WaitGroup + rootCtx context.Context //nolint:containedctx // stored to propagate cancellation to in-flight jobs + rootCancel context.CancelFunc + loopDone chan struct{} +} + +// Parser turns a cron spec string into a [Schedule]. 
The default +// implementation is [SpecParser]; callers can supply their own by passing +// [WithParser]. +type Parser interface { Parse(spec string) (Schedule, error) } -// Job is an interface for submitted cron jobs. +// Job is the unit of work scheduled by [Cron]. Implementations should honor +// the provided context: when ctx is cancelled the job is expected to return +// promptly. A non-nil error is logged by the scheduler but does not stop +// future executions. type Job interface { - Run() + Run(ctx context.Context) error } // Schedule describes a job's duty cycle. @@ -47,7 +62,7 @@ type Schedule interface { // EntryID identifies an entry within a Cron instance. type EntryID int -// Entry consists of a schedule and the func to execute on that schedule. +// Entry consists of a schedule and the job to execute on that schedule. type Entry struct { // ID is the cron-assigned ID of this entry, which may be used to look up a // snapshot or remove it. @@ -57,19 +72,18 @@ type Entry struct { Schedule Schedule // Next time the job will run, or the zero time if Cron has not been - // started or this entry's schedule is unsatisfiable + // started or this entry's schedule is unsatisfiable. Next time.Time // Prev is the last time this job was run, or the zero time if never. Prev time.Time - // WrappedJob is the thing to run when the Schedule is activated. - WrappedJob Job - - // Job is the thing that was submitted to cron. - // It is kept around so that user code that needs to get at the job later, - // e.g. via Entries() can do so. + // Job is the job that was submitted to cron, kept around so callers can + // recover it via [Cron.Entries] or [Cron.Entry]. Job Job + + // wrappedJob is the chain-wrapped job actually executed on activation. + wrappedJob Job } // Valid returns true if this is not the zero entry. @@ -89,7 +103,11 @@ func (e Entry) Valid() bool { return e.ID != 0 } // // Chain // Description: Wrap submitted jobs to customize behavior. 
-// Default: A chain that recovers panics and logs them to stderr. +// Default: An empty chain; jobs are executed as-is. +// +// Clock +// Description: Source of wall-clock time and timers used by the scheduler. +// Default: A clock backed by time.Now / time.NewTimer. // // See "cron.With*" to modify the default behavior. func New(opts ...Option) *Cron { @@ -97,14 +115,13 @@ func New(opts ...Option) *Cron { entries: nil, chain: NewChain(), add: make(chan *Entry), - stop: make(chan struct{}), snapshot: make(chan chan []Entry), remove: make(chan EntryID), - running: false, runningMu: sync.Mutex{}, logger: DefaultLogger(), location: time.Local, parser: NewStandardParser(), + clock: SystemClock(), } for _, opt := range opts { opt(cronInstance) @@ -115,16 +132,16 @@ func New(opts ...Option) *Cron { const idleTimerDuration = 100000 * time.Hour -// FuncJob is a wrapper that turns a func() into a cron.Job. -type FuncJob func() +// FuncJob adapts an ordinary function into a [Job]. +type FuncJob func(ctx context.Context) error // Run executes the wrapped func. -func (f FuncJob) Run() { f() } +func (f FuncJob) Run(ctx context.Context) error { return f(ctx) } // AddFunc adds a func to the Cron to be run on the given schedule. // The spec is parsed using the time zone of this Cron instance as the default. // An opaque ID is returned that can be used to later remove it. -func (c *Cron) AddFunc(spec string, cmd func()) (EntryID, error) { +func (c *Cron) AddFunc(spec string, cmd func(ctx context.Context) error) (EntryID, error) { return c.AddJob(spec, FuncJob(cmd)) } @@ -144,20 +161,34 @@ func (c *Cron) AddJob(spec string, cmd Job) (EntryID, error) { // The job is wrapped with the configured Chain. 
func (c *Cron) Schedule(schedule Schedule, cmd Job) EntryID { c.runningMu.Lock() - defer c.runningMu.Unlock() c.nextID++ entry := &Entry{ ID: c.nextID, Schedule: schedule, - WrappedJob: c.chain.Then(cmd), Job: cmd, + wrappedJob: c.chain.Then(cmd), } - if !c.running { + + if !c.running.Load() { + c.entries = append(c.entries, entry) + c.runningMu.Unlock() + + return entry.ID + } + + addCh := c.add + loopDone := c.loopDone + c.runningMu.Unlock() + + select { + case addCh <- entry: + case <-loopDone: + // Scheduler loop has exited; append directly. + c.runningMu.Lock() c.entries = append(c.entries, entry) - } else { - c.add <- entry + c.runningMu.Unlock() } return entry.ID @@ -166,16 +197,28 @@ func (c *Cron) Schedule(schedule Schedule, cmd Job) EntryID { // Entries returns a snapshot of the cron entries. func (c *Cron) Entries() []Entry { c.runningMu.Lock() - defer c.runningMu.Unlock() - if c.running { - replyChan := make(chan []Entry, 1) - c.snapshot <- replyChan + if !c.running.Load() { + snap := c.entrySnapshot() + c.runningMu.Unlock() - return <-replyChan + return snap } - return c.entrySnapshot() + snapCh := c.snapshot + loopDone := c.loopDone + c.runningMu.Unlock() + + replyChan := make(chan []Entry, 1) + select { + case snapCh <- replyChan: + return <-replyChan + case <-loopDone: + c.runningMu.Lock() + defer c.runningMu.Unlock() + + return c.entrySnapshot() + } } // Location gets the time zone location. @@ -183,7 +226,8 @@ func (c *Cron) Location() *time.Location { return c.location } -// Entry returns a snapshot of the given entry, or nil if it couldn't be found. +// Entry returns a snapshot of the given entry, or the zero Entry if it could +// not be found. func (c *Cron) Entry(id EntryID) Entry { for _, entry := range c.Entries() { if id == entry.ID { @@ -197,67 +241,125 @@ func (c *Cron) Entry(id EntryID) Entry { // Remove an entry from being run in the future. 
func (c *Cron) Remove(id EntryID) { c.runningMu.Lock() - defer c.runningMu.Unlock() - if c.running { - c.remove <- id - } else { + if !c.running.Load() { + c.removeEntry(id) + c.runningMu.Unlock() + + return + } + + removeCh := c.remove + loopDone := c.loopDone + c.runningMu.Unlock() + + select { + case removeCh <- id: + case <-loopDone: + c.runningMu.Lock() c.removeEntry(id) + c.runningMu.Unlock() } } -// Start the cron scheduler in its own goroutine, or no-op if already started. -func (c *Cron) Start() { +// Start launches the scheduler in its own goroutine bound to ctx. When ctx is +// cancelled the scheduler exits; any jobs already in flight are allowed to +// finish. Calling Start on an already-running scheduler is a no-op. +func (c *Cron) Start(ctx context.Context) { c.runningMu.Lock() defer c.runningMu.Unlock() - if c.running { + if c.running.Load() { return } - c.running = true - go c.schedulerLoop() + loopCtx := c.enterRunning(ctx) + loopDone := c.loopDone + + go func() { + c.schedulerLoop(loopCtx) + c.markStopped(loopDone) + }() } -// Run the cron scheduler, or no-op if already running. -func (c *Cron) Run() { +// Run executes the scheduler synchronously on the calling goroutine until ctx +// is cancelled. It returns [ErrAlreadyRunning] if another goroutine is already +// running the scheduler. +func (c *Cron) Run(ctx context.Context) error { c.runningMu.Lock() - if c.running { + + if c.running.Load() { c.runningMu.Unlock() - return + return ErrAlreadyRunning } - c.running = true + loopCtx := c.enterRunning(ctx) + loopDone := c.loopDone c.runningMu.Unlock() - c.schedulerLoop() + + c.schedulerLoop(loopCtx) + c.markStopped(loopDone) + + return nil } -// Stop stops the cron scheduler if it is running; otherwise it does nothing. -// A context is returned so the caller can wait for running jobs to complete. 
-func (c *Cron) Stop() context.Context { +// Stop cancels the running scheduler and waits for in-flight jobs to finish, +// bounded by the provided context. It returns ctx.Err() if the context is +// cancelled before all jobs complete. Calling Stop on a scheduler that is not +// running is a no-op and returns nil. +func (c *Cron) Stop(ctx context.Context) error { c.runningMu.Lock() - defer c.runningMu.Unlock() + if c.rootCancel != nil { + c.rootCancel() + } - if c.running { - c.stop <- struct{}{} + loopDone := c.loopDone + c.runningMu.Unlock() - c.running = false + if loopDone != nil { + select { + case <-loopDone: + case <-ctx.Done(): + return fmt.Errorf("cron: stop: %w", ctx.Err()) + } } - ctx, cancel := context.WithCancel(context.Background()) + done := make(chan struct{}) go func() { c.jobWaiter.Wait() - cancel() + close(done) }() - return ctx + select { + case <-done: + return nil + case <-ctx.Done(): + return fmt.Errorf("cron: stop: %w", ctx.Err()) + } +} + +// enterRunning must be called with runningMu held. It sets up the root context +// derived from the caller's ctx and marks the scheduler as running. +func (c *Cron) enterRunning(ctx context.Context) context.Context { + c.running.Store(true) + c.rootCtx, c.rootCancel = context.WithCancel(ctx) //nolint:gosec // G118: rootCancel is stored and called by Stop + c.loopDone = make(chan struct{}) + + return c.rootCtx +} + +// markStopped clears the running flag once the scheduler loop has exited. It +// must not acquire runningMu: Schedule/Remove/Entries may hold it while +// waiting on loopDone. +func (c *Cron) markStopped(loopDone chan struct{}) { + c.running.Store(false) + close(loopDone) } -// schedulerLoop runs the scheduler. It remains private because access to the -// running state is coordinated by Start and Run. -func (c *Cron) schedulerLoop() { +// schedulerLoop runs the scheduler until ctx is cancelled. 
+func (c *Cron) schedulerLoop(ctx context.Context) { c.logger.Info("start") now := c.initializeEntries() @@ -267,7 +369,7 @@ func (c *Cron) schedulerLoop() { var shouldStop bool - now, shouldStop = c.processSchedulerEvent(now) + now, shouldStop = c.processSchedulerEvent(ctx, now) if shouldStop { return } @@ -320,19 +422,19 @@ func (c *Cron) initializeEntries() time.Time { return now } -func (c *Cron) processSchedulerEvent(now time.Time) (time.Time, bool) { +func (c *Cron) processSchedulerEvent(ctx context.Context, now time.Time) (time.Time, bool) { timer := c.newSchedulerTimer(now) defer timer.Stop() for { select { - case firedAt := <-timer.C: - return c.handleTimerFired(firedAt), false + case firedAt := <-timer.C(): + return c.handleTimerFired(ctx, firedAt), false case newEntry := <-c.add: return c.handleEntryAdded(newEntry), false case replyChan := <-c.snapshot: replyChan <- c.entrySnapshot() - case <-c.stop: + case <-ctx.Done(): c.logger.Info("stop") return now, true @@ -342,31 +444,31 @@ func (c *Cron) processSchedulerEvent(now time.Time) (time.Time, bool) { } } -func (c *Cron) newSchedulerTimer(now time.Time) *time.Timer { +func (c *Cron) newSchedulerTimer(now time.Time) Timer { if len(c.entries) == 0 || c.entries[0].Next.IsZero() { // If there are no entries yet, just sleep - it still handles new entries // and stop requests. 
- return time.NewTimer(idleTimerDuration) + return c.clock.NewTimer(idleTimerDuration) } - return time.NewTimer(c.entries[0].Next.Sub(now)) + return c.clock.NewTimer(c.entries[0].Next.Sub(now)) } -func (c *Cron) handleTimerFired(firedAt time.Time) time.Time { +func (c *Cron) handleTimerFired(ctx context.Context, firedAt time.Time) time.Time { now := firedAt.In(c.location) c.logger.Info("wake", "now", now) - c.runDueEntries(now) + c.runDueEntries(ctx, now) return now } -func (c *Cron) runDueEntries(now time.Time) { +func (c *Cron) runDueEntries(ctx context.Context, now time.Time) { for _, entry := range c.entries { if entry.Next.After(now) || entry.Next.IsZero() { break } - c.startJob(entry.WrappedJob) + c.startJob(ctx, entry.wrappedJob) entry.Prev = entry.Next entry.Next = entry.Schedule.Next(now) c.logger.Info("run", "now", now, "entry", entry.ID, "next", entry.Next) @@ -390,16 +492,20 @@ func (c *Cron) handleEntryRemoved(id EntryID) time.Time { return now } -// startJob runs the given job in a new goroutine. -func (c *Cron) startJob(j Job) { +// startJob runs the given job in a new goroutine. Non-nil errors returned by +// the job are logged at Warn level and do not affect future executions. +func (c *Cron) startJob(ctx context.Context, j Job) { c.jobWaiter.Go(func() { - j.Run() + err := j.Run(ctx) + if err != nil { + c.logger.Warn("job error", "err", err) + } }) } // now returns current time in c location. func (c *Cron) now() time.Time { - return time.Now().In(c.location) + return c.clock.Now().In(c.location) } // entrySnapshot returns a copy of the current cron entry list. 
@@ -413,12 +519,5 @@ func (c *Cron) entrySnapshot() []Entry { } func (c *Cron) removeEntry(id EntryID) { - var entries []*Entry - for _, e := range c.entries { - if e.ID != id { - entries = append(entries, e) - } - } - - c.entries = entries + c.entries = slices.DeleteFunc(c.entries, func(e *Entry) bool { return e.ID == id }) } diff --git a/cron_test.go b/cron_test.go index 5296c15..0169447 100644 --- a/cron_test.go +++ b/cron_test.go @@ -3,8 +3,9 @@ package cron import ( "bytes" "context" + "errors" "fmt" - "log" + "log/slog" "strings" "sync" "sync/atomic" @@ -12,29 +13,32 @@ import ( "time" ) -// Many tests schedule a job for every second, and then wait at most a second -// for it to run. This amount is just slightly larger than 1 second to -// compensate for a few milliseconds of runtime. -const OneSecond = 1*time.Second + 50*time.Millisecond - const ( - everySecondSpec = "* * * * * ?" - everySecondWithSeconds = "* * * * * *" - januaryFirstSpec = "0 0 0 1 1 ?" - decemberThirtyFirstSpec = "0 0 0 31 12 ?" - invalidFebruarySpec = "0 0 0 30 Feb ?" - januaryFirstOffsetSpec = "1 0 0 1 1 ?" - secondsBoundaryThreshold = 58 + everySecondSpec = "* * * * * ?" + everySecondWithSeconds = "* * * * * *" + januaryFirstSpec = "0 0 0 1 1 ?" + decemberThirtyFirstSpec = "0 0 0 31 12 ?" + invalidFebruarySpec = "0 0 0 30 Feb ?" + januaryFirstOffsetSpec = "1 0 0 1 1 ?" expectedTwoFiringsMessage = "expected job fires 2 times" wrongJobRetrievedMessage = "wrong job retrieved:" - stopContextDoneMessage = "context was not done immediately" slowStopJobDelay = 2 * time.Second waitForStopCheck = 750 * time.Millisecond waitForStopCompletion = 1500 * time.Millisecond + + unexpectedStopError = "unexpected stop error: %v" ) +// baseTime is a fixed instant used by fake-clock tests. It is midnight UTC on +// a Monday so that day-of-week specs behave predictably. 
+var baseTime = time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) //nolint:gochecknoglobals // test constant + +// awaitTimeout is the real-time safety net for tests that advance a fake clock +// and then wait for a goroutine to observe the result. +const awaitTimeout = 200 * time.Millisecond + type syncWriter struct { wr bytes.Buffer m sync.Mutex @@ -59,8 +63,57 @@ func (sw *syncWriter) String() string { return sw.wr.String() } -func newBufLogger(sw *syncWriter) Logger { - return PrintfLogger(log.New(sw, "", log.LstdFlags)) +func newBufLogger(sw *syncWriter) *slog.Logger { + return slog.New(slog.NewTextHandler(sw, &slog.HandlerOptions{Level: slog.LevelDebug})) +} + +// noop is a job body that does nothing and never errors. +func noop(_ context.Context) error { return nil } + +// done wraps wg.Done into a FuncJob-compatible closure. +func done(wg *sync.WaitGroup) func(context.Context) error { + return func(_ context.Context) error { + wg.Done() + + return nil + } +} + +// startCron starts cron bound to a per-test context and registers a cleanup +// that stops it with a generous deadline. +func startCron(t *testing.T, cron *Cron) { + t.Helper() + + runCtx, cancelRun := context.WithCancel(context.Background()) + cron.Start(runCtx) + + t.Cleanup(func() { + stopCtx, cancelStop := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelStop() + + _ = cron.Stop(stopCtx) //nolint:errcheck // best-effort cleanup + + cancelRun() + }) +} + +// awaitWg waits for wg to complete with a short real-time safety net. +func awaitWg(t *testing.T, wg *sync.WaitGroup) { + t.Helper() + + select { + case <-wait(wg): + case <-time.After(awaitTimeout): + t.Fatal("timed out waiting for jobs to complete") + } +} + +// newFakeWithSeconds creates a Cron with seconds-level parsing backed by a +// fake clock anchored at baseTime. 
+func newFakeWithSeconds() (*Cron, *fakeClock) { + fc := newFakeClock(baseTime) + + return New(WithParser(testParserWithSeconds()), WithChain(), WithClock(fc)), fc } func TestFuncPanicRecovery(t *testing.T) { @@ -68,17 +121,23 @@ func TestFuncPanicRecovery(t *testing.T) { var buf syncWriter - cron := New(WithParser(testParserWithSeconds()), - WithChain(Recover(newBufLogger(&buf)))) - - cron.Start() - defer cron.Stop() + fc := newFakeClock(baseTime) + cron := New( + WithParser(testParserWithSeconds()), + WithChain(Recover(newBufLogger(&buf))), + WithClock(fc), + ) - mustAddFunc(t, cron, everySecondSpec, func() { + mustAddFunc(t, cron, everySecondSpec, func(_ context.Context) error { panic("YOLO") }) - time.Sleep(OneSecond) + startCron(t, cron) + fc.BlockUntilTimers(1) + fc.Advance(1 * time.Second) + + // Give the goroutine time to log the panic. + time.Sleep(10 * time.Millisecond) if !strings.Contains(buf.String(), "YOLO") { t.Error("expected a panic to be logged, got none") @@ -87,7 +146,7 @@ func TestFuncPanicRecovery(t *testing.T) { type DummyJob struct{} -func (DummyJob) Run() { +func (DummyJob) Run(_ context.Context) error { panic("YOLO") } @@ -98,15 +157,20 @@ func TestJobPanicRecovery(t *testing.T) { var buf syncWriter - cron := New(WithParser(testParserWithSeconds()), - WithChain(Recover(newBufLogger(&buf)))) - - cron.Start() - defer cron.Stop() + fc := newFakeClock(baseTime) + cron := New( + WithParser(testParserWithSeconds()), + WithChain(Recover(newBufLogger(&buf))), + WithClock(fc), + ) mustAddJob(t, cron, everySecondSpec, job) - time.Sleep(OneSecond) + startCron(t, cron) + fc.BlockUntilTimers(1) + fc.Advance(1 * time.Second) + + time.Sleep(10 * time.Millisecond) if !strings.Contains(buf.String(), "YOLO") { t.Error("expected a panic to be logged, got none") @@ -117,13 +181,12 @@ func TestJobPanicRecovery(t *testing.T) { func TestNoEntries(t *testing.T) { t.Parallel() - cron := newWithSeconds() - cron.Start() + cron, _ := newFakeWithSeconds() + 
cron.Start(context.Background()) - select { - case <-time.After(OneSecond): - t.Fatal("expected cron will be stopped immediately") - case <-stop(cron): + err := cron.Stop(context.Background()) + if err != nil { + t.Fatalf("expected cron to stop immediately: %v", err) } } @@ -131,19 +194,24 @@ func TestNoEntries(t *testing.T) { func TestStopCausesJobsToNotRun(t *testing.T) { t.Parallel() - wg := &sync.WaitGroup{} - wg.Add(1) + var calls atomic.Int64 - cron := newWithSeconds() - cron.Start() - cron.Stop() - mustAddFunc(t, cron, everySecondSpec, func() { wg.Done() }) + cron, fc := newFakeWithSeconds() + cron.Start(context.Background()) + _ = cron.Stop(context.Background()) //nolint:errcheck // tested elsewhere - select { - case <-time.After(OneSecond): - // No job ran! - case <-wait(wg): - t.Fatal("expected stopped cron does not run any job") + mustAddFunc(t, cron, everySecondSpec, func(_ context.Context) error { + calls.Add(1) + + return nil + }) + + // Cron is stopped — advancing the clock should not fire the job. + fc.Advance(2 * time.Second) + time.Sleep(10 * time.Millisecond) + + if c := calls.Load(); c != 0 { + t.Fatalf("expected no job runs after stop, got %d", c) } } @@ -154,18 +222,13 @@ func TestAddBeforeRunning(t *testing.T) { wg := &sync.WaitGroup{} wg.Add(1) - cron := newWithSeconds() - mustAddFunc(t, cron, everySecondSpec, func() { wg.Done() }) - - cron.Start() - defer cron.Stop() + cron, fc := newFakeWithSeconds() + mustAddFunc(t, cron, everySecondSpec, done(wg)) - // Give cron 2 seconds to run our job (which is always activated). - select { - case <-time.After(OneSecond): - t.Fatal("expected job runs") - case <-wait(wg): - } + startCron(t, cron) + fc.BlockUntilTimers(1) + fc.Advance(1 * time.Second) + awaitWg(t, wg) } // Start cron, add a job, expect it runs. 
@@ -175,39 +238,57 @@ func TestAddWhileRunning(t *testing.T) { wg := &sync.WaitGroup{} wg.Add(1) - cron := newWithSeconds() + cron, fc := newFakeWithSeconds() - cron.Start() - defer cron.Stop() + startCron(t, cron) + fc.BlockUntilTimers(1) // wait for idle timer - mustAddFunc(t, cron, everySecondSpec, func() { wg.Done() }) + mustAddFunc(t, cron, everySecondSpec, done(wg)) - select { - case <-time.After(OneSecond): - t.Fatal("expected job runs") - case <-wait(wg): - } + // Let the scheduler process the add event and create a new timer. + time.Sleep(5 * time.Millisecond) + fc.BlockUntilTimers(1) + fc.Advance(1 * time.Second) + awaitWg(t, wg) } // Test for #34. Adding a job after calling start results in multiple job invocations. func TestAddWhileRunningWithDelay(t *testing.T) { t.Parallel() - cron := newWithSeconds() + cron, fc := newFakeWithSeconds() - cron.Start() - defer cron.Stop() + startCron(t, cron) + fc.BlockUntilTimers(1) // idle timer - time.Sleep(5 * time.Second) + // Advance 5 seconds — the idle timer (100000h) does not fire, but + // the internal clock moves forward so that fc.Now() returns baseTime+5s. + fc.Advance(5 * time.Second) - var calls int64 + var calls atomic.Int64 - mustAddFunc(t, cron, everySecondWithSeconds, func() { atomic.AddInt64(&calls, 1) }) + // Adding a job sends on the add channel, waking the scheduler. + // The scheduler computes Next from fc.Now() (baseTime+5s). + wg := &sync.WaitGroup{} + wg.Add(1) + + mustAddFunc(t, cron, everySecondWithSeconds, func(_ context.Context) error { + calls.Add(1) + wg.Done() + + return nil + }) - <-time.After(OneSecond) + // Let the scheduler goroutine stop the old idle timer and create + // a new timer for the next second. 
+ time.Sleep(5 * time.Millisecond) - if atomic.LoadInt64(&calls) != 1 { - t.Errorf("called %d times, expected 1\n", calls) + fc.BlockUntilTimers(1) + fc.Advance(1 * time.Second) + awaitWg(t, wg) + + if c := calls.Load(); c != 1 { + t.Errorf("called %d times, expected 1\n", c) } } @@ -215,21 +296,23 @@ func TestAddWhileRunningWithDelay(t *testing.T) { func TestRemoveBeforeRunning(t *testing.T) { t.Parallel() - wg := &sync.WaitGroup{} - wg.Add(1) + var calls atomic.Int64 - cron := newWithSeconds() - id := mustAddFunc(t, cron, everySecondSpec, func() { wg.Done() }) + cron, fc := newFakeWithSeconds() + id := mustAddFunc(t, cron, everySecondSpec, func(_ context.Context) error { + calls.Add(1) + + return nil + }) cron.Remove(id) - cron.Start() - defer cron.Stop() + startCron(t, cron) + fc.BlockUntilTimers(1) + fc.Advance(1 * time.Second) + time.Sleep(10 * time.Millisecond) - select { - case <-time.After(OneSecond): - // Success, shouldn't run - case <-wait(wg): - t.FailNow() + if c := calls.Load(); c != 0 { + t.Fatalf("expected removed job not to run, got %d calls", c) } } @@ -237,21 +320,28 @@ func TestRemoveBeforeRunning(t *testing.T) { func TestRemoveWhileRunning(t *testing.T) { t.Parallel() - wg := &sync.WaitGroup{} - wg.Add(1) + var calls atomic.Int64 - cron := newWithSeconds() + cron, fc := newFakeWithSeconds() + + startCron(t, cron) + fc.BlockUntilTimers(1) // wait for idle timer - cron.Start() - defer cron.Stop() + id := mustAddFunc(t, cron, everySecondSpec, func(_ context.Context) error { + calls.Add(1) - id := mustAddFunc(t, cron, everySecondSpec, func() { wg.Done() }) + return nil + }) cron.Remove(id) - select { - case <-time.After(OneSecond): - case <-wait(wg): - t.FailNow() + // Let the scheduler process add + remove events. 
+ time.Sleep(5 * time.Millisecond) + fc.BlockUntilTimers(1) + fc.Advance(1 * time.Second) + time.Sleep(10 * time.Millisecond) + + if c := calls.Load(); c != 0 { + t.Fatalf("expected removed job not to run, got %d calls", c) } } @@ -262,53 +352,59 @@ func TestSnapshotEntries(t *testing.T) { wg := &sync.WaitGroup{} wg.Add(1) - cron := New() - mustAddFunc(t, cron, "@every 2s", func() { wg.Done() }) + fc := newFakeClock(baseTime) + cron := New(WithClock(fc)) + mustAddFunc(t, cron, "@every 2s", done(wg)) - cron.Start() - defer cron.Stop() + startCron(t, cron) + fc.BlockUntilTimers(1) - // Cron should fire in 2 seconds. After 1 second, call Entries. - time.Sleep(OneSecond) + // Advance 1 second, call Entries mid-cycle. + fc.Advance(1 * time.Second) + time.Sleep(5 * time.Millisecond) cron.Entries() - // Even though Entries was called, the cron should fire at the 2 second mark. - select { - case <-time.After(OneSecond): - t.Error("expected job runs at 2 second mark") - case <-wait(wg): - } + // Advance another second — the job should fire at the 2s mark. + fc.BlockUntilTimers(1) + fc.Advance(1 * time.Second) + awaitWg(t, wg) } // Test that the entries are correctly sorted. -// Add a bunch of long-in-the-future entries, and an immediate entry, and ensure -// that the immediate entry runs immediately. -// Also: Test that multiple jobs run in the same instant. 
func TestMultipleEntries(t *testing.T) { t.Parallel() wg := &sync.WaitGroup{} wg.Add(2) - cron := newWithSeconds() - mustAddFunc(t, cron, januaryFirstSpec, func() {}) - mustAddFunc(t, cron, everySecondSpec, func() { wg.Done() }) - id1 := mustAddFunc(t, cron, everySecondSpec, func() { t.Fatal() }) - id2 := mustAddFunc(t, cron, everySecondSpec, func() { t.Fatal() }) - mustAddFunc(t, cron, decemberThirtyFirstSpec, func() {}) - mustAddFunc(t, cron, everySecondSpec, func() { wg.Done() }) + cron, fc := newFakeWithSeconds() + mustAddFunc(t, cron, januaryFirstSpec, noop) + mustAddFunc(t, cron, everySecondSpec, done(wg)) + id1 := mustAddFunc(t, cron, everySecondSpec, func(_ context.Context) error { + t.Fatal() + + return nil + }) + id2 := mustAddFunc(t, cron, everySecondSpec, func(_ context.Context) error { + t.Fatal() + + return nil + }) + mustAddFunc(t, cron, decemberThirtyFirstSpec, noop) + mustAddFunc(t, cron, everySecondSpec, done(wg)) cron.Remove(id1) - cron.Start() + + startCron(t, cron) + fc.BlockUntilTimers(1) // wait for scheduler to be ready cron.Remove(id2) - defer cron.Stop() - select { - case <-time.After(OneSecond): - t.Error("expected job run in proper order") - case <-wait(wg): - } + // Let the scheduler process the remove event. + time.Sleep(5 * time.Millisecond) + fc.BlockUntilTimers(1) + fc.Advance(1 * time.Second) + awaitWg(t, wg) } // Test running the same job twice. 
@@ -318,19 +414,17 @@ func TestRunningJobTwice(t *testing.T) { wg := &sync.WaitGroup{} wg.Add(2) - cron := newWithSeconds() - mustAddFunc(t, cron, januaryFirstSpec, func() {}) - mustAddFunc(t, cron, decemberThirtyFirstSpec, func() {}) - mustAddFunc(t, cron, everySecondSpec, func() { wg.Done() }) - - cron.Start() - defer cron.Stop() + cron, fc := newFakeWithSeconds() + mustAddFunc(t, cron, januaryFirstSpec, noop) + mustAddFunc(t, cron, decemberThirtyFirstSpec, noop) + mustAddFunc(t, cron, everySecondSpec, done(wg)) - select { - case <-time.After(2 * OneSecond): - t.Error(expectedTwoFiringsMessage) - case <-wait(wg): - } + startCron(t, cron) + fc.BlockUntilTimers(1) + fc.Advance(1 * time.Second) + fc.BlockUntilTimers(1) + fc.Advance(1 * time.Second) + awaitWg(t, wg) } func TestRunningMultipleSchedules(t *testing.T) { @@ -339,55 +433,45 @@ func TestRunningMultipleSchedules(t *testing.T) { wg := &sync.WaitGroup{} wg.Add(2) - cron := newWithSeconds() - mustAddFunc(t, cron, januaryFirstSpec, func() {}) - mustAddFunc(t, cron, decemberThirtyFirstSpec, func() {}) - mustAddFunc(t, cron, everySecondSpec, func() { wg.Done() }) - cron.Schedule(Every(time.Minute), FuncJob(func() {})) - cron.Schedule(Every(time.Second), FuncJob(func() { wg.Done() })) - cron.Schedule(Every(time.Hour), FuncJob(func() {})) + cron, fc := newFakeWithSeconds() + mustAddFunc(t, cron, januaryFirstSpec, noop) + mustAddFunc(t, cron, decemberThirtyFirstSpec, noop) + mustAddFunc(t, cron, everySecondSpec, done(wg)) + cron.Schedule(Every(time.Minute), FuncJob(noop)) + cron.Schedule(Every(time.Second), FuncJob(done(wg))) + cron.Schedule(Every(time.Hour), FuncJob(noop)) - cron.Start() - defer cron.Stop() + startCron(t, cron) - select { - case <-time.After(2 * OneSecond): - t.Error(expectedTwoFiringsMessage) - case <-wait(wg): - } + // Two "every second" entries: one advance fires both, satisfying wg.Add(2). 
+ fc.BlockUntilTimers(1) + fc.Advance(1 * time.Second) + awaitWg(t, wg) } // Test that the cron is run in the local time zone (as opposed to UTC). +// With a fake clock we pick a fixed time and schedule relative to it, +// eliminating the old secondsBoundaryThreshold hack. func TestLocalTimezone(t *testing.T) { t.Parallel() wg := &sync.WaitGroup{} wg.Add(2) - now := time.Now() - // FIX: Issue #205 - // This calculation doesn't work in seconds 58 or 59. - // Take the easy way out and sleep. - if now.Second() >= secondsBoundaryThreshold { - time.Sleep(2 * time.Second) - - now = time.Now() - } - - spec := fmt.Sprintf("%d,%d %d %d %d %d ?", - now.Second()+1, now.Second()+2, now.Minute(), now.Hour(), now.Day(), now.Month()) - - cron := newWithSeconds() - mustAddFunc(t, cron, spec, func() { wg.Done() }) + // baseTime is 2024-01-01 00:00:00 UTC. Seconds 1 and 2 will fire. + fc := newFakeClock(baseTime) + cron := New(WithParser(testParserWithSeconds()), WithChain(), WithClock(fc), WithLocation(time.UTC)) - cron.Start() - defer cron.Stop() + spec := fmt.Sprintf("1,2 %d %d %d %d ?", + baseTime.Minute(), baseTime.Hour(), baseTime.Day(), baseTime.Month()) + mustAddFunc(t, cron, spec, done(wg)) - select { - case <-time.After(OneSecond * 2): - t.Error(expectedTwoFiringsMessage) - case <-wait(wg): - } + startCron(t, cron) + fc.BlockUntilTimers(1) + fc.Advance(1 * time.Second) + fc.BlockUntilTimers(1) + fc.Advance(1 * time.Second) + awaitWg(t, wg) } // Test that the cron is run in the given time zone (as opposed to local). @@ -397,41 +481,38 @@ func TestNonLocalTimezone(t *testing.T) { wg := &sync.WaitGroup{} wg.Add(2) - loc := mustLoadLocation(t, "Atlantic/Cape_Verde") - - now := time.Now().In(loc) - // FIX: Issue #205 - // This calculation doesn't work in seconds 58 or 59. - // Take the easy way out and sleep. 
- if now.Second() >= secondsBoundaryThreshold { - time.Sleep(2 * time.Second) - - now = time.Now().In(loc) + loc, err := time.LoadLocation("Atlantic/Cape_Verde") + if err != nil { + t.Fatalf("load location: %v", err) } - spec := fmt.Sprintf("%d,%d %d %d %d %d ?", - now.Second()+1, now.Second()+2, now.Minute(), now.Hour(), now.Day(), now.Month()) + // Anchor in Cape Verde time so the schedule matches. + cvTime := baseTime.In(loc) + fc := newFakeClock(baseTime) + cron := New(WithLocation(loc), WithParser(testParserWithSeconds()), WithClock(fc)) - cron := New(WithLocation(loc), WithParser(testParserWithSeconds())) - mustAddFunc(t, cron, spec, func() { wg.Done() }) + spec := fmt.Sprintf("1,2 %d %d %d %d ?", + cvTime.Minute(), cvTime.Hour(), cvTime.Day(), cvTime.Month()) + mustAddFunc(t, cron, spec, done(wg)) - cron.Start() - defer cron.Stop() - - select { - case <-time.After(OneSecond * 2): - t.Error(expectedTwoFiringsMessage) - case <-wait(wg): - } + startCron(t, cron) + fc.BlockUntilTimers(1) + fc.Advance(1 * time.Second) + fc.BlockUntilTimers(1) + fc.Advance(1 * time.Second) + awaitWg(t, wg) } -// Test that calling stop before start silently returns without -// blocking the stop channel. +// Test that calling Stop before Start silently returns without blocking. func TestStopWithoutStart(t *testing.T) { t.Parallel() cron := New() - cron.Stop() + + err := cron.Stop(context.Background()) + if err != nil { + t.Errorf(unexpectedStopError, err) + } } type testJob struct { @@ -439,8 +520,10 @@ type testJob struct { name string } -func (t testJob) Run() { +func (t testJob) Run(_ context.Context) error { t.wg.Done() + + return nil } // Test that adding an invalid job spec returns an error. 
@@ -462,20 +545,28 @@ func TestBlockingRun(t *testing.T) { wg := &sync.WaitGroup{} wg.Add(1) - cron := newWithSeconds() - mustAddFunc(t, cron, everySecondSpec, func() { wg.Done() }) + cron, fc := newFakeWithSeconds() + mustAddFunc(t, cron, everySecondSpec, done(wg)) + runCtx, cancelRun := context.WithCancel(context.Background()) unblockChan := make(chan struct{}) go func() { - cron.Run() + _ = cron.Run(runCtx) //nolint:errcheck // tested elsewhere + close(unblockChan) }() - defer cron.Stop() + t.Cleanup(func() { + cancelRun() + <-unblockChan + }) + + fc.BlockUntilTimers(1) + fc.Advance(1 * time.Second) select { - case <-time.After(OneSecond): + case <-time.After(awaitTimeout): t.Error("expected job fires") case <-unblockChan: t.Error("expected that Run() blocks") @@ -483,30 +574,80 @@ func TestBlockingRun(t *testing.T) { } } -// Test that double-running is a no-op. +// TestRunReturnsErrAlreadyRunning verifies a second Run returns an error while +// the first is active. +func TestRunReturnsErrAlreadyRunning(t *testing.T) { + t.Parallel() + + cron, _ := newFakeWithSeconds() + + runCtx, cancelRun := context.WithCancel(context.Background()) + started := make(chan struct{}) + + go func() { + close(started) + + _ = cron.Run(runCtx) //nolint:errcheck // tested elsewhere + }() + + <-started + // Give the goroutine a chance to acquire the running flag. + time.Sleep(10 * time.Millisecond) + + err := cron.Run(context.Background()) + if !errors.Is(err, ErrAlreadyRunning) { + t.Errorf("expected ErrAlreadyRunning, got %v", err) + } + + cancelRun() + + stopCtx, cancelStop := context.WithTimeout(context.Background(), time.Second) + defer cancelStop() + + _ = cron.Stop(stopCtx) //nolint:errcheck // best-effort cleanup +} + +// Test that double-Start is a no-op. 
func TestStartNoop(t *testing.T) { t.Parallel() tickChan := make(chan struct{}, 2) - cron := newWithSeconds() - mustAddFunc(t, cron, everySecondSpec, func() { + cron, fc := newFakeWithSeconds() + mustAddFunc(t, cron, everySecondSpec, func(_ context.Context) error { tickChan <- struct{}{} + + return nil }) - cron.Start() - defer cron.Stop() + startCron(t, cron) + fc.BlockUntilTimers(1) + + // First tick. + fc.Advance(1 * time.Second) + + select { + case <-tickChan: + case <-time.After(awaitTimeout): + t.Fatal("first tick did not arrive") + } - // Wait for the first firing to ensure the runner is going - <-tickChan + // Double-start should be a no-op. + cron.Start(context.Background()) - cron.Start() + // Second tick. + fc.BlockUntilTimers(1) + fc.Advance(1 * time.Second) - <-tickChan + select { + case <-tickChan: + case <-time.After(awaitTimeout): + t.Fatal("second tick did not arrive") + } - // Fail if this job fires again in a short period, indicating a double-run + // No third tick should appear. select { - case <-time.After(time.Millisecond): + case <-time.After(10 * time.Millisecond): case <-tickChan: t.Error("expected job fires exactly twice") } @@ -519,7 +660,7 @@ func TestJob(t *testing.T) { wg := &sync.WaitGroup{} wg.Add(1) - cron := newWithSeconds() + cron, fc := newFakeWithSeconds() mustAddJob(t, cron, invalidFebruarySpec, testJob{wg, "job0"}) mustAddJob(t, cron, januaryFirstSpec, testJob{wg, "job1"}) job2 := mustAddJob(t, cron, everySecondSpec, testJob{wg, "job2"}) @@ -536,14 +677,10 @@ func TestJob(t *testing.T) { t.Error(wrongJobRetrievedMessage, actualName) } - cron.Start() - defer cron.Stop() - - select { - case <-time.After(OneSecond): - t.FailNow() - case <-wait(wg): - } + startCron(t, cron) + fc.BlockUntilTimers(1) + fc.Advance(1 * time.Second) + awaitWg(t, wg) // Ensure the entries are in the right order. 
expecteds := []string{"job2", "job4", "job5", "job1", "job3", "job0"} @@ -573,6 +710,7 @@ func TestJob(t *testing.T) { // Issue #206 // Ensure that the next run of a job after removing an entry is accurate. +// This test has time.Sleep inside the job body, so it must use real time. func TestScheduleAfterRemoval(t *testing.T) { t.Parallel() @@ -584,18 +722,14 @@ func TestScheduleAfterRemoval(t *testing.T) { wg1.Add(1) wg2.Add(1) - // The first time this job is run, set a timer and remove the other job - // 750ms later. Correct behavior would be to still run the job again in - // 250ms, but the bug would cause it to run instead 1s later. - var ( calls int mu sync.Mutex ) cron := newWithSeconds() - hourJob := cron.Schedule(Every(time.Hour), FuncJob(func() {})) - cron.Schedule(Every(time.Second), FuncJob(func() { + hourJob := cron.Schedule(Every(time.Hour), FuncJob(noop)) + cron.Schedule(Every(time.Second), FuncJob(func(_ context.Context) error { mu.Lock() defer mu.Unlock() @@ -616,17 +750,16 @@ func TestScheduleAfterRemoval(t *testing.T) { default: panic("unexpected extra call") } + + return nil })) - cron.Start() - defer cron.Stop() + startCron(t, cron) - // the first run might be any length of time 0 - 1s, since the schedule - // rounds to the second. wait for the first run to true up. 
wg1.Wait() select { - case <-time.After(2 * OneSecond): + case <-time.After(3 * time.Second): t.Error(expectedTwoFiringsMessage) case <-wait(&wg2): } @@ -643,20 +776,28 @@ func (*ZeroSchedule) Next(_ time.Time) time.Time { func TestJobWithZeroTimeDoesNotRun(t *testing.T) { t.Parallel() - cron := newWithSeconds() + cron, fc := newFakeWithSeconds() + + var calls atomic.Int64 - var calls int64 + mustAddFunc(t, cron, everySecondWithSeconds, func(_ context.Context) error { + calls.Add(1) - mustAddFunc(t, cron, everySecondWithSeconds, func() { atomic.AddInt64(&calls, 1) }) - cron.Schedule(new(ZeroSchedule), FuncJob(func() { t.Error("expected zero task will not run") })) + return nil + }) + cron.Schedule(new(ZeroSchedule), FuncJob(func(_ context.Context) error { + t.Error("expected zero task will not run") - cron.Start() - defer cron.Stop() + return nil + })) - <-time.After(OneSecond) + startCron(t, cron) + fc.BlockUntilTimers(1) + fc.Advance(1 * time.Second) + time.Sleep(10 * time.Millisecond) - if atomic.LoadInt64(&calls) != 1 { - t.Errorf("called %d times, expected 1\n", calls) + if calls.Load() != 1 { + t.Errorf("called %d times, expected 1\n", calls.Load()) } } @@ -667,36 +808,27 @@ func TestStopAndWait(t *testing.T) { t.Run("repeated calls to Stop", testStopAndWaitRepeatedCalls) t.Run("a couple fast jobs added, still returns immediately", testStopAndWaitFastJobs) t.Run("a couple fast jobs and a slow job added, waits for slow job", testStopAndWaitSlowJob) - t.Run("repeated calls to stop, waiting for completion and after", testStopAndWaitRepeatedWhileWaiting) } func TestMultiThreadedStartAndStop(t *testing.T) { t.Parallel() cron := New() - go cron.Run() - - time.Sleep(2 * time.Millisecond) - cron.Stop() -} - -func wait(wg *sync.WaitGroup) chan bool { - ch := make(chan bool) go func() { - wg.Wait() - - ch <- true + _ = cron.Run(context.Background()) //nolint:errcheck // tested elsewhere }() - return ch + time.Sleep(2 * time.Millisecond) + + _ = 
cron.Stop(context.Background()) //nolint:errcheck // tested elsewhere } -func stop(cron *Cron) chan bool { +func wait(wg *sync.WaitGroup) chan bool { ch := make(chan bool) go func() { - cron.Stop() + wg.Wait() ch <- true }() @@ -704,7 +836,7 @@ func stop(cron *Cron) chan bool { return ch } -// newWithSeconds returns a Cron with the seconds field enabled. +// newWithSeconds returns a Cron with the seconds field enabled (real clock). func newWithSeconds() *Cron { return New(WithParser(testParserWithSeconds()), WithChain()) } @@ -718,106 +850,89 @@ func requireTestJobName(t *testing.T, cron *Cron, id EntryID) string { func testStopAndWaitNothingRunning(t *testing.T) { t.Parallel() - cron := newWithSeconds() - cron.Start() + cron, _ := newFakeWithSeconds() + cron.Start(context.Background()) - requireContextDoneWithin(cron.Stop(), t, time.Millisecond, stopContextDoneMessage) + err := cron.Stop(context.Background()) + if err != nil { + t.Errorf(unexpectedStopError, err) + } } func testStopAndWaitRepeatedCalls(t *testing.T) { t.Parallel() - cron := newWithSeconds() - cron.Start() - _ = cron.Stop() + cron, _ := newFakeWithSeconds() + cron.Start(context.Background()) + + _ = cron.Stop(context.Background()) //nolint:errcheck // first stop time.Sleep(time.Millisecond) - requireContextDoneWithin(cron.Stop(), t, time.Millisecond, stopContextDoneMessage) + err := cron.Stop(context.Background()) + if err != nil { + t.Errorf(unexpectedStopError, err) + } } func testStopAndWaitFastJobs(t *testing.T) { t.Parallel() - cron := newWithSeconds() - mustAddFunc(t, cron, everySecondWithSeconds, func() {}) - cron.Start() - mustAddFunc(t, cron, everySecondWithSeconds, func() {}) - mustAddFunc(t, cron, everySecondWithSeconds, func() {}) - mustAddFunc(t, cron, everySecondWithSeconds, func() {}) - time.Sleep(time.Second) - - requireContextDoneWithin(cron.Stop(), t, time.Millisecond, stopContextDoneMessage) -} + cron, fc := newFakeWithSeconds() + mustAddFunc(t, cron, everySecondWithSeconds, 
noop) + cron.Start(context.Background()) + fc.BlockUntilTimers(1) -func testStopAndWaitSlowJob(t *testing.T) { - t.Parallel() + mustAddFunc(t, cron, everySecondWithSeconds, noop) + mustAddFunc(t, cron, everySecondWithSeconds, noop) + mustAddFunc(t, cron, everySecondWithSeconds, noop) - slowJobStarted := make(chan struct{}, 1) - cron := newWithSeconds() - mustAddFunc(t, cron, everySecondWithSeconds, func() {}) - cron.Start() - mustAddFunc(t, cron, everySecondWithSeconds, func() { - signalJobStarted(slowJobStarted) - time.Sleep(slowStopJobDelay) - }) - mustAddFunc(t, cron, everySecondWithSeconds, func() {}) - - waitForJobStarted(t, slowJobStarted) + time.Sleep(5 * time.Millisecond) + fc.BlockUntilTimers(1) + fc.Advance(1 * time.Second) + time.Sleep(10 * time.Millisecond) - ctx := cron.Stop() - requireContextPendingFor(ctx, t, waitForStopCheck, "context was done too quickly immediately") - requireContextDoneWithin(ctx, t, waitForStopCompletion, "context not done after job should have completed") + err := cron.Stop(context.Background()) + if err != nil { + t.Errorf(unexpectedStopError, err) + } } -func testStopAndWaitRepeatedWhileWaiting(t *testing.T) { +func testStopAndWaitSlowJob(t *testing.T) { t.Parallel() slowJobStarted := make(chan struct{}, 1) cron := newWithSeconds() - mustAddFunc(t, cron, everySecondWithSeconds, func() {}) - mustAddFunc(t, cron, everySecondWithSeconds, func() { + mustAddFunc(t, cron, everySecondWithSeconds, noop) + cron.Start(context.Background()) + mustAddFunc(t, cron, everySecondWithSeconds, func(_ context.Context) error { signalJobStarted(slowJobStarted) time.Sleep(slowStopJobDelay) + + return nil }) - cron.Start() - mustAddFunc(t, cron, everySecondWithSeconds, func() {}) + mustAddFunc(t, cron, everySecondWithSeconds, noop) waitForJobStarted(t, slowJobStarted) - ctx := cron.Stop() - ctx2 := cron.Stop() + // A short deadline should trip because the slow job is still running. 
+ shortCtx, cancelShort := context.WithTimeout(context.Background(), waitForStopCheck) - select { - case <-ctx.Done(): - t.Error("context was done too quickly immediately") - case <-ctx2.Done(): - t.Error("context2 was done too quickly immediately") - case <-time.After(waitForStopCompletion): - } + err := cron.Stop(shortCtx) - requireContextDoneWithin(ctx, t, time.Second, "context not done after job should have completed") - requireContextDoneWithin(ctx2, t, time.Millisecond, "context2 not done even though context1 is") - requireContextDoneWithin(cron.Stop(), t, time.Millisecond, "context not done even when cron Stop is completed") -} - -func requireContextDoneWithin(ctx context.Context, t *testing.T, timeout time.Duration, message string) { - t.Helper() + cancelShort() - select { - case <-ctx.Done(): - case <-time.After(timeout): - t.Error(message) + if err == nil { + t.Error("expected Stop to time out while slow job was running") } -} -func requireContextPendingFor(ctx context.Context, t *testing.T, duration time.Duration, message string) { - t.Helper() + // A longer deadline should succeed once the slow job wraps up. + longCtx, cancelLong := context.WithTimeout(context.Background(), waitForStopCompletion) + defer cancelLong() - select { - case <-ctx.Done(): - t.Error(message) - case <-time.After(duration): + err = cron.Stop(longCtx) + if err != nil { + t.Errorf("expected Stop to succeed, got %v", err) } } @@ -826,7 +941,7 @@ func waitForJobStarted(t *testing.T, started <-chan struct{}) { select { case <-started: - case <-time.After(OneSecond): + case <-time.After(2 * time.Second): t.Fatal("slow job did not start in time") } } diff --git a/doc.go b/doc.go index b9d00f5..08871b1 100644 --- a/doc.go +++ b/doc.go @@ -3,38 +3,45 @@ Package cron implements a cron spec parser and job runner. 
# Installation -To download the specific tagged release, run: +To download the latest tagged release, run: - go get github.com/robfig/cron/v3@v3.0.0 + go get github.com/hyp3rd/cron/v4 Import it in your program as: - import "github.com/robfig/cron/v3" + import "github.com/hyp3rd/cron/v4" -It requires Go 1.11 or later due to usage of Go Modules. +It requires Go 1.26 or later. # Usage -Callers may register Funcs to be invoked on a given schedule. Cron will run -them in their own goroutines. +Callers may register Funcs to be invoked on a given schedule. Cron will run +them in their own goroutines. All jobs receive a [context.Context] that is +cancelled when the scheduler is stopped. c := cron.New() - c.AddFunc("30 * * * *", func() { fmt.Println("Every hour on the half hour") }) - c.AddFunc("30 3-6,20-23 * * *", func() { fmt.Println(".. in the range 3-6am, 8-11pm") }) - c.AddFunc("CRON_TZ=Asia/Tokyo 30 04 * * *", func() { fmt.Println("Runs at 04:30 Tokyo time every day") }) - c.AddFunc("@hourly", func() { fmt.Println("Every hour, starting an hour from now") }) - c.AddFunc("@every 1h30m", func() { fmt.Println("Every hour thirty, starting an hour thirty from now") }) - c.Start() + c.AddFunc("30 * * * *", func(ctx context.Context) error { + fmt.Println("Every hour on the half hour") + return nil + }) + c.AddFunc("@hourly", func(ctx context.Context) error { + fmt.Println("Every hour, starting an hour from now") + return nil + }) + c.Start(context.Background()) .. // Funcs are invoked in their own goroutine, asynchronously. ... // Funcs may also be added to a running Cron - c.AddFunc("@daily", func() { fmt.Println("Every day") }) + c.AddFunc("@daily", func(ctx context.Context) error { + fmt.Println("Every day") + return nil + }) .. // Inspect the cron job entries' next and previous run times. inspect(c.Entries()) .. - c.Stop() // Stop the scheduler (does not stop any jobs already running). + c.Stop(ctx) // Stop the scheduler and wait for in-flight jobs. 
# CRON Expression Format @@ -48,7 +55,7 @@ A cron expression represents a set of times, using 5 space-separated fields. Month | Yes | 1-12 or JAN-DEC | * / , - Day of week | Yes | 0-6 or SUN-SAT | * / , - ? -Month and Day-of-week field values are case insensitive. "SUN", "Sun", and +Month and Day-of-week field values are case insensitive. "SUN", "Sun", and "sun" are equally accepted. The specific interpretation of the format is based on the Cron Wikipedia page: @@ -57,11 +64,11 @@ https://en.wikipedia.org/wiki/Cron # Alternative Formats Alternative Cron expression formats support other fields like seconds. You can -implement that by creating a custom Parser as follows. +implement that by creating a custom [SpecParser] as follows. cron.New( cron.WithParser( - cron.NewParser( + cron.NewSpecParser( cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor))) Since adding Seconds is the most common modification to the standard cron spec, @@ -88,7 +95,7 @@ Slashes are used to describe increments of ranges. For example 3-59/15 in the minutes thereafter. The form "*\/..." is equivalent to the form "first-last/...", that is, an increment over the largest possible range of the field. The form "N/..." is accepted as meaning "N-MAX/...", that is, starting at N, use the -increment until the end of that specific range. It does not wrap around. +increment until the end of that specific range. It does not wrap around. Comma ( , ) @@ -119,8 +126,9 @@ You may use one of several pre-defined schedules in place of a cron expression. # Intervals -You may also schedule a job to execute at fixed intervals, starting at the time it's added -or cron is run. This is supported by formatting the cron spec like this: +You may also schedule a job to execute at fixed intervals, starting at the time +it's added or cron is run. 
This is supported by formatting the cron spec like +this: @every @@ -130,7 +138,7 @@ where "duration" is a string accepted by time.ParseDuration For example, "@every 1h30m10s" would indicate a schedule that activates after 1 hour, 30 minutes, 10 seconds, and then every interval after that. -Note: The interval does not take the job runtime into account. For example, +Note: The interval does not take the job runtime into account. For example, if a job takes 3 minutes to run, and it is scheduled to run every 5 minutes, it will have only 2 minutes of idle time between each run. @@ -148,22 +156,17 @@ of the cron spec, of the form "CRON_TZ=Asia/Tokyo". For example: - # Runs at 6am in time.Local + // Runs at 6am in time.Local cron.New().AddFunc("0 6 * * ?", ...) - # Runs at 6am in America/New_York + // Runs at 6am in America/New_York nyc, _ := time.LoadLocation("America/New_York") c := cron.New(cron.WithLocation(nyc)) c.AddFunc("0 6 * * ?", ...) - # Runs at 6am in Asia/Tokyo + // Runs at 6am in Asia/Tokyo cron.New().AddFunc("CRON_TZ=Asia/Tokyo 0 6 * * ?", ...) - # Runs at 6am in Asia/Tokyo - c := cron.New(cron.WithLocation(nyc)) - c.SetLocation("America/New_York") - c.AddFunc("CRON_TZ=Asia/Tokyo 0 6 * * ?", ...) - The prefix "TZ=(TIME ZONE)" is also supported for legacy compatibility. Be aware that jobs scheduled during daylight-savings leap-ahead transitions will @@ -180,7 +183,7 @@ to achieve the following effects: - Skip a job's execution if the previous run hasn't completed yet - Log each job's invocations -Install wrappers for all jobs added to a cron using the `cron.WithChain` option: +Install wrappers for all jobs added to a cron using the [WithChain] option: cron.New(cron.WithChain( cron.SkipIfStillRunning(logger), @@ -202,23 +205,18 @@ ensures that invocations have a clear happens-before ordering between them. # Logging -Cron defines a Logger interface that is a subset of the one defined in -github.com/go-logr/logr. 
It has two logging levels (Info and Error), and -parameters are key/value pairs. This makes it possible for cron logging to plug -into structured logging systems. An adapter, [Verbose]PrintfLogger, is provided -to wrap the standard library *log.Logger. - -For additional insight into Cron operations, verbose logging may be activated -which will record job runs, scheduling decisions, and added or removed jobs. -Activate it with a one-off logger as follows: +Cron uses [log/slog] for structured logging. By default, the scheduler logs at +[slog.LevelWarn] and above, so routine scheduling events stay quiet. Pass a +custom [*slog.Logger] via [WithLogger] to control log level and destination: cron.New( - cron.WithLogger( - cron.VerbosePrintfLogger(log.New(os.Stdout, "cron: ", log.LstdFlags)))) + cron.WithLogger(slog.New( + slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug}), + ))) # Implementation -Cron entries are stored in an array, sorted by their next activation time. Cron +Cron entries are stored in an array, sorted by their next activation time. Cron sleeps until the next job is due to be run. 
Upon waking: diff --git a/go.mod b/go.mod index 0b45362..2f2b3f6 100644 --- a/go.mod +++ b/go.mod @@ -1,3 +1,3 @@ -module github.com/robfig/cron/v3 +module github.com/hyp3rd/cron/v4 go 1.26.2 diff --git a/helpers_test.go b/helpers_test.go index 0f4c4e4..96b9584 100644 --- a/helpers_test.go +++ b/helpers_test.go @@ -1,15 +1,16 @@ package cron import ( + "context" "testing" "time" ) -func testParserWithSeconds() Parser { - return NewParser(Second | Minute | Hour | Dom | Month | DowOptional | Descriptor) +func testParserWithSeconds() SpecParser { + return NewSpecParser(Second | Minute | Hour | Dom | Month | DowOptional | Descriptor) } -func mustAddFunc(t *testing.T, cron *Cron, spec string, cmd func()) EntryID { +func mustAddFunc(t *testing.T, cron *Cron, spec string, cmd func(context.Context) error) EntryID { t.Helper() entryID, err := cron.AddFunc(spec, cmd) diff --git a/logger.go b/logger.go index 4974655..5cb84a2 100644 --- a/logger.go +++ b/logger.go @@ -1,101 +1,21 @@ package cron import ( - "io" - "log" + "log/slog" "os" - "strings" - "time" ) -// DefaultLogger is used by Cron if none is specified. -func DefaultLogger() Logger { - return PrintfLogger(log.New(os.Stdout, "cron: ", log.LstdFlags)) +// DefaultLogger is used by Cron if none is specified. It writes structured +// text to stdout under the "cron" group at [slog.LevelWarn] and above, so +// routine scheduling events stay quiet unless the caller opts in to verbose +// logging via [WithLogger]. +func DefaultLogger() *slog.Logger { + return slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{ + Level: slog.LevelWarn, + })).WithGroup("cron") } // DiscardLogger can be used by callers to discard all log messages. -func DiscardLogger() Logger { - return PrintfLogger(log.New(io.Discard, "", 0)) -} - -// Logger is the interface used in this package for logging, so that any backend -// can be plugged in. It is a subset of the github.com/go-logr/logr interface. 
-type Logger interface { - // Info logs routine messages about cron's operation. - Info(msg string, keysAndValues ...any) - // Error logs an error condition. - Error(err error, msg string, keysAndValues ...any) -} - -// PrintfLogger wraps a Printf-based logger (such as the standard library "log") -// into an implementation of the Logger interface which logs errors only. -func PrintfLogger(l interface{ Printf(_ string, _ ...any) }) Logger { - return printfLogger{l, false} -} - -// VerbosePrintfLogger wraps a Printf-based logger (such as the standard library -// "log") into an implementation of the Logger interface which logs everything. -func VerbosePrintfLogger(l interface{ Printf(_ string, _ ...any) }) Logger { - return printfLogger{l, true} -} - -type printfLogger struct { - logger interface{ Printf(_ string, _ ...any) } - logInfo bool -} - -const errorLogFields = 2 - -// Info logs routine messages about cron's operation. -func (pl printfLogger) Info(msg string, keysAndValues ...any) { - if pl.logInfo { - keysAndValues = formatTimes(keysAndValues) - pl.logger.Printf( - formatString(len(keysAndValues)), - append([]any{msg}, keysAndValues...)...) - } -} - -// Error logs an error condition. -func (pl printfLogger) Error(err error, msg string, keysAndValues ...any) { - keysAndValues = formatTimes(keysAndValues) - pl.logger.Printf( - formatString(len(keysAndValues)+errorLogFields), - append([]any{msg, "error", err}, keysAndValues...)...) -} - -// formatString returns a logfmt-like format string for the number of -// key/values. -func formatString(numKeysAndValues int) string { - var sb strings.Builder - sb.WriteString("%s") - - if numKeysAndValues > 0 { - sb.WriteString(", ") - } - - for i := range numKeysAndValues / 2 { - if i > 0 { - sb.WriteString(", ") - } - - sb.WriteString("%v=%v") - } - - return sb.String() -} - -// formatTimes formats any time.Time values as RFC3339. 
-func formatTimes(keysAndValues []any) []any { - formattedArgs := make([]any, 0, len(keysAndValues)) - - for _, arg := range keysAndValues { - if t, ok := arg.(time.Time); ok { - arg = t.Format(time.RFC3339) - } - - formattedArgs = append(formattedArgs, arg) - } - - return formattedArgs +func DiscardLogger() *slog.Logger { + return slog.New(slog.DiscardHandler) } diff --git a/option.go b/option.go index 09e4278..0495053 100644 --- a/option.go +++ b/option.go @@ -1,6 +1,7 @@ package cron import ( + "log/slog" "time" ) @@ -17,13 +18,13 @@ func WithLocation(loc *time.Location) Option { // WithSeconds overrides the parser used for interpreting job schedules to // include a seconds field as the first one. func WithSeconds() Option { - return WithParser(NewParser( + return WithParser(NewSpecParser( Second | Minute | Hour | Dom | Month | Dow | Descriptor, )) } // WithParser overrides the parser used for interpreting job schedules. -func WithParser(p ScheduleParser) Option { +func WithParser(p Parser) Option { return func(c *Cron) { c.parser = p } @@ -38,8 +39,16 @@ func WithChain(wrappers ...JobWrapper) Option { } // WithLogger uses the provided logger. -func WithLogger(logger Logger) Option { +func WithLogger(logger *slog.Logger) Option { return func(c *Cron) { c.logger = logger } } + +// WithClock overrides the clock used by the cron instance. It is intended +// primarily for tests that want to drive the scheduler deterministically. 
+func WithClock(clock Clock) Option { + return func(c *Cron) { + c.clock = clock + } +} diff --git a/option_test.go b/option_test.go index 5739d54..e0538ee 100644 --- a/option_test.go +++ b/option_test.go @@ -1,7 +1,8 @@ package cron import ( - "log" + "context" + "log/slog" "strings" "testing" "time" @@ -19,7 +20,7 @@ func TestWithLocation(t *testing.T) { func TestWithParser(t *testing.T) { t.Parallel() - parser := NewParser(Dow) + parser := NewSpecParser(Dow) c := New(WithParser(parser)) if c.parser != parser { @@ -27,26 +28,32 @@ func TestWithParser(t *testing.T) { } } -func TestWithVerboseLogger(t *testing.T) { +func TestWithLoggerCapturesSchedulerEvents(t *testing.T) { t.Parallel() var buf syncWriter - logger := log.New(&buf, "", log.LstdFlags) + logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{Level: slog.LevelDebug})) + fc := newFakeClock(baseTime) - cron := New(WithLogger(VerbosePrintfLogger(logger))) - if requireType[printfLogger](t, cron.logger).logger != logger { + cron := New(WithLogger(logger), WithClock(fc)) + if cron.logger != logger { t.Error("expected provided logger") } - mustAddFunc(t, cron, "@every 1s", func() {}) - cron.Start() - time.Sleep(OneSecond) - cron.Stop() + mustAddFunc(t, cron, "@every 1s", func(context.Context) error { return nil }) + cron.Start(context.Background()) + fc.BlockUntilTimers(1) + fc.Advance(1 * time.Second) + time.Sleep(10 * time.Millisecond) + + err := cron.Stop(context.Background()) + if err != nil { + t.Fatalf("stop: %v", err) + } out := buf.String() - if !strings.Contains(out, "schedule,") || - !strings.Contains(out, "run,") { + if !strings.Contains(out, "schedule") || !strings.Contains(out, "run") { t.Error("expected to see some actions, got:", out) } } diff --git a/parser.go b/parser.go index 1046118..d2591f4 100644 --- a/parser.go +++ b/parser.go @@ -79,12 +79,12 @@ func defaults() []string { } } -// Parser that can be configured. -type Parser struct { +// SpecParser that can be configured. 
+type SpecParser struct { options ParseOption } -// NewParser creates a Parser with custom options. +// NewSpecParser creates a SpecParser with custom options. // // It panics if more than one Optional is given, since it would be impossible to // correctly infer which optional is provided or missing in general. @@ -92,17 +92,17 @@ type Parser struct { // Examples // // // Standard parser without descriptors -// specParser := NewParser(Minute | Hour | Dom | Month | Dow) +// specParser := NewSpecParser(Minute | Hour | Dom | Month | Dow) // sched, err := specParser.Parse("0 0 15 */3 *") // // // Same as above, just excludes time fields -// specParser := NewParser(Dom | Month | Dow) +// specParser := NewSpecParser(Dom | Month | Dow) // sched, err := specParser.Parse("15 */3 *") // // // Same as above, just makes Dow optional -// specParser := NewParser(Dom | Month | DowOptional) +// specParser := NewSpecParser(Dom | Month | DowOptional) // sched, err := specParser.Parse("15 */3") -func NewParser(options ParseOption) Parser { +func NewSpecParser(options ParseOption) SpecParser { optionals := 0 if options&DowOptional > 0 { optionals++ @@ -116,20 +116,20 @@ func NewParser(options ParseOption) Parser { panic("multiple optionals may not be configured") } - return Parser{options} + return SpecParser{options} } -// NewStandardParser returns a Parser configured to parse standard 5-field crontab specs. -func NewStandardParser() Parser { - return NewParser( +// NewStandardParser returns a SpecParser configured to parse standard 5-field crontab specs. +func NewStandardParser() SpecParser { + return NewSpecParser( Minute | Hour | Dom | Month | Dow | Descriptor, ) } // Parse returns a new crontab schedule representing the given spec. // It returns a descriptive error if the spec is not valid. -// It accepts crontab specs and features configured by NewParser. 
-func (p Parser) Parse(spec string) (Schedule, error) { +// It accepts crontab specs and features configured by NewSpecParser. +func (p SpecParser) Parse(spec string) (Schedule, error) { if len(spec) == 0 { return nil, errEmptySpec } diff --git a/parser_test.go b/parser_test.go index 918ec4e..2f8c603 100644 --- a/parser_test.go +++ b/parser_test.go @@ -243,7 +243,7 @@ func TestParseSchedule(t *testing.T) { func TestOptionalSecondSchedule(t *testing.T) { t.Parallel() - parser := NewParser(SecondOptional | Minute | Hour | Dom | Month | Dow | Descriptor) + parser := NewSpecParser(SecondOptional | Minute | Hour | Dom | Month | Dow | Descriptor) entries := []struct { expr string expected Schedule @@ -437,7 +437,7 @@ func TestStandardSpecSchedule(t *testing.T) { func TestNoDescriptorParser(t *testing.T) { t.Parallel() - parser := NewParser(Minute | Hour) + parser := NewSpecParser(Minute | Hour) _, err := parser.Parse("@every 1m") if err == nil {