diff --git a/New File b/.ai/mcp/mcp.json similarity index 100% rename from New File rename to .ai/mcp/mcp.json diff --git a/.aiassistant/review_guidelines.md b/.aiassistant/review_guidelines.md new file mode 100644 index 0000000..3e34738 --- /dev/null +++ b/.aiassistant/review_guidelines.md @@ -0,0 +1,238 @@ +# Ploch.Data Review Guidelines + +Use these guidelines when reviewing staged or pending changes in this +repository. Review every change as if it may ship as a public NuGet +package and be consumed outside this repository. Prioritise +correctness, regression risk, compatibility, package-boundary safety, +test coverage, and documentation over cosmetic feedback. + +## Core Review Priorities + +1. Find bugs, regressions, unsafe changes, and unintended breaking + changes first. +2. Protect provider-agnostic abstractions from EF Core or + provider-specific leakage. +3. Protect external-consumer behaviour, especially package APIs, DI + registration, persisted state, and SampleApp packaging. +4. Treat missing tests, missing documentation, and missing validation as + real findings when behaviour or public surface changes. +5. Avoid low-value nits already enforced by analyzers or formatters + unless they hide a real maintenance problem. + +## How To Write Findings + +- Order findings by severity: blocker, high, medium, low. +- Each finding should explain the problem, impact, affected file or + area, and what kind of correction is expected. +- Prefer precise, actionable comments over broad stylistic advice. +- Distinguish required fixes from optional improvements. +- State verification gaps explicitly when tests or builds that should + have run are not evident. +- Use British English in review comments and suggested text. + +## Repository-Specific Review Checks + +### Architecture And Package Boundaries + +- Preserve the separation between provider-agnostic packages and EF Core + or provider-specific implementations. 
+- Do not allow EF Core types, provider-specific behaviour, or migration + concerns to leak into abstractions intended to stay + provider-agnostic. +- Keep changes targeted. Flag repo-wide refactors unless the task + clearly requires cross-package changes. +- When shared abstractions, DI registration, or common extension points + change, review downstream impact across core packages, provider + packages, integration-testing packages, and `samples/SampleApp`. +- Protect business-facing abstractions from architecture drift. + +### Generic Repository And Unit Of Work Usage + +- Consumers should use the narrowest repository interface that satisfies + the use case. +- `IUnitOfWork` should be introduced only when multiple entity types or + explicit transaction control are required. +- Complex reusable query logic should prefer the Specification pattern + rather than duplicated inline LINQ or unnecessary `IQueryable` + exposure. +- Flag repository changes that weaken typed IDs, blur read/write + separation, or make transaction boundaries unclear. + +### Domain Model Expectations + +- Entities should remain simple POCO classes, not business-logic + containers. +- Entities should implement the appropriate `Ploch.Data.Model` + interfaces such as `IHasId`, `INamed`, `IHasDescription`, audit + interfaces, or hierarchy interfaces instead of re-declaring common + concepts ad hoc. +- Category and tag entities should use the provided base types rather + than custom reimplementations. +- Navigation properties, audit properties, nullability, and collection + defaults should match existing patterns. + +### EF Core And Data-Project Conventions + +- `DbContext` configuration should use + `ApplyConfigurationsFromAssembly`; do not move entity configuration + inline into the context. +- Keep one internal configuration class per entity. +- Delete behaviour must be explicit; do not rely on EF Core defaults for + important relationships. 
+- Enum persistence should stay readable and consistent, typically + string-based where the repository already expects that. +- Provider-specific migrations belong only in provider-specific + projects, not in the base data project. +- Generated migration files and snapshots should not be manually edited + without a strong reason. + +### Public API And Compatibility + +- Review all public surface changes as potential breaking changes, + including public types, methods, properties, constructors, + interfaces, DI registration surface, configuration keys, package IDs, + serialised or persisted state, and migration behaviour. +- If behaviour or public API changed, expect corresponding documentation + updates and, for user-visible changes, release notes updates. +- Maintain backwards compatibility for stored state. If stored schema or + persisted behaviour changes, ensure the change is deliberate and + migration-safe. +- Flag silent behavioural changes even when signatures stay the same. + +### SampleApp Consumer Safety + +- Treat `samples/SampleApp` as an external consumer of published + packages. +- Never allow manual `PackageReference` to `ProjectReference` swaps in + SampleApp `.csproj` files. +- SampleApp build configuration must remain self-contained and must not + import parent repository build configuration, other than the existing + conditional `ProjectReferences.props` mechanism. +- If new Ploch.Data packages are added, ensure `ProjectReferences.props` + is updated. +- If published package versions change, ensure + `samples/SampleApp/Directory.Packages.props` stays correct. +- Flag any change that would make the sample app work only in solution + mode but not as a standalone consumer. + +## Testing And Validation + +- New behaviour, bug fixes, and regression-prone refactors should come + with tests. +- When behaviour crosses repositories, EF Core mappings, DI + registration, or provider selection, expect broader verification than + a single unit test. 
+- Tests should follow repository conventions: xUnit v3,
+  FluentAssertions, AutoFixture where helpful, observable behaviour over
+  implementation details, positive and negative cases, and names such as
+  `MethodName_should_explain_what_it_should_do`.
+- Integration tests are preferred when a change spans repositories,
+  EF Core, specifications, or Unit of Work behaviour.
+- If the review cannot confirm appropriate verification, call out the
+  gap explicitly. Relevant validation commands often include:
+- `dotnet build Ploch.Data.slnx`
+- `dotnet test`
+- `dotnet build Ploch.Data.slnx -p:UsePlochProjectReferences=true`
+- `dotnet build Ploch.Data.SampleApp.slnx`
+
+## Documentation And Release Hygiene
+
+- All public types and members should have XML documentation comments
+  with the appropriate tags for the member kind.
+- XML documentation should be clear, accurate, and written in British
+  English.
+- For public methods and non-obvious APIs, expect `<summary>`,
+  `<param>`, `<returns>`, `<exception>`, and `<example>` tags where
+  appropriate.
+- Public API or behaviour changes should be reflected in the relevant
+  markdown documentation in `docs/`, package README content, or other
+  referenced documentation.
+- User-visible features, significant fixes, and breaking changes should
+  update `RELEASE_NOTES.md` or the appropriate change-log material.
+- Flag stale documentation describing removed or renamed APIs.
+
+## Commit Metadata When Visible
+
+- If the proposed commit message is visible to the reviewer, ensure it
+  follows Conventional Commits.
+- Every commit should reference a GitHub issue with `Refs: #<issue-number>`.
+- Breaking changes should be explicit in both the commit header and the
+  `BREAKING CHANGE:` footer.
+- Do not invent missing issue references. Missing issue linkage should
+  be reported as a process problem.
+
+## Code Quality And Safety Checks
+
+- Prefer minimal, readable, maintainable code over clever or
+  over-engineered solutions.
+- Always build the entire solution using `dotnet build Ploch.Data.slnx` and + make sure **there are no new warnings** produced by static code analyzers. + If there are, you need to address them. Some of them might be false positive, + in this case you can disable them temporarily in code using for example + ```csharp + #pragma warning disable CA2200 // Rethrow to preserve stack details + ... + #pragma warning restore CA2200 + ``` + + Keep in mind that there are other ways of disabling those warnings. If this + is a false positive in many places, then it might make sense to disable + it in `.editorconfig` file. + But either way, the golden rule is **THERE MUST NOT BE EVEN A SINGLE NEW WARNING**. +- Remove dead code, temporary workarounds, debug code, and commented-out + implementations unless there is a clear justification. +- Fail fast on unrecoverable errors. Silent failure, swallowed + exceptions, or low-context logging should be treated as review issues. +- Logging should use appropriate levels and enough context to diagnose + failures. Repository-style messages such as `[ModuleName] Message` + should be preferred where practical. +- Handle nullability and optionality explicitly; do not assume non-null + values without justification. +- Avoid nested ternaries and avoid introducing complexity that obscures + intent. +- Never allow real PII, secrets, connection strings, API keys, or other + sensitive data to be committed. Test and example data must be fake or + anonymised. + +## Dependency Review + +- Prefer fixed, explicit dependency versions and the centralised package + management patterns already used by the repository. +- For dependency upgrades, expect evidence that changelogs, migration + guidance, and downstream impact were considered. +- Dependency updates that change runtime behaviour, build behaviour, or + packaging should trigger corresponding test and documentation + scrutiny. 
+ +## Do Not Waste Review Bandwidth On + +- Formatting or whitespace already enforced by `.editorconfig`, + analyzers, or formatters. +- Generic style opinions that conflict with established repository + patterns. +- Alternative designs that are merely different unless the current + change introduces real risk, inconsistency, or maintenance cost. +- Superficial suggestions that ignore package boundaries, + external-consumer behaviour, or repository conventions. + +## High-Risk Smells That Should Almost Always Be Called Out + +- Provider-specific logic added to provider-agnostic packages. +- Public API changes without tests, docs, or release note updates. +- `DbContext` changes without corresponding configuration or migration + scrutiny. +- Repository or Unit of Work changes that obscure transaction boundaries + or weaken typed IDs. +- SampleApp project file edits that bypass the + `PackageReference`-to-`ProjectReference` switching mechanism. +- Changes that only validate one build mode when both standalone and + solution-mode consumer behaviour matter. +- New public members without XML documentation. +- Behavioural changes merged without explicit verification evidence. + +## Final Review Stance + +Default to protecting long-term maintainability and external-consumer +safety. If a change is technically valid but creates architecture drift, +consumer risk, hidden breakage, or undocumented behaviour, treat it as a +real review finding rather than a minor note. diff --git a/.aiassistant/rules/agent.md b/.aiassistant/rules/agent.md new file mode 100644 index 0000000..a4b8f92 --- /dev/null +++ b/.aiassistant/rules/agent.md @@ -0,0 +1,75 @@ +--- +apply: always +--- + +# Agent Behaviour Specification + +## Pre-Code Workflow + +Before analysing, investigating, or modifying any code: + +1. Fetch relevant rules (repo/package + patterns). In Cursor, use `fetch_rules` tool. +2. Read the README and locate linked spec files and relevant documentation. +3. 
Review all relevant specs and docs. +4. Create a complete TODO list that includes: + - Implementation tasks + - Automated testing (unit, visual regression, e2e as appropriate) + - Manual verification step (e.g. "Manually verify changes in browser", "Test CLI command", "send request via curl") + - Updating any snapshots if they exist (e.g. visual regressions will have baseline images) + +## When to Stop and Confirm + +Stop and ask the user before implementing changes that may violate or need more information to stay compliant around: + +- **Legal or regulatory rules:** SCA, PCI-DSS, GDPR. +- **Security:** Authentication, session handling, encryption, sensitive data. +- **Business logic:** Permissions, account access, financial limits, payment flows. +- **Data access:** Queries that could expose PII or sensitive data. +- **Specification conflicts:** When the request conflicts with linked spec files. + +If unsure whether a change falls into these categories, stop and ask. + +## Post-Code Workflow + +After implementing changes, **before reporting completion**, you MUST complete BOTH: + +1. **Automated testing** — Run relevant tests (unit, integration, visual, e2e). Check project-specific rules, `package.json` scripts, or infer from context. Code compilation alone is insufficient. When working with visual regressions, make sure to update snapshots after you're happy with your changes. +2. **Manual verification** — Verify like a developer or user would. e.g. For web code, use browser MCP tools to navigate to the app, sign in if needed, and visually confirm the change works. For CLI tools, run commands. For APIs, send requests. + +**CRITICAL**: Never report completion until BOTH automated AND manual verification pass. 
If either cannot be performed: + +- Explicitly state which verification is blocked and why +- Ask the user how to proceed +- Do NOT mark tasks as complete — leave them as "pending verification" + +## Pull Requests + +- **Complete testing before creating PR:** Finish ALL automated and manual verification BEFORE creating a pull request. A PR signals the work is ready for review. +- **PR body must follow template:** When creating a PR, read `.github/pull_request_template.md` first (if it exists) and structure the body accordingly. Include ticket links, remove inapplicable sections (e.g. incident links for non-incidents), and add developer testing notes. +- **Never create a placeholder PR:** Only create a PR when implementation and all verification steps are complete. + +### CI Check Gate (Mandatory) + +After pushing changes or creating/updating a PR, you **must** monitor CI checks and resolve any failures before considering the work complete: + +1. **Observe checks:** After pushing, use `gh pr checks --watch` (or `gh run list` / `gh run view`) to monitor the status of all CI checks (build, test, SonarCloud, etc.). +2. **On failure — investigate:** If any check fails, retrieve the logs (`gh run view --log-failed`) to identify the root cause. Do not guess — read the actual failure output. +3. **Fix and push:** Make the necessary code changes to resolve the failure, commit with an appropriate conventional commit message, and push again. +4. **Re-observe:** After pushing the fix, monitor the checks again. Repeat the investigate-fix-push cycle until **all checks pass**. +5. **PR comments:** After checks pass, also review any automated PR comments (e.g. SonarCloud quality gate, Codacy, bot feedback). If they flag issues that should be addressed, fix those too. +6. **Only then declare complete:** Work is not done until all CI checks are green and automated PR feedback has been addressed. + +**Do not:** + +- Ignore or dismiss failing checks. 
+- Mark work as complete while checks are still running or failing.
+- Assume a failure is "flaky" without evidence — investigate first.
+- Push multiple speculative fixes without reading the failure logs.
+
+## Standards
+
+- Use British English.
+- Run commands yourself.
+- Clean up after modifications.
+- Use browser MCPs if available when testing web code.
+- **Never amend commits** unless the user explicitly asks. Always create new commits.
diff --git a/.aiassistant/rules/branch-naming.md b/.aiassistant/rules/branch-naming.md
new file mode 100644
index 0000000..3e621fb
--- /dev/null
+++ b/.aiassistant/rules/branch-naming.md
@@ -0,0 +1,41 @@
+---
+apply: always
+---
+
+# Branch Naming Standards
+
+## Pattern
+
+```
+<type>/<issue-number>-<short-description>
+```
+
+## Change Types
+
+| Type | When |
+|------|------|
+| `feature` | New feature or capability |
+| `fix` | Bug fix |
+| `chore` | Maintenance, config, housekeeping |
+| `refactor` | Code restructuring without behaviour change |
+| `docs` | Documentation only |
+| `test` | Adding or updating tests only |
+| `perf` | Performance improvement |
+| `ci` | CI/CD pipeline changes |
+| `build` | Build system changes |
+
+## Rules
+
+- `<issue-number>` is the GitHub issue number (digits only, no `#` prefix).
+- `<short-description>` is lowercase, hyphen-separated, max 5 words. Summarise the change, not the issue title verbatim.
+- Always derive the change type from the nature of the work, not the issue label alone.
+- If the issue has no clear type from labels, infer from the title and description.
+ +## Examples + +- `feature/72-dbcontext-creation-lifecycle-plugins` +- `fix/187-duplicate-entity-concurrent-upsert` +- `chore/210-nbgv-versioning-fetch-depth` +- `refactor/205-extract-shared-audit-logic` +- `docs/215-update-serialization-readme` +- `test/220-add-repository-edge-case-tests` diff --git a/.aiassistant/rules/code-quality.md b/.aiassistant/rules/code-quality.md new file mode 100644 index 0000000..4f96943 --- /dev/null +++ b/.aiassistant/rules/code-quality.md @@ -0,0 +1,18 @@ +--- +apply: always +--- + +# Code Quality Standards + +- Write minimal, readable, maintainable code. +- Split responsibilities across modules following existing conventions. +- Remove unused code. +- Minimise state; derive values when possible. +- Handle all possibilities; don't assume optionality. +- Error handling: fail fast on unrecoverable errors; no silent failures. Always log. For user-initiated actions, show user feedback. +- Comments: explain "why" for non-obvious logic. +- Logging: Use appropriate levels - error for unrecoverable failures, warn for recoverable issues with fallbacks, info for important state changes, debug for logic flow (not spammy). Include context in messages. Format: `[ModuleName] Message`. +- Maintain backward compatibility for stored state; implement migrations when required. +- Clean up local data on logout. +- Avoid nested ternaries. +- Never commit PII or potential PII to source code (names, emails, phone numbers, addresses, etc.). Use anonymised or fake data for tests and examples. diff --git a/.aiassistant/rules/commits.md b/.aiassistant/rules/commits.md new file mode 100644 index 0000000..a6d0f2c --- /dev/null +++ b/.aiassistant/rules/commits.md @@ -0,0 +1,138 @@ +--- +apply: always +--- + +# Commit Message Standards + +All commit messages **must** follow the [Conventional Commits](https://www.conventionalcommits.org/) specification. 
+
+## Format
+
+```text
+<type>(<scope>): <subject>
+
+<body>
+
+[BREAKING CHANGE: <description>]
+Refs: #<issue-number>
+```
+
+## Structure Rules
+
+- **Header** (`<type>(<scope>): <subject>`): Required. Max 72 characters.
+- **Body**: Include when the change is non-trivial. Briefly describe *what* changed and *why*. Wrap at 72 characters.
+- **Footer**: Always include `Refs: #<issue-number>`. This is **mandatory** — every commit must reference a GitHub issue. See [Associated issue](#associated-issue) for how to find the right issue number. Do not fabricate issue numbers.
+- **Breaking changes**: If any change breaks backward compatibility (public API signature change, removed/renamed public member, configuration key change, behavioural contract change), add a `BREAKING CHANGE:` footer with a description of what consumers must change. Also add `!` after the type/scope in the header: `feat(api)!: ...`.
+
+## Types
+
+| Type | When to use |
+| ---------- | ---------------------------------------------------- |
+| `feat` | New feature or capability |
+| `fix` | Bug fix |
+| `docs` | Documentation only |
+| `style` | Formatting, whitespace, semicolons — no logic change |
+| `refactor` | Code restructuring without behaviour change |
+| `perf` | Performance improvement |
+| `test` | Adding or updating tests |
+| `build` | Build system, CI, or dependency changes |
+| `chore` | Maintenance tasks (tooling, config, housekeeping) |
+| `ci` | CI/CD pipeline changes |
+| `revert` | Reverting a previous commit |
+
+## Scope
+
+- Use the **project or module name** affected (e.g. `common`, `data`, `lists-api`, `solution`, `ci`).
+- For changes spanning the entire repo or solution, use `solution` or the repo short name.
+- Keep scope lowercase, hyphen-separated if multi-word.
+
+## Subject Line
+
+- Use **imperative mood** ("Add feature", not "Added feature" or "Adds feature").
+- Start with a capital letter.
+- No trailing period.
+ +## Detecting Breaking Changes + +Before writing the commit message, analyse the staged changes for: + +- Removed or renamed public types, methods, properties, or interfaces. +- Changed method signatures (parameter types, return types, parameter order). +- Removed or renamed configuration keys, environment variables, or connection string names. +- Changed default behaviour that existing consumers rely on. +- Removed or renamed NuGet package IDs. +- Changed serialisation format of persisted data. + +If any of these are detected, the commit **must** include the `BREAKING CHANGE:` footer. + +## Associated Issue + +Every commit **must** include a `Refs: #` footer linking to a GitHub issue. Follow this lookup order: + +1. **Check the open PR** for the current branch (`gh pr view`). If the PR body or linked issues reference an issue, use that. +2. **Search repository issues** (`gh issue list` or the GitHub MCP tools) for an existing issue that matches the change. If there is a clear candidate, use it — and if there is an open PR without an issue link, associate the issue with the PR. +3. **Ask the user** if no matching issue is found. The user may want to create a new issue for the changes. Do not guess or omit the `Refs` footer — always ask rather than commit without an issue reference. + +## Examples + +### Simple feature + +```text +feat(common): Add StringExtensions.ContainsAny method + +Added a new extension method that checks whether a string contains +any of the specified substrings. + +Refs: #162 +``` + +### Breaking change + +```text +chore(solution)!: Update ContainsAny namespace + +Moved the public API method Strings.ContainsAny to the +StringExtensions class under a new namespace. + +BREAKING CHANGE: Ploch.Common.Strings.ContainsAny moved to +Ploch.Common.Extensions.StringExtensions.ContainsAny. Update +using directives accordingly. 
+Refs: #162 +``` + +### Bug fix + +```text +fix(data): Prevent duplicate entity on concurrent upsert + +Added optimistic concurrency check in the upsert path to avoid +inserting a duplicate when two requests race on the same key. + +Refs: #187 +``` + +### Multi-scope refactor + +```text +refactor(solution): Extract shared audit timestamp logic + +Moved SetAuditTimestamps from individual DbContext overrides into +a shared base class to reduce duplication across Data projects. + +Refs: #205 +``` + +### Change Log updates + +If a commit contains information that should go to the change log, make sure you put it there. Don't put things like styling changes or minor things there. This is especially important for the breaking changes and new features. + +### CI/build change + +```text +ci(github-actions): Add fetch-depth 0 for NBGV versioning + +NBGV requires full git history to calculate commit height. +Updated all checkout steps across workflows. + +Refs: #210 +``` diff --git a/.aiassistant/rules/data-access.md b/.aiassistant/rules/data-access.md new file mode 100644 index 0000000..d41c3b1 --- /dev/null +++ b/.aiassistant/rules/data-access.md @@ -0,0 +1,499 @@ +--- +apply: always +--- + +# Data Access Standards + +Rules for consuming `Ploch.Data.GenericRepository` libraries in MrPloch projects. Covers repository injection, Unit of Work usage, Specification pattern, and testing. For DbContext and entity configuration setup, see `data-project.md`. For entity design, see `domain-model.md`. + +## Repository Interface Hierarchy + +The `Ploch.Data.GenericRepository` package provides a layered interface hierarchy. 
Choose the most restrictive interface that satisfies the consumer's needs:
+
+| Interface | Purpose | Use When |
+|-----------|---------|----------|
+| `IQueryableRepository<TEntity>` | Exposes `IQueryable<TEntity> Entities` and `GetPageQuery()` | Direct LINQ access needed (rare, prefer Specification) |
+| `IReadRepositoryAsync<TEntity>` | Read operations without typed ID: `GetAllAsync()`, `FindFirstAsync()`, `CountAsync()`, `GetPageAsync()` | Reading entities where ID type does not matter |
+| `IReadRepositoryAsync<TEntity, TId>` | Adds `GetByIdAsync(TId id, ...)` | Reading entities by typed primary key |
+| `IWriteRepositoryAsync<TEntity, TId>` | `AddAsync()`, `UpdateAsync()`, `DeleteAsync()` | Write-only access (uncommon) |
+| `IReadWriteRepositoryAsync<TEntity, TId>` | Combines read + write | Full CRUD access to a single entity type |
+
+**Constraint:** All entities used with repositories **must** implement `IHasId<TId>` from `Ploch.Data.Model`.
+
+## Choosing Between Repository and Unit of Work
+
+### Direct Repository Injection
+
+Inject `IReadRepositoryAsync<TEntity, TId>` or `IReadWriteRepositoryAsync<TEntity, TId>` directly when operating on a **single entity type** with no cross-entity transactional requirements:
+
+```csharp
+public class ListProfilesUseCase(IReadRepositoryAsync<SystemProfile> profileRepository)
+{
+    public async Task<IEnumerable<SystemProfile>> ExecuteAsync(CancellationToken ct = default)
+    {
+        return await profileRepository.GetAllAsync(cancellationToken: ct);
+    }
+}
+```
+
+- Prefer `IReadRepositoryAsync<TEntity>` for read-only consumers.
+- Prefer `IReadWriteRepositoryAsync<TEntity, TId>` only when the consumer needs both read and write on that entity.
+
+### Unit of Work Injection
+
+Inject `IUnitOfWork` when:
+
+- **Multiple entity types** must be modified in a single atomic transaction.
+- The consumer needs to **commit or rollback** explicitly.
+- You want to **retrieve repositories dynamically** by entity type.
+ +```csharp +public class CreateProfileUseCase(IUnitOfWork unitOfWork) +{ + public async Task ExecuteAsync(CreateProfileRequest request, CancellationToken ct = default) + { + var profileRepo = unitOfWork.Repository(); + var tagRepo = unitOfWork.Repository(); + + var profile = new SystemProfile { Name = request.Name }; + await profileRepo.AddAsync(profile, ct); + + foreach (var tagName in request.Tags) + { + await tagRepo.AddAsync(new SystemProfileTag { Name = tagName }, ct); + } + + await unitOfWork.CommitAsync(ct); + return profile.Id; + } +} +``` + +### IUnitOfWork API + +```csharp +public interface IUnitOfWork : IDisposable +{ + IReadWriteRepositoryAsync Repository() + where TEntity : class, IHasId; + + TRepository Repository() + where TRepository : IReadWriteRepositoryAsync + where TEntity : class, IHasId; + + Task CommitAsync(CancellationToken cancellationToken = default); + Task RollbackAsync(CancellationToken cancellationToken = default); +} +``` + +## Read Operations + +### GetAllAsync + +Retrieve all entities, optionally with a filter predicate: + +```csharp +var allProfiles = await repository.GetAllAsync(cancellationToken: ct); +var activeProfiles = await repository.GetAllAsync(p => p.IsActive, cancellationToken: ct); +``` + +### GetByIdAsync + +Retrieve a single entity by typed primary key: + +```csharp +var profile = await repository.GetByIdAsync(profileId, cancellationToken: ct); +if (profile is null) + return Result.NotFound(); +``` + +### FindFirstAsync + +Find the first entity matching a predicate. 
Use the `onDbSet` parameter for eager loading: + +```csharp +var existing = await repository.FindFirstAsync( + s => s.Name == serviceName, + cancellationToken: ct); +``` + +### GetPageAsync + +Paginated queries with optional sorting and filtering: + +```csharp +var page = await repository.GetPageAsync( + pageNumber: 1, + pageSize: 20, + sortBy: p => p.Name, + query: p => p.IsActive, + cancellationToken: ct); +``` + +### CountAsync + +Count entities with optional filter: + +```csharp +var total = await repository.CountAsync(p => p.IsActive, ct); +``` + +### Eager Loading via onDbSet + +Several read methods accept a `Func, IQueryable>? onDbSet` parameter to apply `Include()` calls: + +```csharp +var profile = await repository.GetByIdAsync( + profileId, + onDbSet: q => q.Include(p => p.Tags).Include(p => p.Categories), + cancellationToken: ct); +``` + +## Write Operations + +All write operations are performed through repositories. Changes are persisted either implicitly (direct repository injection) or explicitly (via `IUnitOfWork.CommitAsync()`). 
+ +### Add + +```csharp +var entity = new SystemProfile { Name = "New Profile" }; +await repository.AddAsync(entity, ct); +// entity.Id is populated after save +``` + +### AddRange + +```csharp +var entities = new[] { new Tag { Name = "A" }, new Tag { Name = "B" } }; +await repository.AddRangeAsync(entities, ct); +``` + +### Update + +```csharp +var existing = await repository.GetByIdAsync(id, ct); +existing.Name = "Updated Name"; +await repository.UpdateAsync(existing, ct); +``` + +### Delete + +By entity or by ID: + +```csharp +await repository.DeleteAsync(entity, ct); +await repository.DeleteAsync(entityId, ct); +``` + +### Upsert Pattern + +Check-then-add-or-update when a unique constraint exists beyond the primary key: + +```csharp +var existing = await repository.FindFirstAsync(s => s.Name == name, cancellationToken: ct); +if (existing != null) +{ + existing.Value = newValue; + await repository.UpdateAsync(existing, ct); +} +else +{ + await repository.AddAsync(new Entity { Name = name, Value = newValue }, ct); +} +await unitOfWork.CommitAsync(ct); +``` + +## Specification Pattern (Ardalis.Specification) + +Use Specifications to encapsulate reusable, composable query logic. Prefer Specifications over inline LINQ predicates for queries that include eager loading or complex filtering. + +### Required Packages + +- `Ardalis.Specification` — base types +- `Ploch.Data.GenericRepository.EFCore` — integrates Specification with repositories + +### Single vs Multiple Result Specifications + +```csharp +// Returns multiple results +public class ProfileSearchSpecification : Specification +{ + public ProfileSearchSpecification(string? nameContains, IEnumerable? 
tags) + { + Query.Include(x => x.Tags) + .Where(x => x.Name.Contains(nameContains!), nameContains is not null) + .Where(p => p.Tags!.Any(t => tags!.Contains(t.Name)), tags is not null && tags.Any()); + } +} + +// Returns single result +public class GetProfileByIdOrNameSpecification : SingleResultSpecification +{ + public GetProfileByIdOrNameSpecification(int? id, string? name) + { + Query.Include(p => p.Tags) + .Include(p => p.Actions!) + .ThenInclude(a => a.ApplicationMatching) + .Where(p => p.Name.Equals(name), name is not null) + .Where(p => p.Id == id!.Value, id.HasValue); + } +} +``` + +### Consuming Specifications + +```csharp +// Multiple results +var profiles = await repository.GetAllBySpecificationAsync( + new ProfileSearchSpecification(nameFilter, tagFilter), ct); + +// Single result +var profile = await repository.GetBySpecificationAsync( + new GetProfileByIdOrNameSpecification(id, name), ct); +``` + +### Specification Guidelines + +- Inherit from `Specification` for multi-result queries. +- Inherit from `SingleResultSpecification` for queries expected to return zero or one result. +- Use conditional `Where` clauses with the boolean overload: `.Where(predicate, condition)`. +- Include related entities via `Query.Include()` and `ThenInclude()`. +- Keep Specifications in a `Specifications` folder/namespace within the consuming project (e.g. `UseCases/Specifications/`). 
+ +## DI Registration + +### Standard Registration + +In the Data project's `ServiceCollectionRegistrations` class, call `AddRepositories()` to register all repository interfaces and `IUnitOfWork`: + +```csharp +public static IServiceCollection AddDataServices( + this IServiceCollection services, + Action configureOptions, + IConfiguration configuration) +{ + return services + .AddDbContext<{Product}DbContext>(configureOptions) + .AddRepositories<{Product}DbContext>(configuration); +} +``` + +This single call registers: + +- `IQueryableRepository` as `QueryableRepository` +- `IReadRepositoryAsync` as `ReadRepositoryAsync` +- `IReadRepositoryAsync` as `ReadRepositoryAsync` +- `IReadWriteRepositoryAsync` as `ReadWriteRepositoryAsync` +- `IUnitOfWork` as `UnitOfWork` + +### ServicesBundle Registration + +For applications using the `ServicesBundle` pattern from `ploch-common`, inherit from `GenericRepositoriesServicesBundle`: + +```csharp +public class MyDataBundle : GenericRepositoriesServicesBundle +{ + protected override Action GetOptionsBuilderAction(IConfiguration? 
configuration) + { + return options => options.UseSqlite( + configuration.RequiredNotNull().GetConnectionString("DefaultConnection")); + } +} +``` + +Register in application startup: + +```csharp +services.AddServicesBundle(new MyDataBundle(), configuration); +``` + +### Custom Repository Registration + +When extending the base repository with domain-specific logic, register the custom type explicitly: + +```csharp +public class CustomListsRepository(DbContext dbContext, IAuditEntityHandler auditEntityHandler) + : ReadWriteRepositoryAsync<List, int>(dbContext, auditEntityHandler) +{ + public override async Task UpdateAsync(List entity, CancellationToken ct = default) + { + // Custom logic before update + await base.UpdateAsync(entity, ct); + } +} + +// Registration +services.AddScoped<IReadWriteRepositoryAsync<List, int>, CustomListsRepository>(); +``` + +## Error Handling + +Repository operations throw specific exceptions from `Ploch.Data.GenericRepository`: + +| Exception | When | Handling | +|-----------|------|----------| +| `DataAccessException` | Read operation failure | Log and return error result | +| `DataUpdateException` | `CommitAsync()` or write failure | Log and return error result | +| `DataUpdateConcurrencyException` | Optimistic concurrency violation | Retry or notify user | + +```csharp +try +{ + await unitOfWork.CommitAsync(ct); +} +catch (DataUpdateException ex) +{ + logger.LogError(ex, "Failed to save profile"); + return Result.Error(ex.Message); +} +``` + +## Testing with Repositories + +### Integration Test Setup + +Use a shared SQLite in-memory connection for integration tests. SQLite in-memory matches the real relational provider behaviour (foreign keys, indexes, transactions, migrations) that the EF Core InMemory provider does not simulate. A single shared connection keeps the database alive for the lifetime of the test and is re-used by every `DbContext` instance created within it. 
+ +```csharp +public abstract class RepositoryTestFixture : IAsyncDisposable +{ + private readonly SqliteConnection _connection; + protected readonly MyDbContext DbContext; + + protected RepositoryTestFixture() + { + _connection = new SqliteConnection("Data Source=:memory:"); + _connection.Open(); + + var options = new DbContextOptionsBuilder() + .UseSqlite(_connection) + .Options; + + DbContext = new MyDbContext(options); + DbContext.Database.EnsureCreated(); + } + + protected IReadWriteRepositoryAsync GetRepository() + where TEntity : class, IHasId + { + return new ReadWriteRepositoryAsync(DbContext); + } + + public async ValueTask DisposeAsync() + { + await DbContext.DisposeAsync(); + await _connection.DisposeAsync(); + } +} +``` + +> The `Ploch.Data.EFCore.IntegrationTesting` package already provides `DbContextServicesRegistrationHelper` and `DataIntegrationTest` base classes that wire this up — prefer those when writing tests inside this repository. The snippet above is the standalone equivalent for external consumers. 
+ +### Unit Testing with Mocks + +Mock the repository interface for unit testing use cases: + +```csharp +[Fact] +public async Task ListProfiles_ReturnsAllProfiles() +{ + var mockRepo = new Mock<IReadRepositoryAsync<Profile, int>>(); + mockRepo.Setup(r => r.GetAllAsync(It.IsAny<CancellationToken>())) + .ReturnsAsync(new List<Profile> { new() { Name = "Test" } }); + + var useCase = new ListProfilesUseCase(mockRepo.Object); + var result = await useCase.ExecuteAsync(); + + Assert.Single(result); +} +``` + +### Testing with IUnitOfWork + +```csharp +[Fact] +public async Task CreateProfile_CommitsSuccessfully() +{ + var mockUow = new Mock<IUnitOfWork>(); + var mockRepo = new Mock<IReadWriteRepositoryAsync<Profile, int>>(); + mockUow.Setup(u => u.Repository<Profile, int>()).Returns(mockRepo.Object); + mockUow.Setup(u => u.CommitAsync(It.IsAny<CancellationToken>())).ReturnsAsync(1); + + var useCase = new CreateProfileUseCase(mockUow.Object); + var result = await useCase.ExecuteAsync(new CreateProfileRequest("Test")); + + mockRepo.Verify(r => r.AddAsync(It.IsAny<Profile>(), It.IsAny<CancellationToken>()), Times.Once); + mockUow.Verify(u => u.CommitAsync(It.IsAny<CancellationToken>()), Times.Once); +} +``` + +## Application Layer Patterns + +### Use Case Pattern + +Encapsulate a single business operation in a use case class. 
Inject the narrowest repository interface needed: + +```csharp +public class GetProfileDetailsUseCase( + IReadRepositoryAsync profileRepository, + ILogger logger) +{ + public async Task> ExecuteAsync(int profileId, CancellationToken ct = default) + { + try + { + var profile = await profileRepository.GetByIdAsync(profileId, ct); + if (profile is null) + return Result.NotFound(); + return Result.Success(MapToDetails(profile)); + } + catch (DataAccessException ex) + { + logger.LogError(ex, "Error retrieving profile {ProfileId}", profileId); + return Result.Error(ex.Message); + } + } +} +``` + +### Storage/Service Pattern + +For infrastructure services that persist state, inject `IUnitOfWork`: + +```csharp +public class DbStateStorage(IUnitOfWork unitOfWork) : IStateStorage +{ + public async Task SaveStateAsync(IEnumerable services, CancellationToken ct = default) + { + var repository = unitOfWork.Repository(); + + foreach (var service in services) + { + var existing = await repository.FindFirstAsync(s => s.Name == service.Name, cancellationToken: ct); + if (existing != null) + { + existing.Status = service.Status; + await repository.UpdateAsync(existing, ct); + } + else + { + await repository.AddAsync(MapToEntity(service), ct); + } + } + + await unitOfWork.CommitAsync(ct); + } +} +``` + +## Quick Reference + +| I want to... 
| Use | +|--------------|-----| +| Read entities | `IReadRepositoryAsync` | +| Read + write a single entity type | `IReadWriteRepositoryAsync` | +| Write across multiple entity types atomically | `IUnitOfWork` | +| Encapsulate a complex query | `Specification` or `SingleResultSpecification` | +| Register all repositories | `services.AddRepositories(configuration)` | +| Register with ServicesBundle | Inherit `GenericRepositoriesServicesBundle` | +| Add custom repository logic | Inherit `ReadWriteRepositoryAsync` | diff --git a/.aiassistant/rules/data-project.md b/.aiassistant/rules/data-project.md new file mode 100644 index 0000000..7617b31 --- /dev/null +++ b/.aiassistant/rules/data-project.md @@ -0,0 +1,235 @@ +--- +apply: always +--- + +# Data Project Standards + +Rules for creating and structuring EF Core Data projects in MrPloch repositories. A Data project contains the `DbContext`, entity type configurations, and DI registration for a domain model. + +## Project Structure + +``` +src/ + Data/ + Configurations/ + {Entity}Configuration.cs # One per entity + {Product}DbContext.cs # The DbContext class + ServiceCollectionRegistrations.cs # DI extension methods + Ploch.{Product}.Data.csproj # Project file +``` + +- **Project name:** `Ploch.{Product}.Data` (e.g. `Ploch.Lists.Data`, `Ploch.Tools.SystemProfiles.Data`). +- **Namespace:** `Ploch.{Product}.Data`. +- Entity configurations go in a `Configurations` subfolder with namespace `Ploch.{Product}.Data.Configurations`. +- The project **must** reference the corresponding Model project (`Ploch.{Product}.Model` or `Ploch.{Product}.Domain.Db`). + +## Project File (.csproj) + +Required package references: + +- `Microsoft.EntityFrameworkCore` — always required. +- `Microsoft.EntityFrameworkCore.Relational` — if using relational-specific features (e.g. `HasConversion`, `HasIndex`). +- `Microsoft.EntityFrameworkCore.Tools` — if EF Core migrations will be managed in this project (set `PrivateAssets=all`). 
+ +Optional references: + +- `Ploch.Data.GenericRepository.EFCore` or `Ploch.Data.EFCore` — for generic repository and Unit of Work integration. +- `Microsoft.EntityFrameworkCore.Proxies` — only if lazy loading proxies are required. + +## DbContext Class + +### Naming + +- Name the class `{Product}DbContext` (e.g. `ListsDbContext`, `SystemProfilesDbContext`, `EditorConfigDbContext`). + +### Constructors + +```csharp +public class {Product}DbContext : DbContext +{ + protected {Product}DbContext() + { } + + public {Product}DbContext(DbContextOptions<{Product}DbContext> options) : base(options) + { } +} +``` + +- Include a `protected` parameterless constructor for EF Core tooling (migrations, design-time factory). +- The primary constructor takes `DbContextOptions` — always use the strongly-typed generic variant, not `DbContextOptions`. +- If the project uses ASP.NET Identity, inherit from `IdentityDbContext` instead of `DbContext`. + +### DbSet Properties + +- Declare a `DbSet` property for **every entity** that should be directly queryable. +- Use plural names for DbSet properties (e.g. `Lists`, `ListItems`, `SystemProfiles`). +- Include DbSet properties for derived types in TPH hierarchies if they need to be queried directly. + +```csharp +public DbSet Lists { get; set; } +public DbSet ListItems { get; set; } +``` + +### OnModelCreating + +- **Always** use assembly scanning — do not configure entities inline. +- Call `base.OnModelCreating()` after applying configurations (required when inheriting from `IdentityDbContext`; good practice for plain `DbContext`). 
+ +```csharp +protected override void OnModelCreating(ModelBuilder modelBuilder) +{ + modelBuilder.ApplyConfigurationsFromAssembly(typeof({Product}DbContext).Assembly); + base.OnModelCreating(modelBuilder); +} +``` + +### Audit Timestamp Tracking + +If any entities implement `IHasAuditProperties` or `IHasAuditTimeProperties`, override `SaveChanges` and `SaveChangesAsync` to automatically set timestamps: + +```csharp +public override int SaveChanges() +{ + SetAuditTimestamps(); + return base.SaveChanges(); +} + +public override Task<int> SaveChangesAsync(CancellationToken cancellationToken = default) +{ + SetAuditTimestamps(); + return base.SaveChangesAsync(cancellationToken); +} + +private void SetAuditTimestamps() +{ + var now = DateTimeOffset.UtcNow; + foreach (var entry in ChangeTracker.Entries<IHasAuditTimeProperties>()) + { + switch (entry.State) + { + case EntityState.Added: + entry.Entity.CreatedTime = now; + entry.Entity.ModifiedTime = now; + break; + case EntityState.Modified: + entry.Entity.ModifiedTime = now; + break; + } + } +} +``` + +## Entity Type Configurations + +### One Class Per Entity + +- Create one configuration class per entity implementing `IEntityTypeConfiguration<TEntity>`. +- Name the class `{Entity}Configuration` (e.g. `ListConfiguration`, `ProjectConfiguration`). +- Mark the class as `internal` — configurations are implementation details of the Data project. + +```csharp +internal class ListConfiguration : IEntityTypeConfiguration<List> +{ + public void Configure(EntityTypeBuilder<List> builder) + { + // Configuration here + } +} +``` + +### What to Configure + +**Always configure in Fluent API (in the configuration class):** + +- Relationships (`HasOne`, `HasMany`, `WithOne`, `WithMany`). +- Delete behaviour (`OnDelete`) — always set explicitly; do not rely on EF Core conventions. +- Discriminators for TPH inheritance (`HasDiscriminator`). +- Indexes (`HasIndex`). +- Many-to-many join tables (`HasMany(...).WithMany(...)`). +- Enum-to-string conversions (`HasConversion<string>()`). 
+ +**Prefer Data Annotations on the entity (in the Model project):** + +- `[Key]` for primary keys (when not following EF Core naming conventions). +- `[Required]` for required properties. +- `[MaxLength]` for string length constraints. + +**Do not duplicate** — if a constraint is expressed via a Data Annotation on the entity, do not repeat it in the Fluent API configuration. + +### Relationship Configuration Patterns + +```csharp +// One-to-many +builder.HasMany(e => e.Items) + .WithOne(e => e.List) + .OnDelete(DeleteBehavior.Cascade); + +// Many-to-many +builder.HasMany(e => e.Tags) + .WithMany(e => e.SystemProfiles); + +// Optional relationship +builder.HasOne(e => e.Parent) + .WithMany(e => e.Children) + .IsRequired(false); + +// Self-referential hierarchy (for entities implementing IHierarchicalParentChildrenComposite) +builder.HasOne(e => e.Parent) + .WithMany(e => e.Children) + .IsRequired(false); +``` + +### TPH Discriminator Pattern + +For entity inheritance hierarchies, configure the discriminator in the base entity's configuration: + +```csharp +builder.HasDiscriminator("discriminator_column") + .HasValue(nameof(DerivedTypeA)) + .HasValue(nameof(DerivedTypeB)); +``` + +### Enum Conversion Pattern + +Store enums as strings for readability: + +```csharp +builder.Property(e => e.Status) + .IsRequired() + .HasConversion() + .HasMaxLength(32); +``` + +## DI Registration + +- Create a static class `ServiceCollectionRegistrations` (or `ServiceCollectionRegistration`) with extension methods. +- Register the DbContext and optionally the generic repositories from `ploch-data`. 
+ +```csharp +public static class ServiceCollectionRegistrations +{ + public static IServiceCollection AddDataServices( + this IServiceCollection services, + Action configureOptions, + IConfiguration configuration) + { + return services + .AddDbContext<{Product}DbContext>(configureOptions) + .AddRepositories<{Product}DbContext>(configuration); + } +} +``` + +- The `AddRepositories()` method comes from `Ploch.Data.GenericRepository.EFCore` and registers the generic repository and Unit of Work. See `data-access.md` for the full repository and Unit of Work consumption patterns. +- If generic repositories are not needed, register just the DbContext. + +## Naming Summary + +| Item | Naming Pattern | Example | +|------|---------------|---------| +| Project | `Ploch.{Product}.Data` | `Ploch.Lists.Data` | +| DbContext | `{Product}DbContext` | `ListsDbContext` | +| Configuration | `{Entity}Configuration` | `ListConfiguration` | +| DI class | `ServiceCollectionRegistrations` | — | +| DI method | `Add{Product}DataServices` or `AddDataServices` | `AddDataServices` | +| DbSet property | Plural entity name | `Lists`, `ListItems` | diff --git a/.aiassistant/rules/data-provider-project.md b/.aiassistant/rules/data-provider-project.md new file mode 100644 index 0000000..f7608d5 --- /dev/null +++ b/.aiassistant/rules/data-provider-project.md @@ -0,0 +1,269 @@ +--- +apply: always +--- + +# Database Provider Project Standards + +Rules for creating provider-specific Data projects (SQLite, SQL Server) in MrPloch repositories. These projects sit alongside the base Data project (see `data-project.md`) and contain the design-time factory, connection string configuration, migrations, and helper scripts. 
+ +## Project Structure + +```text +src/ + Data.SQLite/ # or Data.SqlServer/ + Migrations/ + {Timestamp}_Initial.cs # Generated by EF Core + {Timestamp}_Initial.Designer.cs # Generated by EF Core + {Product}DbContextModelSnapshot.cs # Generated by EF Core + {Product}DbContextFactory.cs # Design-time factory + appsettings.json # Connection string for migrations tooling + recreate-migrations.ps1 # Deletes and recreates migrations + update-database.ps1 # Applies migrations to local DB + recreate-migrations-update-database.ps1 # Deletes DB, recreates migrations, updates DB + Ploch.{Product}.Data.SQLite.csproj # Project file +``` + +## Naming Conventions + +| Provider | Directory Name | Project Name | Namespace | +|----------|---------------|-------------|-----------| +| SQLite | `Data.SQLite` | `Ploch.{Product}.Data.SQLite` | `Ploch.{Product}.Data.SQLite` | +| SQL Server | `Data.SqlServer` | `Ploch.{Product}.Data.SqlServer` | `Ploch.{Product}.Data.SqlServer` | + +- The factory class is always named `{Product}DbContextFactory` — same name in both provider projects, differentiated by namespace. + +## Project File (.csproj) + +### Required References + +Every provider project needs: + +- A project reference to the base Data project (`Ploch.{Product}.Data`). +- A project reference to the provider-specific factory base from `ploch-data`. +- `Microsoft.EntityFrameworkCore.Design` with `PrivateAssets=all` — required for migrations tooling. +- `Microsoft.EntityFrameworkCore.Tools` with `PrivateAssets=all` — required for `dotnet ef` commands. 
+ +### SQLite Project + +```xml + + + net9.0 + enable + enable + + + + + + + + + + + all + runtime; build; native; contentfiles; analyzers; buildtransitive + + + all + runtime; build; native; contentfiles; analyzers; buildtransitive + + + + + + PreserveNewest + + + +``` + +### SQL Server Project + +Same as SQLite but additionally reference the SQL Server provider package and the `ploch-data` SQL Server factory: + +```xml + + + + + + + + + + +``` + +## DbContextFactory Class + +### Factory Inheritance + +The factory must inherit from the provider-specific base class in `ploch-data`: + +| Provider | Base Class | Package | +|----------|-----------|---------| +| SQLite | `SqLiteDbContextFactory` | `Ploch.Data.EFCore.SqLite` | +| SQL Server | `SqlServerDbContextFactory` | `Ploch.Data.EFCore.SqlServer` | +| Other | `BaseDbContextFactory` | `Ploch.Data.EFCore` | + +The `TFactory` type parameter must be the concrete factory class itself — this enables `ApplyMigrationsAssembly` to resolve the correct assembly for migrations. 
+ +### SQLite Factory (Preferred Style) + +When using the provider-specific base class, the factory can be a single-line primary constructor class because `SqLiteDbContextFactory` already implements `ConfigureOptions`: + +```csharp +using Ploch.Data.EFCore.SqLite; + +namespace Ploch.{Product}.Data.SQLite; + +public class {Product}DbContextFactory() + : SqLiteDbContextFactory<{Product}DbContext, {Product}DbContextFactory>(options => new(options)); +``` + +### SQL Server Factory (Preferred Style) + +```csharp +using Ploch.Data.EFCore.SqlServer; + +namespace Ploch.{Product}.Data.SqlServer; + +public class {Product}DbContextFactory() + : SqlServerDbContextFactory<{Product}DbContext, {Product}DbContextFactory>(options => new(options)); +``` + +### Custom Configuration (When Needed) + +If the provider-specific base class does not cover your needs, override `ConfigureOptions` and inherit from `BaseDbContextFactory` directly: + +```csharp +using Microsoft.EntityFrameworkCore; +using Ploch.Data.EFCore; + +namespace Ploch.{Product}.Data.SQLite; + +public class {Product}DbContextFactory : BaseDbContextFactory<{Product}DbContext, {Product}DbContextFactory> +{ + public {Product}DbContextFactory() : base(options => new {Product}DbContext(options)) + { } + + protected override DbContextOptionsBuilder<{Product}DbContext> ConfigureOptions( + Func connectionStringFunc, + DbContextOptionsBuilder<{Product}DbContext> optionsBuilder) + { + return optionsBuilder.UseSqlite(connectionStringFunc(), ApplyMigrationsAssembly); + } +} +``` + +## Connection String Configuration (appsettings.json) + +Each provider project must include an `appsettings.json` with a `DefaultConnection` connection string, copied to output directory. This is used by the design-time factory for migrations tooling. 
+ +### SQLite + +```json +{ + "ConnectionStrings": { + "DefaultConnection": "DataSource={product}.db;Cache=Shared" + } +} +``` + +### SQL Server + +```json +{ + "ConnectionStrings": { + "DefaultConnection": "Server=localhost;Database=Ploch.{Product};Integrated Security=True;TrustServerCertificate=True" + } +} +``` + +## PowerShell Migration Scripts + +Every provider project must include these three scripts. Run them from the provider project directory. + +Each script must wrap its body in `Push-Location $PSScriptRoot` / `try { ... } finally { Pop-Location }` so it remains location-independent and safe to invoke from any working directory. + +### `recreate-migrations.ps1` + +Deletes all existing migrations and creates a fresh `Initial` migration: + +```powershell +Push-Location $PSScriptRoot +try { + Remove-Item Migrations -Force -Confirm:$false -Recurse + dotnet ef migrations add Initial +} finally { + Pop-Location +} +``` + +### `update-database.ps1` + +Applies pending migrations to the local database: + +```powershell +Push-Location $PSScriptRoot +try { + dotnet ef database update +} finally { + Pop-Location +} +``` + +### `recreate-migrations-update-database.ps1` + +Deletes the local database file (SQLite) or database (SQL Server), recreates migrations, and applies them. Useful during development when the model is changing frequently. 
+ +#### SQLite variant + +```powershell +Push-Location $PSScriptRoot +try { + Remove-Item *.db -Force -Confirm:$false -ErrorAction SilentlyContinue + ./recreate-migrations.ps1 + ./update-database.ps1 +} finally { + Pop-Location +} +``` + +#### SQL Server variant + +```powershell +Push-Location $PSScriptRoot +try { + dotnet ef database drop --force + ./recreate-migrations.ps1 + ./update-database.ps1 +} finally { + Pop-Location +} +``` + +## .gitignore + +Each provider project should include a `.gitignore` that excludes local database files: + +### SQLite (.gitignore) + +```gitignore +*.db +*.db-shm +*.db-wal +``` + +### SQL Server (.gitignore) + +No additional ignores needed (database is server-hosted). + +## Migrations + +- Migrations are generated and managed exclusively in the provider project, never in the base Data project. +- The first migration should always be named `Initial`. +- Do not manually edit generated migration files unless absolutely necessary. +- The `{Product}DbContextModelSnapshot.cs` is auto-generated — do not edit. +- The base `BaseDbContextFactory.ApplyMigrationsAssembly` method ensures EF Core looks for migrations in the provider project's assembly, not the DbContext's assembly. diff --git a/.aiassistant/rules/dependencies.md b/.aiassistant/rules/dependencies.md new file mode 100644 index 0000000..ecaeca4 --- /dev/null +++ b/.aiassistant/rules/dependencies.md @@ -0,0 +1,40 @@ +--- +apply: always +--- + +# Dependency Management Standards + +## Version Pinning + +- **Always use fixed versions** (e.g., `"lodash": "4.17.21"`, not `"^4.17.21"`). +- Applies to all dependency files including overrides and resolutions. + +## Upgrading Process + +### Pre-Upgrade Investigation + +- Read changelog/release notes between current and target versions. +- Identify breaking changes and their impact. +- Look for official migration guides and codemods. +- Check for CLI migration tools (e.g. `npx package-name migrate`). 
+- Note deprecated APIs and their replacements. + +### Information Sources + +- Official documentation site (highest priority). +- Repository `CHANGELOG.md`, GitHub releases. +- Migration guides at `/docs/migration`, `/MIGRATION.md`. +- Package README; community resources for complex migrations. + +### Upgrade Execution + +- Run automated tools first (codemods, CLI migrations). +- Update configuration files and type definitions. +- Update imports, API calls, deprecated methods. +- Run tests and fix breakages. +- Address linter/type errors. +- Manual verification of critical paths. + +### Post-Upgrade + +- Update README, spec, or mdc files that reference the package. diff --git a/.aiassistant/rules/documentation.md b/.aiassistant/rules/documentation.md new file mode 100644 index 0000000..fbe61e7 --- /dev/null +++ b/.aiassistant/rules/documentation.md @@ -0,0 +1,58 @@ +--- +apply: always +--- + +# Documentation Standards + +## XML Code Documentation + +For all **publicly available code** (open-source packages, public GitHub repositories): + +- Provide XML documentation comments (`///`) on all public types, methods, properties, and constructors. +- Include ``, ``, ``, ``, and `` tags as appropriate. +- Follow Microsoft's XML documentation style — review comments in Microsoft's own libraries (e.g. `System.Text.Json`, `Microsoft.Extensions.DependencyInjection`) for reference. +- Include `` blocks when usage is not 100% obvious or when there are multiple valid usage patterns. +- Use British English in documentation text. 
+ +### What to Document + +| Member | Required Tags | +|--------|--------------| +| Public class/struct/interface | ``, optionally `` with usage guidance | +| Public method | ``, `` for each parameter, ``, `` for thrown exceptions | +| Public property | `` | +| Public constructor | ``, `` for each parameter | +| Public enum | `` on the enum and each member | + +### Example + +```csharp +/// +/// Checks whether the specified string contains any of the provided substrings. +/// +/// The string to search within. +/// The substrings to search for. +/// +/// if contains at least one +/// of the specified substrings; otherwise, . +/// +/// +/// Thrown when or is . +/// +/// +/// +/// var result = "Hello World".ContainsAny("Hello", "Goodbye"); +/// // result == true +/// +/// +public static bool ContainsAny(this string source, params string[] values) +``` + +## Markdown Documentation Pages + +- Documentation pages (README.md, docs/*.md, RELEASE_NOTES.md) **must** be kept in sync with the current code. +- When adding or modifying public APIs, update the relevant documentation pages. +- When adding new features, ensure the README or relevant doc page documents them. +- When changing behaviour, update any documentation that describes the old behaviour. +- Do not leave documentation describing removed or renamed APIs. +- Update `RELEASE_NOTES.md` or change log files for user-visible changes (new features, breaking changes, significant bug fixes). diff --git a/.aiassistant/rules/domain-model.md b/.aiassistant/rules/domain-model.md new file mode 100644 index 0000000..634f690 --- /dev/null +++ b/.aiassistant/rules/domain-model.md @@ -0,0 +1,58 @@ +--- +apply: always +--- + +# Domain Model Standards + +Domain entities in MrPloch projects are **simple POCO types** that implement interfaces from the `Ploch.Data.Model` package to standardise common property names. This enables reusable UI components, generic repository operations, and consistent API shapes across projects. 
+ +## Core Principle + +- Entities **must** implement `Ploch.Data.Model` interfaces for any common property (`Id`, `Name`, `Title`, `Description`, `Contents`, etc.) rather than defining these properties ad-hoc. +- Entities are plain data carriers — no business logic in entity classes. + +## Required Interface Usage + +| Property | Interface | Notes | +|----------|-----------|-------| +| `Id` | `IHasId<TId>` | Every entity must implement this. Default `TId` is `int`; use `Guid` or `string` where appropriate. | +| `Name` | `INamed` | For entities with a name. Use `INamedReadOnly` if the name should not be settable. | +| `Title` | `IHasTitle` | For entities with a title. Use `IHasTitleReadOnly` for read-only. | +| `Description` | `IHasDescription` | Nullable `string?`. | +| `Contents` | `IHasContents` | Nullable `string?` for textual content. | +| `Notes` | `IHasNotes` | Nullable `string?`. | +| `Value` | `IHasValue<TValue>` | For entities that hold a typed value. | + +## Audit Properties + +- Use `IHasAuditProperties` for entities that need full audit tracking (`CreatedTime`, `ModifiedTime`, `AccessedTime`, `CreatedBy`, `LastModifiedBy`, `LastAccessedBy`). +- Use `IHasAuditTimeProperties` if only timestamps are needed (no user tracking). +- Use individual interfaces (`IHasCreatedTime`, `IHasModifiedBy`, etc.) if only specific audit fields are needed. + +## Common Base Types + +- **Category entities** must inherit from `Category<TId>` (or `Category` for `int` IDs) from `Ploch.Data.Model.CommonTypes`. Do not create custom category classes from scratch — the base type provides `Id`, `Name`, `Parent`, and `Children` with the correct hierarchical structure. +- **Tag entities** must inherit from `Tag<TId>` (or `Tag` for `int` IDs) from `Ploch.Data.Model.CommonTypes`. The base type provides `Id`, `Name`, and `Description`. + +## Hierarchical Entities + +- For tree structures (parent/children of the same type), implement `IHierarchicalParentChildrenComposite`. 
+- For entities that only need a parent reference, use `IHierarchicalWithParent` or `IHierarchicalWithParentComposite` (self-referential). +- For entities that only need children, use `IHierarchicalWithChildren` or `IHierarchicalWithChildrenComposite` (self-referential). +- If an EF Core provider-layer project opts into lazy loading (via the lazy-loading proxies package), mark the corresponding `Parent` and `Children` navigation properties as `virtual` in that layer. The core domain model must stay provider-agnostic — do not require `virtual` purely for EF Core in repositories or applications that do not use lazy loading. + +## Categorisation and Tagging + +- Entities with categories must implement `IHasCategories` (or `IHasCategories` for non-`int` IDs). The `TCategory` type must inherit from `Category`. +- Entities with tags must implement `IHasTags` (or `IHasTags` for non-`int` IDs). The `TTag` type must inherit from `Tag`. + +## Entity Style Rules + +- Entities are **classes** (not records), unless there is a specific reason for value semantics. +- Use auto-properties with `{ get; set; }`. +- Use `= null!` for required reference-type properties (EF Core will populate them). +- Use `= []` or `= null!` for collection properties. +- Nullable properties (`string?`, `ICollection?`) for optional fields. +- Mark navigation properties as `virtual` only when a consuming project (typically the EF Core provider layer) needs lazy loading. Keep the core model free of ORM-specific requirements. +- Data Annotations from `System.ComponentModel.DataAnnotations` (`[Key]`, `[Required]`, `[MaxLength]`) are optional. Apply them on the entity only when they carry provider-agnostic meaning (e.g. validation). Relational-only constraints belong in Fluent API configurations in the Data project, not on the entity. +- Keep entities in a dedicated `Model` or `Models` namespace (e.g. `Ploch.Lists.Model`, `Ploch.EditorConfigTools.Models`). 
diff --git a/.aiassistant/rules/naming.md b/.aiassistant/rules/naming.md new file mode 100644 index 0000000..0042e2f --- /dev/null +++ b/.aiassistant/rules/naming.md @@ -0,0 +1,9 @@ +--- +apply: always +--- + +# Naming Standards + +- Use **PascalCase** for methods and properties; **camelCase** for parameters and local variables. +- Boolean names should begin with: `Is`, `Are`, `Should`, `Could`, `Would` (e.g., `ShouldLogUserOutAfterTransfer`). +- Methods must start with a verb (e.g., `RemoveUserFromList`). diff --git a/.aiassistant/rules/pr-descriptions.md b/.aiassistant/rules/pr-descriptions.md new file mode 100644 index 0000000..f8138c7 --- /dev/null +++ b/.aiassistant/rules/pr-descriptions.md @@ -0,0 +1,58 @@ +--- +apply: always +--- + +# Pull Request Description Standards + +## Core Rule + +Every PR **must** have a detailed description. All changes and decisions **must** be documented in the PR body. A reviewer should be able to understand the full scope and rationale without reading the code first. + +## Structure + +If a `.github/pull_request_template.md` exists in the repository, follow it. Otherwise, use this structure: + +```markdown +## Summary + +Brief description of what this PR does and why. + +## Changes + +- Bullet list of all meaningful changes +- Include file/module scope where helpful +- Group by feature or area if the PR is large + +## Design Decisions + +Document any non-obvious choices made during implementation: +- Why a particular approach was chosen over alternatives +- Trade-offs considered +- Constraints that influenced the design + +## Testing + +- What automated tests were added or modified +- What manual testing was performed +- Test coverage impact + +## Breaking Changes + +List any breaking changes and what consumers must update. +Omit this section entirely if there are no breaking changes. 
+ +## Related + +- Closes # +- Related to # +- Depends on (if cross-repo dependency) +``` + +## Rules + +- **Link the issue:** Always include `Closes #` or `Refs #` to automatically link and (optionally) close the issue on merge. +- **Document decisions:** If you chose approach A over approach B, explain why. Reviewers and future maintainers need this context. +- **Be specific:** "Updated the data layer" is insufficient. "Added `GetBySpecificationAsync` method to `IReadRepositoryAsync` for Ardalis.Specification support" is specific. +- **Include test evidence:** Mention test counts, coverage percentages, or specific scenarios tested. +- **Update on subsequent pushes:** If you push fixes for CI failures or PR comments, update the PR description to reflect the **final** state of the changes, not the initial state. +- **No placeholder PRs:** Only create a PR when implementation and all local verification steps are complete. diff --git a/.aiassistant/rules/project-structure.md b/.aiassistant/rules/project-structure.md new file mode 100644 index 0000000..fc22360 --- /dev/null +++ b/.aiassistant/rules/project-structure.md @@ -0,0 +1,156 @@ +--- +apply: always +--- + +# Repository & Project Structure Standards + +Rules for organising .NET repositories in the MrPloch workspace. 
+ +## Repository Root Layout + +Every repository follows this structure: + +``` +/ + src/ # All source projects + tests/ # All test projects + docs/ # Documentation (design docs, plans, specs) + scripts/ # Build, migration, and utility scripts + .github/ + workflows/ # GitHub Actions CI/CD + pull_request_template.md # PR template + .claude/ # Claude Code rules and skills + Directory.Build.props # Centralised MSBuild settings + Directory.Packages.props # Central Package Management + NuGet.Config # NuGet package sources (optional, workspace-level exists) + .editorconfig # Code style enforcement + .gitignore # Git ignore rules + .gitattributes # Git attribute rules + README.md # Repository documentation + RELEASE_NOTES.md # Release notes (library repos) + CLAUDE.md # Claude Code project instructions + *.slnx / *.sln # Solution file(s) at repository root + LICENSE # MIT licence +``` + +### Key Directories + +| Directory | Purpose | Required | +|-----------|---------|----------| +| `src/` | Source projects — one subdirectory per project | Yes | +| `tests/` | Test projects — one subdirectory per test project | Yes | +| `docs/` | Design documents, plans, specs, API references | Optional | +| `scripts/` | PowerShell/shell scripts for build, migration, repo maintenance | Optional | +| `DocumentationSite/` | DocFx-generated API documentation site | Some repos | +| `change-log/` | Per-issue/per-PR change log markdown files | Some repos | +| `samples/` | Sample/example projects demonstrating usage | Some repos | + +## Source Project Layout + +Source projects live in `src/` with a directory name that is the short project name (without the `Ploch.` prefix): + +``` +src/ + {ShortName}/ + Ploch.{Product}.{ShortName}.csproj + *.cs +``` + +### Naming Convention + +- **Directory name:** The short name without the `Ploch.` prefix, using dots for namespacing. 
+ - Example: `src/Common.Serialization/` contains `Ploch.Common.Serialization.csproj` + - Example: `src/Data.EFCore/` contains `Ploch.Data.EFCore.csproj` + - Example: `src/Data/` contains `Ploch.Lists.Data.csproj` + - Example: `src/Model/` contains `Ploch.Lists.Model.csproj` +- **Project file:** Always prefixed with `Ploch.` — e.g. `Ploch.Common.Serialization.csproj`. +- **Namespace:** Matches the project name — e.g. `Ploch.Common.Serialization`. + +### Common Source Project Types + +For application repos (e.g. `ploch-lists`, `ploch-groupmatters`), the `src/` directory typically contains: + +| Directory | Project Name | Purpose | +|-----------|-------------|---------| +| `Model/` | `Ploch.{Product}.Model` | Domain entity POCOs | +| `Data/` | `Ploch.{Product}.Data` | DbContext, entity configurations | +| `Data.SQLite/` | `Ploch.{Product}.Data.SQLite` | SQLite provider, migrations, design-time factory | +| `Data.SqlServer/` | `Ploch.{Product}.Data.SqlServer` | SQL Server provider, migrations, design-time factory | +| `Api/` | `Ploch.{Product}.Api` | Web API host / endpoints | +| `UI/` | Various | UI application (MAUI, WinUI, etc.) | + +## Test Project Layout + +Test projects live in `tests/` mirroring the source project they test, with a `.Tests` suffix: + +``` +tests/ + {ShortName}.Tests/ + Ploch.{Product}.{ShortName}.Tests.csproj + *Tests.cs +``` + +### Naming Convention + +- **Directory name:** Source project short name + `.Tests` suffix. + - Example: `tests/Common.Serialization.Tests/` for `src/Common.Serialization/` + - Example: `tests/Data.EFCore.Tests/` for `src/Data.EFCore/` +- **Project file:** Source project name + `.Tests` — e.g. `Ploch.Common.Serialization.Tests.csproj`. +- **Integration tests** use `.IntegrationTests` suffix instead of `.Tests`. + - Example: `Ploch.Data.EFCore.IntegrationTesting.csproj` + +### Test Class Naming + +- Unit tests: `{TestedTypeName}Tests` — e.g. `StringExtensionsTests`. +- Integration tests: `{TestedFeature}Tests` — e.g. 
`AuthenticationTests`. + +## Solution Files + +- Solution files (`.slnx` or `.sln`) are placed at the **repository root**. +- Prefer `.slnx` (XML-based) format for new or updated solutions. Many repos maintain both `.sln` and `.slnx`. +- Name: `Ploch.{Product}.slnx` — e.g. `Ploch.Common.slnx`, `Ploch.Data.slnx`. +- Some repos have multiple solutions for different subsets (e.g. `Ploch.Common.Endpoints.slnx`, `Ploch.Common.LocalDev.slnx`). + +## Build Configuration Files + +All of these live at the **repository root**: + +| File | Purpose | +|------|---------| +| `Directory.Build.props` | Centralised MSBuild properties (nullable, lang version, analysers, test project detection, packaging) | +| `Directory.Packages.props` | Central Package Management (`ManagePackageVersionsCentrally=true`), imports shared versions from `mrploch-development/dependencies/` | +| `NuGet.Config` | Package sources (nuget.org + GitHub Packages). Workspace-level config exists at `C:\DevNet\my\mrploch\NuGet.Config` | +| `.editorconfig` | Code style and analyser severity rules | +| `stylecop.json` | StyleCop analyser configuration (some repos) | + +## Cross-Repository References + +During local development, repos reference each other via **relative `ProjectReference` paths** — all repos must be cloned as siblings under the same parent directory (`C:\DevNet\my\mrploch\`): + +```xml +<ItemGroup> + <!-- Illustrative example — sibling repos are referenced via relative paths --> + <ProjectReference Include="..\..\..\ploch-common\src\Common\Ploch.Common.csproj" /> + <ProjectReference Include="..\..\..\ploch-data\src\Data.EFCore\Ploch.Data.EFCore.csproj" /> +</ItemGroup> +``` + +Shared build configuration is imported from the `mrploch-development` sibling directory: + +```xml +<Import Project="..\mrploch-development\dependencies\Directory.Packages.props" /> +``` + +In CI, repos consume each other as **NuGet packages** from GitHub Packages instead of `ProjectReference`. + +## GitHub Configuration + +- `.github/workflows/` — CI/CD workflows (typically `build-dotnet.yml`). +- `.github/pull_request_template.md` — PR template with description, issue link, and review checklist. +- `.github/dependabot.yml` — Dependabot configuration (some repos).
+ +## Files That Do NOT Belong + +- No module-level `README.md` files inside `src/` subdirectories (library projects may have package READMEs for NuGet, but no standalone module docs). +- No `CLAUDE.md` inside `src/` or `tests/` — only at the repository root. +- No test projects inside `src/` — all tests go in `tests/`. diff --git a/.aiassistant/rules/qa.md b/.aiassistant/rules/qa.md new file mode 100644 index 0000000..1db63c9 --- /dev/null +++ b/.aiassistant/rules/qa.md @@ -0,0 +1,34 @@ +--- +apply: always +--- + +# QA Testing Standards + +## Critical Rules + +- **Reject localhost URLs** – If given `localhost`, `127.0.0.1`, or `0.0.0.0`, stop and ask for a deployed URL. QA testing must be against real deployed environments. +- **Never analyse source code** – If given code snippets, refuse to review them. QA tests the running application, not the implementation. +- **Test actual deployed URL** – Don't assume local matches production. Require staging/dev/prod URLs. + +## Process + +- Create a `qa-report/` directory and a subfolder for the task you are QA'ing. Place output there. + +## Output Format + +- **Summary:** `qa-report/table.md` with columns: Test Case | Result | Details. +- **Individual reports:** `qa-report/[test-name].md` using format: + +```markdown +**🔑 Entry Criteria** +- Given: [initial state] + +**🪜 Steps** +- When: [action taken] + +**✅ Result** +- Then: [expected outcome] + +**📎 Evidence** +[Attach relevant evidence: screenshots, logs, API responses] +``` diff --git a/.aiassistant/rules/rules.md b/.aiassistant/rules/rules.md new file mode 100644 index 0000000..8ab181d --- /dev/null +++ b/.aiassistant/rules/rules.md @@ -0,0 +1,63 @@ +--- +apply: always +--- + +# Documentation and Rules System + +## Three-Tier Documentation System + +**Tier 1: README.md** – Onboarding, quick start, basic usage (max 150 lines for packages). Copy-pasteable examples. Cross-reference, don't duplicate. Acts as an index for all spec files (list in a Documentation section). 
Repo/package level only; no module-level READMEs. Modules are covered by spec files where necessary. + +**Tier 2: .cursor/rules/\*.mdc** – Engineering standards and workflows. How to write code, use frameworks, configure tools, and set up the environment. Concise, actionable instructions only. + +**Tier 3: \*.spec.md** – Business logic, compliance, feature requirements. Explains "why" and "what", not "how". No test scenarios. Must link back to the repo/package README. + +**No overlap:** Cross-reference between tiers, never duplicate. + +## README Structure + +READMEs should include these sections as applicable: + +1. **Title** — Package/repo name +2. **Quick Start** — Getting started quickly +3. **Documentation** — List of spec files (`*.spec.md`) with brief descriptions (under a 'Specs' sub-section), along with any other related documentation in separate sub-sections as needed +4. **Development** — Prerequisites, setup, and contribution guidelines +5. **Configuration** — Configuration options (if applicable) + +## Rule File Structure + +`.cursor/rules` is the source of truth for AI rules. + +**Generic rules:** `name.mdc` (no underscore). Universal, repo-agnostic. Specific to a language, framework, tool, platform, etc. + +**Repo-specific rules:** `_project.mdc` (required) and, for repos with more than one package, `_packageName.mdc`. Repo or package specific paths, commands, utilities. + +**Globs vs AI interpretation:** Use globs for strict file patterns. Without globs (recommended), AI interprets context for better accuracy. + +**Guidelines:** Single responsibility per file. Actionable only. Prefer tooling (ESLint, Prettier) over AI rules. If a rule can be enforced by a linter or formatter, it belongs in that tool's config, not here. AI agents should read and respect linter and formatter output. + +## MDC File Formatting + +- **Frontmatter:** `description` is required; AI uses semantic matching to decide relevance. 
Optionally add ONE of: `alwaysApply: true` (forces load for every request) OR `globs` (strict file pattern enforcement). Omit both to rely on intelligent description-based pickup. +- One `#` title per file; `##` for sections. +- Rules as `-` bullet points; one concept per bullet. +- Use `**bold**` for emphasis; backticks for `code`, `filenames`, and `commands`. + +## Architectural Decisions Hierarchy + +- **Spec files:** Major architectural decisions with business impact. +- **`_project.mdc`, `_packageName.mdc`:** Smaller architectural and project-level decisions. +- **Framework rules:** Usage patterns for chosen tools. + +## Spec Creation Guidelines + +Write spec files for complex architectural decisions: auth, API clients, state management, compliance-heavy workflows. + +Skip specs for styling, simple UI, config, and dev tooling. + +## Sync Process + +Run the following after any rule changes: + +- **node:** `pnpm exec ai-rules install` (or `npx @EqualsGroup/ai-rules install`) +- **.NET:** `dotnet ai-rules install` diff --git a/.aiassistant/rules/sample-app.md b/.aiassistant/rules/sample-app.md new file mode 100644 index 0000000..75d1f33 --- /dev/null +++ b/.aiassistant/rules/sample-app.md @@ -0,0 +1,61 @@ +--- +apply: always +--- + +# Sample Application Rules + +The `samples/SampleApp/` directory contains a **Knowledge Base sample application** that demonstrates how an **external consumer** would use the Ploch.Data libraries (GenericRepository, Unit of Work, EF Core utilities, etc.) from published NuGet packages. + +## Dual-Mode Build + +The SampleApp supports two build modes: + +### Standalone mode (default) + +```bash +cd samples/SampleApp +dotnet build Ploch.Data.SampleApp.slnx +``` + +Uses `PackageReference` for Ploch.Data packages — exactly as an external consumer would. Requires the packages to be published on the NuGet feed. 
+ +### Solution mode (CI / PR validation) + +```bash +dotnet build Ploch.Data.slnx -p:UsePlochProjectReferences=true +``` + +The `ProjectReferences.props` file automatically replaces Ploch.Data `PackageReference` items with `ProjectReference` items pointing to the library source code. This catches breaking changes at PR time. + +## How the switching works + +1. Each csproj file contains only `PackageReference` for Ploch.Data packages (the external consumer view) +2. `samples/SampleApp/Directory.Build.props` conditionally imports `ProjectReferences.props` when `UsePlochProjectReferences=true` +3. `ProjectReferences.props` removes all Ploch.Data PackageReferences and adds ProjectReferences to the corresponding source projects +4. The CI workflow passes `-p:UsePlochProjectReferences=true` on all dotnet commands + +## Critical Constraints + +### Never manually edit csproj files to swap references + +The PackageReference ↔ ProjectReference switching is handled **automatically** by `ProjectReferences.props`. Never manually convert PackageReferences to ProjectReferences (or vice versa) in any SampleApp csproj file. + +### Standalone build configuration — no parent imports + +The SampleApp has its own `Directory.Build.props` and `Directory.Packages.props` that are **self-contained**. They must **not** import or inherit from the parent repo's build configuration files. An external consumer would not have access to `mrploch-development/dependencies/` or the repo's root `Directory.Build.props`. + +### Package versions are managed independently + +The SampleApp's `Directory.Packages.props` defines its own `PlochDataPackagesVersion` variable and all package versions explicitly. When a new version of the Ploch.Data packages is published, this version must be updated manually. + +### SonarCloud + +The SampleApp is analysed by SonarCloud for code issues but **excluded from coverage** metrics (`sonar.coverage.exclusions` includes `**/samples/**`). 
+ +## What this means in practice + +- **Do not** replace `PackageReference` with `ProjectReference` for Ploch.Data packages in csproj files. +- **Do not** add `<Import>` directives that reference files outside `samples/SampleApp/` (except `ProjectReferences.props` which is conditionally imported). +- **Do** treat the SampleApp csproj files as if they were in a completely separate repository. +- **Do** update `PlochDataPackagesVersion` in `samples/SampleApp/Directory.Packages.props` after publishing new package versions. +- **Do** update `ProjectReferences.props` if new Ploch.Data packages are added to the library. diff --git a/.aiassistant/rules/summaries.md b/.aiassistant/rules/summaries.md new file mode 100644 index 0000000..d810af8 --- /dev/null +++ b/.aiassistant/rules/summaries.md @@ -0,0 +1,11 @@ +--- +apply: always +--- + +# Summary Reports + +When producing a summary, report, or analysis (e.g. pipeline status, build results, investigation findings, architecture overviews): + +1. **Save to file** — Write the summary as a Markdown file in `C:\DevNet\my\mrploch\temp\` with a descriptive, timestamped filename (e.g. `2026-03-10-ploch-common-pipeline-status.md`). +2. **Open automatically** — After writing the file, open it with the system default viewer by running: `start "" "<path-to-file.md>"` (on Windows this launches the default `.md` handler, which is Typedown). +3. **Still display inline** — Continue showing a concise version of the summary in the conversation as normal. diff --git a/.aiassistant/rules/todo-tasks-execution.md b/.aiassistant/rules/todo-tasks-execution.md new file mode 100644 index 0000000..88a8e98 --- /dev/null +++ b/.aiassistant/rules/todo-tasks-execution.md @@ -0,0 +1,21 @@ +--- +apply: always +--- + +# Performing Tasks from TODO.md + +**Skill:** Use `/execute-todo` to run the full workflow. See `~/.claude/skills/execute-todo/SKILL.md`. + +## Principles + +These principles guide TODO task execution. The skill handles the workflow; these rules explain *why*.
+ +- **Autonomous execution** — research before asking. Use web search, sibling repos, docs to resolve uncertainties. Only ask the user when truly blocked. +- **End-to-end quality** — every task must build, pass tests, pass static analysis, and survive self-review before committing. +- **Zero new warnings** — treat analyser output (StyleCop, Roslynator, SonarAnalyzer) as requirements, not suggestions. +- **Comprehensive tests** — coding tasks require unit tests (xUnit v3, FluentAssertions, AutoFixture) per the .NET testing rules. +- **Conventional Commits** — one commit per task, following `commits.md` rules. +- **PR check gate** — when pushing, wait for all CI checks to pass. Resolve failures and PR comments before marking complete. +- **Parallel where possible** — independent tasks should be dispatched to parallel agents. +- **Non-blocking issues** — collect questions and suggestions in `TODO-important.md`. Only ask if truly blocking. +- **For common libraries** (Ploch.Common, Ploch.Data, Ploch.Web, etc.) — provide code documentation and README files. diff --git a/.aiassistant/rules/writing-dotnet-tests.md b/.aiassistant/rules/writing-dotnet-tests.md new file mode 100644 index 0000000..ab444a6 --- /dev/null +++ b/.aiassistant/rules/writing-dotnet-tests.md @@ -0,0 +1,25 @@ +--- +apply: always +--- + +# .NET Testing Standards + +Contains rules that should be used when testing .NET code. + +## Frameworks and Libraries + +- The tests for `.NET` code should be written using the `xUnit` framework +- The `xUnit` version to use is `v3` ([xUnit v3 docs](https://xunit.net/docs/getting-started/v3/getting-started)) +- Use [FluentAssertions library](https://fluentassertions.com/) +- Use the [AutoFixture library](https://github.com/AutoFixture/AutoFixture) + +## Writing Tests + +- Try to test observable behaviour, not implementation details.
+- Try to structure tests using the **Arrange, Act, Assert** pattern, where appropriate, unless it negatively affects readability and flow +- For unit tests, mock external dependencies. +- Test both positive and negative cases. +- For unit tests, test method names should follow the convention: `<MethodName>_should_<expected_behaviour>`, for example: `IsNullOrEmpty_should_return_false_if_string_is_not_null_or_empty` +- For integration tests, test method names should be similar to the unit test convention, but include a scenario name instead of `<MethodName>`, following the convention: `<ScenarioName>_should_<expected_behaviour>`, for example: `BasicAuthenticationFlow_should_authenticate_the_user_with_basic_credentials` +- A class name for the unit tests should be `<TestedClassName>Tests` - for example `StringExtensionsTests` if the tested method is in the `StringExtensions.cs` class. +- A class name for integration tests should be `<TestedFeature>Tests`, for example `AuthenticationTests.cs` diff --git a/.aiignore b/.aiignore new file mode 100644 index 0000000..71ddf39 --- /dev/null +++ b/.aiignore @@ -0,0 +1,12 @@ +# An .aiignore file follows the same syntax as a .gitignore file.
+# .gitignore documentation: https://git-scm.com/docs/gitignore + +# you can ignore files +.DS_Store +*.log +*.tmp + +# or folders +dist/ +build/ +out/ diff --git a/.claude/agents b/.claude/agents new file mode 120000 index 0000000..4d454cf --- /dev/null +++ b/.claude/agents @@ -0,0 +1 @@ +../../.claude/agents \ No newline at end of file diff --git a/.claude/rules/agent.md b/.claude/rules/agent.md new file mode 120000 index 0000000..5021d38 --- /dev/null +++ b/.claude/rules/agent.md @@ -0,0 +1 @@ +../../../.claude/rules/agent.md \ No newline at end of file diff --git a/.claude/rules/branch-naming.md b/.claude/rules/branch-naming.md new file mode 120000 index 0000000..a6a7ef5 --- /dev/null +++ b/.claude/rules/branch-naming.md @@ -0,0 +1 @@ +../../../.claude/rules/branch-naming.md \ No newline at end of file diff --git a/.claude/rules/code-quality.md b/.claude/rules/code-quality.md new file mode 120000 index 0000000..91db41a --- /dev/null +++ b/.claude/rules/code-quality.md @@ -0,0 +1 @@ +../../../.claude/rules/code-quality.md \ No newline at end of file diff --git a/.claude/rules/commits.md b/.claude/rules/commits.md new file mode 120000 index 0000000..e37a6df --- /dev/null +++ b/.claude/rules/commits.md @@ -0,0 +1 @@ +../../../.claude/rules/commits.md \ No newline at end of file diff --git a/.claude/rules/data-access.md b/.claude/rules/data-access.md new file mode 120000 index 0000000..b21158e --- /dev/null +++ b/.claude/rules/data-access.md @@ -0,0 +1 @@ +../../../.claude/rules/data-access.md \ No newline at end of file diff --git a/.claude/rules/data-project.md b/.claude/rules/data-project.md new file mode 120000 index 0000000..c9b9547 --- /dev/null +++ b/.claude/rules/data-project.md @@ -0,0 +1 @@ +../../../.claude/rules/data-project.md \ No newline at end of file diff --git a/.claude/rules/data-provider-project.md b/.claude/rules/data-provider-project.md new file mode 120000 index 0000000..5af7d13 --- /dev/null +++ b/.claude/rules/data-provider-project.md @@ 
-0,0 +1 @@ +../../../.claude/rules/data-provider-project.md \ No newline at end of file diff --git a/.claude/rules/dependencies.md b/.claude/rules/dependencies.md new file mode 120000 index 0000000..cd34ae5 --- /dev/null +++ b/.claude/rules/dependencies.md @@ -0,0 +1 @@ +../../../.claude/rules/dependencies.md \ No newline at end of file diff --git a/.claude/rules/documentation.md b/.claude/rules/documentation.md new file mode 120000 index 0000000..07826fb --- /dev/null +++ b/.claude/rules/documentation.md @@ -0,0 +1 @@ +../../../.claude/rules/documentation.md \ No newline at end of file diff --git a/.claude/rules/domain-model.md b/.claude/rules/domain-model.md new file mode 120000 index 0000000..63aa7a2 --- /dev/null +++ b/.claude/rules/domain-model.md @@ -0,0 +1 @@ +../../../.claude/rules/domain-model.md \ No newline at end of file diff --git a/.claude/rules/integration-testing.md b/.claude/rules/integration-testing.md new file mode 100644 index 0000000..d0e93d9 --- /dev/null +++ b/.claude/rules/integration-testing.md @@ -0,0 +1,149 @@ +# Integration Testing Standards + +Rules for writing and modifying integration tests in the `ploch-data` repository. Applies to any test that inherits from `DataIntegrationTest` or `GenericRepositoryDataIntegrationTest` — including tests in the `Ploch.Data.GenericRepository.EFCore.IntegrationTests` project and the `SampleApp` integration tests. + +## Golden Rule — Do Not Validate a Feature Using the Feature Itself + +When a test exercises a **write** operation via the Generic Repository (Add / Update / Delete / UoW.CommitAsync), do **not** use the Generic Repository to read the data back for the assertion phase. Doing so validates code that is under test with code that is under test. + +**Validate with a plain `DbContext` obtained from the root service provider.** That is the only way to prove the entity was actually persisted and re-hydrated from the database, not served from the change tracker. 
+ +## The Three Roles of a DbContext in an Integration Test + +A single test typically touches the database in three distinct phases. Choose the **right instance** for each phase: + +| Phase | Purpose | Use | +|-------|---------|-----| +| **Arrange** | Seed data that is not part of the system under test | `DbContext` property (the base-class-provided instance) | +| **Act** | Exercise the feature under test | `CreateUnitOfWork()` / `CreateReadWriteRepositoryAsync()` / the specific repository interface being tested | +| **Assert** | Read back to verify the effect | `CreateRootDbContext()` — a fresh context from a new scope | + +### Why a fresh DbContext is required for the Assert phase + +- EF Core's `DbContext` is a **unit of work with an identity map**. Once an entity is tracked, subsequent queries against the same context can return the cached in-memory copy instead of re-hydrating from the database. +- The `ScopedServiceProvider` exposed by `DataIntegrationTest` resolves to the **same scoped instance** as the `DbContext` property, and the same instance that `CreateUnitOfWork()` uses internally. Resolving a `DbContext` from it gives you the *already tracked* context, not a fresh one. +- `RootServiceProvider`, by contrast, creates a **new scope** on every service resolution. `CreateRootDbContext()` wraps that resolution and is the correct way to get an isolated context. + +Failing to use a fresh context hides real bugs — missing column mappings, broken relational configuration, precision loss, incorrect audit handling, or entities that never actually reached the database. + +## Required Pattern — Testing a Write Operation + +```csharp +[Fact] +public async Task Delete_by_id_should_remove_entity() +{ + // Arrange — seed via the plain DbContext (this code is NOT under test). 
+ var actualEntity = new TestEntity { Name = "ToDelete" }; + await DbContext.TestEntities.AddAsync(actualEntity); + await DbContext.SaveChangesAsync(); + actualEntity.Id.Should().BeGreaterThan(0); + + // Clear the change tracker so the seeded entity is not tracked when the + // tested operation runs. Without this, EF Core can short-circuit queries + // and the DeleteAsync call may behave as if the entity were already loaded. + DbContext.ChangeTracker.Clear(); + + // Act — exercise the code under test (Generic Repository + Unit of Work). + using var unitOfWork = CreateUnitOfWork(); + var repository = unitOfWork.Repository<TestEntity>(); + await repository.DeleteAsync(actualEntity.Id); + await unitOfWork.CommitAsync(); + + // Assert — verify via a fresh DbContext, NOT via the repository. + var rootDbContext = CreateRootDbContext(); + var result = await rootDbContext.TestEntities.FindAsync(actualEntity.Id); + result.Should().BeNull(); +} +``` + +### Checklist + +- [ ] Arrange seeded data with the plain `DbContext` property (not the repository). +- [ ] Call `DbContext.ChangeTracker.Clear()` between Arrange and Act when the seeded entity would otherwise remain tracked — always required before testing delete-by-id. +- [ ] Create the Unit of Work or repository with the `Create*` helpers and dispose/commit as appropriate. +- [ ] Obtain the verification context via `CreateRootDbContext()`. Never use `ScopedServiceProvider.GetRequiredService<TDbContext>()` — it returns the already-tracked instance. +- [ ] Query for the result directly on the root DbContext's `DbSet` (or `Set<TEntity>()`), not through a repository or a UoW. + +## When Testing a Read Operation + +Reading is safer, but the same principle applies in reverse: **seed via the plain `DbContext`**, then exercise the read through the repository. The assertion can use the value returned by the repository call — the repository itself is the code under test, and its return value *is* the observable output.
+ +You may still want to cross-check with `CreateRootDbContext()` to confirm eager-loading actually hit the database (e.g. navigation collections populated with the right counts). + +## Equivalency Assertions for Entities + +When asserting that a retrieved entity matches an expected one, prefer FluentAssertions' `BeEquivalentTo` / `ContainEquivalentOf` over a chain of property-level assertions — but configure it correctly. + +**Always call `.WithEntityEquivalencyOptions()`** from `Ploch.Data.EFCore.IntegrationTesting.FluentAssertions` on the options lambda. + +```csharp +using Ploch.Data.EFCore.IntegrationTesting.FluentAssertions; + +result.Should().BeEquivalentTo(expected, + options => options.WithEntityEquivalencyOptions()); +``` + +### What `WithEntityEquivalencyOptions` solves + +| Problem | Why it happens | What the extension does | +|---------|---------------|------------------------| +| **Collection ordering** | Databases do not guarantee row order for navigation collections | `WithoutStrictOrdering()` — match by value, not position | +| **Cyclic navigation properties** | EF Core populates inverse back-references (`BlogPost.Tag.BlogPosts.BlogPost…`) | `IgnoringCyclicReferences()` — stop at detected cycles | +| **`DateTimeOffset` precision loss** | SQLite stores `DateTimeOffset` as TEXT with ~100µs precision; .NET keeps 100ns ticks. Max observed delta ≈ 78µs | Applies a **1ms tolerance** (10× the max rounding error) on every `DateTimeOffset` comparison | +| **Null vs empty collections** | EF Core does not initialise navigation collections that were not eager-loaded — they stay `null`. In-memory entities initialise them to `new List()` | A custom `IEquivalencyStep` (`NullEmptyCollectionEquivalencyStep`) treats `null` as equivalent to an empty collection | + +### Combine with targeted exclusions, not with extra manual configuration + +When EF Core loads an entity, it back-fills inverse navigation references (e.g. 
`Tag.BlogPosts`) that your in-memory test setup did not populate. Exclude the affected properties, then chain `WithEntityEquivalencyOptions`: + +```csharp +result.Should().BeEquivalentTo(expected, options => options + .Excluding(p => p.Tags) + .Excluding(p => p.Categories) + .WithEntityEquivalencyOptions()); +``` + +For path-based exclusions (e.g. the nested inverse navigation `Tag.BlogPosts` but not `BlogPost.Tags`), use the member-info overload: + +```csharp +options.Excluding(info => info.Path.EndsWith(".BlogPosts")) + .WithEntityEquivalencyOptions() +``` + +Works the same way with collection assertions: + +```csharp +posts.Should().ContainEquivalentOf(expected, options => + options.Excluding(p => p.Categories).WithEntityEquivalencyOptions()); +``` + +### Do not reinvent the wheel + +If an equivalency test is failing due to ordering, cycles, `DateTimeOffset` mismatches, or null-vs-empty collections, **do not** manually add `WithoutStrictOrdering()`, `IgnoringCyclicReferences()`, custom `DateTimeOffset` comparers, or `.Using()` handlers. Call `WithEntityEquivalencyOptions()` instead. If the method does not cover your case, extend the method rather than papering over it per-test. + +## Quick Reference + +| I want to... | Use | +|--------------|-----| +| Seed data for a test | The base-class `DbContext` property | +| Exercise the code under test | `CreateUnitOfWork()` / `Create*Repository*()` | +| Verify the effect on the database | `CreateRootDbContext()` | +| Clear tracking state between Arrange and Act | `DbContext.ChangeTracker.Clear()` | +| Compare an in-memory entity with a DB-loaded one | `.BeEquivalentTo(expected, o => o.WithEntityEquivalencyOptions())` | + +## Anti-Patterns — Do Not Do These + +- ❌ `var ctx = ScopedServiceProvider.GetRequiredService();` to validate a write — this is the same instance the write went through. +- ❌ `repository.GetByIdAsync(id)` to verify a write done via the same (or another) repository. 
+- ❌ Manually constructing a new `DbContext` with a new `DbContextOptions` — it will not share the in-memory SQLite connection and will see an empty database. +- ❌ Manually chaining `WithoutStrictOrdering().IgnoringCyclicReferences()...` in each test — use `WithEntityEquivalencyOptions()`. +- ❌ Using `.Using().WhenTypeIs()` to handle null-vs-empty collections — this creates a nested `BeEquivalentTo` call that loses all configured options (DateTimeOffset tolerance, cyclic reference handling). The `NullEmptyCollectionEquivalencyStep` in `WithEntityEquivalencyOptions()` handles this correctly within the pipeline. +- ❌ Comparing `DateTimeOffset` values with `.Should().Be()` after a SQLite round-trip — the stored value loses sub-microsecond precision. + +## Related References + +- `DataIntegrationTest` — `src/Data.EFCore.IntegrationTesting/DataIntegrationTest.cs` +- `GenericRepositoryDataIntegrationTest` — `src/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTesting/GenericRepositoryDataIntegrationTest.cs` +- `EntitiesEquivalencyOptionsExtensions.WithEntityEquivalencyOptions` — `src/Data.EFCore.IntegrationTesting.FluentAssertions/EntitiesEquivalencyOptionsExtensions.cs` +- Broader guide: `docs/integration-testing.md` +- Workspace-wide .NET testing conventions: `../.claude/rules/writing-dotnet-tests.md` diff --git a/.claude/rules/naming.md b/.claude/rules/naming.md new file mode 120000 index 0000000..a216448 --- /dev/null +++ b/.claude/rules/naming.md @@ -0,0 +1 @@ +../../../.claude/rules/naming.md \ No newline at end of file diff --git a/.claude/rules/pr-descriptions.md b/.claude/rules/pr-descriptions.md new file mode 120000 index 0000000..a9c0492 --- /dev/null +++ b/.claude/rules/pr-descriptions.md @@ -0,0 +1 @@ +../../../.claude/rules/pr-descriptions.md \ No newline at end of file diff --git a/.claude/rules/project-structure.md b/.claude/rules/project-structure.md new file mode 120000 index 0000000..2a5ccab --- /dev/null +++ 
b/.claude/rules/project-structure.md @@ -0,0 +1 @@ +../../../.claude/rules/project-structure.md \ No newline at end of file diff --git a/.claude/rules/qa.md b/.claude/rules/qa.md new file mode 120000 index 0000000..5c631b5 --- /dev/null +++ b/.claude/rules/qa.md @@ -0,0 +1 @@ +../../../.claude/rules/qa.md \ No newline at end of file diff --git a/.claude/rules/rules.md b/.claude/rules/rules.md new file mode 120000 index 0000000..2db4331 --- /dev/null +++ b/.claude/rules/rules.md @@ -0,0 +1 @@ +../../../.claude/rules/rules.md \ No newline at end of file diff --git a/.claude/rules/summaries.md b/.claude/rules/summaries.md new file mode 120000 index 0000000..59b2014 --- /dev/null +++ b/.claude/rules/summaries.md @@ -0,0 +1 @@ +../../../.claude/rules/summaries.md \ No newline at end of file diff --git a/.claude/rules/todo-tasks-execution.md b/.claude/rules/todo-tasks-execution.md new file mode 120000 index 0000000..b40cc11 --- /dev/null +++ b/.claude/rules/todo-tasks-execution.md @@ -0,0 +1 @@ +../../../.claude/rules/todo-tasks-execution.md \ No newline at end of file diff --git a/.claude/rules/writing-dotnet-tests.md b/.claude/rules/writing-dotnet-tests.md new file mode 120000 index 0000000..90dacbd --- /dev/null +++ b/.claude/rules/writing-dotnet-tests.md @@ -0,0 +1 @@ +../../../.claude/rules/writing-dotnet-tests.md \ No newline at end of file diff --git a/.claude/settings.local.json b/.claude/settings.local.json deleted file mode 100644 index 352ade4..0000000 --- a/.claude/settings.local.json +++ /dev/null @@ -1,50 +0,0 @@ -{ - "permissions": { - "allow": [ - "Bash(*)", - "Read(*)", - "Edit(*)", - "Write(*)", - "Glob(*)", - "Grep(*)", - "Fetch(*)", - "WebFetch(*)", - "WebSearch(*)", - "TodoWrite(*)", - "NotebookEdit(*)", - "EnterPlanMode(*)", - "ExitPlanMode(*)", - "AskUserQuestion(*)", - "Task(*)", - "Skill(*)", - "EnterWorktree(*)", - "mcp__contextstream__*", - "mcp__ide__*", - "mcp__claude_ai_Microsoft_Learn__*", - "mcp__claude_ai_Context7__*", - 
"mcp__claude_ai_Notion__*", - "mcp__windows-mcp__*", - "mcp__github__*", - "mcp__magic__*", - "mcp__plugin_context7_context7__*", - "mcp__plugin_github_github__*", - "mcp__plugin_playwright_playwright__*" - ], - "deny": [ - "Read(.env)", - "Read(.env.*)", - "Read(**/.env)", - "Read(**/.env.*)", - "Read(**/secrets/**)", - "Read(**/*.pem)", - "Read(**/*.key)", - "Read(~/.ssh/**)", - "Read(~/.aws/**)", - "Read(~/.config/gcloud/**)" - ] - }, - "enableAllProjectMcpServers": true, - "enabledMcpjsonServers": [ - "contextstream" - ] -} diff --git a/.claude/skills/commit b/.claude/skills/commit new file mode 120000 index 0000000..01c2735 --- /dev/null +++ b/.claude/skills/commit @@ -0,0 +1 @@ +../../../.claude/skills/commit \ No newline at end of file diff --git a/.claude/skills/dotnet-dev-finishing-touches b/.claude/skills/dotnet-dev-finishing-touches new file mode 120000 index 0000000..0ce8d86 --- /dev/null +++ b/.claude/skills/dotnet-dev-finishing-touches @@ -0,0 +1 @@ +../../../.claude/skills/dotnet-dev-finishing-touches \ No newline at end of file diff --git a/.claude/skills/dotnet-dev-practical b/.claude/skills/dotnet-dev-practical new file mode 120000 index 0000000..60307b7 --- /dev/null +++ b/.claude/skills/dotnet-dev-practical @@ -0,0 +1 @@ +../../../.claude/skills/dotnet-dev-practical \ No newline at end of file diff --git a/.claude/skills/implement b/.claude/skills/implement new file mode 120000 index 0000000..84e226a --- /dev/null +++ b/.claude/skills/implement @@ -0,0 +1 @@ +../../../.claude/skills/implement \ No newline at end of file diff --git a/.claude/skills/implement-issue b/.claude/skills/implement-issue new file mode 120000 index 0000000..070e010 --- /dev/null +++ b/.claude/skills/implement-issue @@ -0,0 +1 @@ +../../../.claude/skills/implement-issue \ No newline at end of file diff --git a/.claude/skills/pr b/.claude/skills/pr new file mode 120000 index 0000000..6d87b90 --- /dev/null +++ b/.claude/skills/pr @@ -0,0 +1 @@ +../../../.claude/skills/pr \ 
No newline at end of file diff --git a/.claude/skills/prompt-lookup b/.claude/skills/prompt-lookup new file mode 120000 index 0000000..63bb283 --- /dev/null +++ b/.claude/skills/prompt-lookup @@ -0,0 +1 @@ +../../../.claude/skills/prompt-lookup \ No newline at end of file diff --git a/.claude/skills/qa-explore b/.claude/skills/qa-explore new file mode 120000 index 0000000..dc8a202 --- /dev/null +++ b/.claude/skills/qa-explore @@ -0,0 +1 @@ +../../../.claude/skills/qa-explore \ No newline at end of file diff --git a/.claude/skills/review-pr b/.claude/skills/review-pr new file mode 120000 index 0000000..ce8bbe1 --- /dev/null +++ b/.claude/skills/review-pr @@ -0,0 +1 @@ +../../../.claude/skills/review-pr \ No newline at end of file diff --git a/.claude/skills/review-pr-comments b/.claude/skills/review-pr-comments new file mode 120000 index 0000000..58f3547 --- /dev/null +++ b/.claude/skills/review-pr-comments @@ -0,0 +1 @@ +../../../.claude/skills/review-pr-comments \ No newline at end of file diff --git a/.codacy.yml b/.codacy.yml index 5100775..dbc2afe 100644 --- a/.codacy.yml +++ b/.codacy.yml @@ -14,15 +14,30 @@ engines: exclude_paths: - "**/README.md" exclude_paths: + # Test and sample projects are excluded from all Codacy engines. + # The sonarcsharp engine already excludes them, but the global list ensures + # other engines (e.g. Codacy's internal C# analyzer) honour the same scope. + - "tests/**" + - "src/**Tests/**" + - "samples/**" - "AGENTS.md" - "GEMINI.md" - "CLAUDE.md" + - "TODO.md" - "TODO-archive.md" - "change-log/**" - ".junie/**" - ".claude/**" + - ".aiassistant/**" - ".contextstream/**" - ".cursorrules" + - ".github/agents/**" + - ".github/copilot-instructions.md" + - ".github/git-commit-instructions.md" + # The Copilot PR pipeline workflow is tracked for rework in + # https://github.com/mrploch/ploch-data/issues/79 (request-body schema + # mismatch + timeout-as-success bug). Re-enable analysis after rework. 
+ - ".github/workflows/copilot-pr-pipeline.yml" - "workload-install.ps1" - "docs/**" - ".syncignore" \ No newline at end of file diff --git a/.cursorrules b/.cursorrules index 8f79823..f4db98b 100644 --- a/.cursorrules +++ b/.cursorrules @@ -4,7 +4,7 @@ # Workspace ID: 57db5f34-e7f0-42c0-86c4-bb981f96c880 # ContextStream Rules -**MANDATORY STARTUP:** If ContextStream tools are available, on the first message of every session call `init(...)` then `context(user_message="...")`. On subsequent messages, call `context(user_message="...")` first by default. If ContextStream tools are unavailable, proceed with the platform's available tools. +**MANDATORY STARTUP:** If ContextStream tools are available, on the first message of every session call `init(...)` then `context(user_message="...")`. On subsequent messages, call `context(user_message="...")` first by default. A narrow bypass is allowed only for immediate read-only ContextStream calls when prior context is still fresh and no state-changing tool has run. If ContextStream tools are unavailable, proceed with the platform's available tools. ## Quick Rules @@ -20,7 +20,9 @@ **Common queries — use these exact tool calls:** - "list lessons" / "show lessons" → `session(action="get_lessons")` +- "save lesson" / "remember this lesson" / "lesson learned" / "I made a mistake" → `session(action="capture_lesson", title="...", trigger="...", impact="...", prevention="...", severity="low|medium|high|critical")` — **NEVER store lessons in local files** (e.g. `~/.claude/.../memory/`, `.cursorrules`, scratch markdown). Lessons live in ContextStream so they auto-surface as `[LESSONS_WARNING]` on future turns and across sessions. 
- "list decisions" / "show decisions" / "how many decisions" → `memory(action="decisions")` +- "save decision" / "decided to" → `session(action="capture", event_type="decision", title="...", content="...")` - "list docs" → `memory(action="list_docs")` - "list tasks" → `memory(action="list_tasks")` - "list todos" → `memory(action="list_todos")` @@ -28,8 +30,12 @@ - "list events" → `memory(action="list_events")` - "show snapshots" / "list snapshots" → `memory(action="list_events", event_type="session_snapshot")` - "save snapshot" → `session(action="capture", event_type="session_snapshot", title="...", content="...")` +- "what did we do last session" / "past sessions" / "previous work" / "pick up where we left off" → `session(action="recall", query="...")` (ranked context) OR `memory(action="list_transcripts", limit=10)` (chronological list) +- "search past sessions" / "find in past transcripts" / "when did we discuss X" → `memory(action="search_transcripts", query="...")` — full-text search over saved conversation transcripts +- "show transcript" / "read session <id>" → `memory(action="get_transcript", transcript_id="...")` - "list skills" / "show my skills" → `skill(action="list")` -- "create a skill" → `skill(action="create", name="...", instruction_body="...", trigger_patterns=[...])` +- "create a skill" → `skill(action="create", name="...", instruction_body="...", project_id="...", trigger_patterns=[...])` +- "update a skill" → `skill(action="update", name="...", instruction_body="...", change_summary="...")` - "run skill" / "use skill" → `skill(action="run", name="...")` - "import skills" / "import my CLAUDE.md" → `skill(action="import", file_path="...", format="auto")`
+## Finding Information — Search ContextStream Knowledge, Not Just Code + +**Auto-grounding:** Every `context(user_message="...")` call may include a `[GROUNDING]` block — pre-ranked prior work (transcripts, snapshots, docs, decisions, lessons) for **this** message. When you see it, read those hits **before** fanning out into code search; skipping search entirely is often correct. Outside `context()`, use `session(action="ground", user_message="...")` for the same one-shot bundle (recall + docs + decisions + lessons + skills + git). + +When you need information, do not default to code search or trial-and-error. ContextStream stores far more than source — docs, decisions, lessons, preferences, plans, tasks, todos, skills, memory nodes, and full session transcripts all live behind dedicated tools. Pick the right knowledge surface by what you're looking for: + +- **Source code / symbol / file** → `search(mode="auto", query="...")` +- **Why we did X / past decisions** → `memory(action="decisions", query="...")` +- **Architecture / spec / design doc** → `memory(action="list_docs")` then `memory(action="get_doc", doc_id="title or UUID")` +- **Prior mistakes ("never do X again")** → `session(action="get_lessons", query="...")` +- **User preferences / conventions / constraints** → already surfaced as `[PREFERENCE]`; also `memory(action="list_nodes", node_type="preference")` or `memory(action="list_nodes", node_type="constraint")` +- **Open work / tasks / todos** → `memory(action="list_tasks")` / `memory(action="list_todos")` +- **Active or past plans** → `session(action="list_plans")` then `session(action="get_plan", plan_id="...")` +- **Reusable workflows / skills** → `skill(action="list")` then `skill(action="run", name="...")` +- **"What did we do before?" 
(continuation work)** → `session(action="recall", query="...")` — see the Past Sessions ladder below +- **Unsure which surface** → `memory(action="search", query="...")` — hybrid across memory nodes + docs; falls back to `session(action="recall", query="...")` for transcript/snapshot coverage + +Default assumption: if the user asks "how do we do X?", "why did we choose Y?", "what's the pattern for Z?", or "did we already decide about Q?" — the answer is likely in a doc, decision, lesson, plan, or skill, NOT in the code. Check the right knowledge surface BEFORE reading source files or re-deriving the answer. + +Before guessing, improvising, or struggling through a workflow you don't fully know: +- Start with `context(...)` and obey `[GROUNDING]` (prior-work anchors), `[MATCHED_SKILLS]`, `[LESSONS_WARNING]`, `[PREFERENCE]`, `[DECISIONS]`, `[MEMORY]`, and `` output — those are already filtered to the current task +- Treat `[LESSONS_WARNING]` as active working instructions for the current task, not optional background context; apply them immediately and keep them in mind until the task is done +- Prefer surfaced ContextStream knowledge over inventing a new workflow from memory + + +## Past Sessions Are Queryable — USE THEM + +### Auto-Grounding (in `context()`) + +When `context()` returns `[GROUNDING]`, those lines are **pre-ranked prior work for your current message** — read them first (transcript/snapshot/doc/decision/lesson entry points). Skipping code search is often correct. For the same bundle **outside** `context()`, call `session(action="ground", user_message="...")`. + +Transcripts for every turn of every session are captured and indexed automatically. Session snapshots bookmark turning points. **Before asking the user what you did last time, or re-deriving context you built together previously, check the transcript + snapshot layer.** It's fast, it's complete, and the user is paying for it. 
+ +Triggers to query past sessions: +- User says "last time", "previous", "yesterday", "earlier", "we decided", "we talked about", "pick up where we left off", "what were we working on" +- You have a task that's clearly a continuation (e.g. finishing a refactor that's half-done on disk) +- You're about to ask a clarifying question whose answer is likely in a prior session +- You're unsure whether a decision or approach has already been made + +Escalation ladder — walk it in order and stop at the first step that answers the question: + +1. **`session(action="recall", query="...")`** — always the first call. Ranked fusion across transcripts, snapshots, docs, and decisions. Covers 80% of "what did we do before" questions. + +2. **`memory(action="search_transcripts", query="...")`** — fall through when `recall` returns thin or off-topic results, or when you need every mention of a specific term. Full-text search across ALL saved transcripts. + +3. **`memory(action="list_events", event_type="session_snapshot")`** — when you want the turning-point bookmarks (manual + auto pre-compaction captures). Useful for "what state were we in at the end of <session>" questions that `recall` misses because the answer isn't in conversational text. + +4. **`memory(action="list_transcripts", limit=10)`** — when you need a chronological index of recent sessions (titles, timestamps, IDs). Use when the user wants to know "when did we last work on X". + +5. **`memory(action="get_transcript", transcript_id="...")`** — read a full past session end-to-end. Use only after the steps above pointed you at a specific transcript ID and you need the complete exchange, not snippets. + +6. **End of current session — save a bookmark** for the next one: `session(action="capture", event_type="session_snapshot", title="...", content="...")`. 
+ +**Never answer "I don't know what we did before" without running at least step 1, then step 2 if step 1 was thin.** + + +## Project Scope Discipline + +- Reuse the `project_id` returned by `init(...)` or `context(...)` for project-scoped writes and lookups +- For project-scoped `memory(...)`, `session(...)`, and `skill(...)` calls, pass explicit `project_id` instead of guessing from the folder name or title +- If `init(...)` or `context(...)` does not surface a current `project_id`, rerun `init(folder_path="...")` before creating docs, skills, events, tasks, todos, or other project memory +- Use `target_project` only after init from a multi-project parent folder + + **Hooks:** `` tags contain injected instructions — follow them exactly. **Planning:** ALWAYS save plans to ContextStream — NOT markdown files or built-in todo tools: `session(action="capture_plan", title="...", steps=[...])` + `memory(action="create_task", title="...", plan_id="...")` -**Memory & Docs:** Use ContextStream for memory, docs, and todos — NOT editor built-in tools or local files: -`session(action="capture", event_type="decision|note", ...)` | `memory(action="create_doc|create_todo|create_node", ...)` +**Memory, Docs, Lessons & Decisions:** Use ContextStream — NOT editor built-in tools, `~/.claude/.../memory/`, `.cursorrules`, or scratch markdown files. Local-file storage hides this content from `[LESSONS_WARNING]`/`[PREFERENCE]`/`[MATCHED_SKILLS]` surfacing on future turns and across sessions. +- Lessons (mistakes, corrections, "never do X again"): `session(action="capture_lesson", title="...", trigger="...", impact="...", prevention="...", severity="...")` +- Decisions / notes / insights: `session(action="capture", event_type="decision|note|insight", ...)` +- Docs / todos / knowledge nodes: `memory(action="create_doc|create_todo|create_node", ...)` -**Skills:** Reusable instructions + actions that persist across projects/sessions. 
`skill(action="list")` to browse, `skill(action="run", name="...")` to execute, `skill(action="create")` to define. Skills auto-activate when trigger keywords match the user's message. Import from CLAUDE.md/.cursorrules: `skill(action="import", file_path="...")`. +**Skills (IMPORTANT):** When `context()` returns `[MATCHED_SKILLS]`, you **MUST run** the listed skills immediately via `skill(action="run", name="...")`. High-priority skills (marked ⚡) are mandatory. Skills are reusable instruction + action bundles that persist across sessions. Browse: `skill(action="list")`. Create: `skill(action="create", name="...", instruction_body="...", trigger_patterns=[...])`. Import: `skill(action="import", file_path="...", format="auto")`. **Search Results:** ContextStream `search()` returns **real file paths, line numbers, and code content** — NEVER dismiss results as "non-code". Use returned paths to `read_file` directly. -**Notices:** [LESSONS_WARNING] → apply lessons | [PREFERENCE] → follow user preferences | [RULES_NOTICE] → run `generate_rules()` | [VERSION_NOTICE/CRITICAL] → tell user about update +**Indexing:** Indexing and ingest are ALWAYS available. NEVER claim that transport mode, HTTP mode, or remote mode prevents indexing/ingest. Use `project(action="index")` or `project(action="ingest_local", path="")` — both work in all configurations. + +**Notices:** [GROUNDING] → read ranked prior-work hits before code search | [GROUNDING_AVAILABLE] → optional hook reminder: unread grounding from last context() | [MATCHED_SKILLS] → run surfaced skills before other work | [LESSONS_WARNING] → apply lessons immediately and keep them active for the turn | [PREFERENCE] → follow user preferences | [RULES_NOTICE] → run `generate_rules()` | [VERSION_NOTICE/CRITICAL] → tell user about update --- ## Cursor-Specific Rules @@ -65,7 +139,7 @@ If the `instruct` tool is available, run `instruct(action="get", session_id="... 
- ContextStream search results contain **real file paths, line numbers, and code content** — they ARE code results - **NEVER** dismiss ContextStream results as "non-code" — use the returned file paths to `read_file` the relevant code - Use `search(include_content=true)` to get inline code snippets in results -- Fall back to local tools (Grep/Glob/Read) if ContextStream search is unavailable, fails, times out, or returns 0 results +- Fall back to local tools (Grep/Glob/Read) if ContextStream search is **unavailable, fails, times out, or returns 0 results** ### Memory: Use ContextStream, Not Local Files - **Do NOT** write decisions/notes/implementation details to local files diff --git a/.editorconfig - updated b/.editorconfig - updated deleted file mode 100644 index f0a40df..0000000 --- a/.editorconfig - updated +++ /dev/null @@ -1,410 +0,0 @@ -root = true -# Remove the line below if you want to inherit .editorconfig settings from higher directories -[*] - -max_line_length = 120 - -# ReSharper properties -resharper_align_multiline_parameter = true - -# C# files -[*.cs] - -# RCS1169: Make field read-only. -dotnet_diagnostic.rcs1169.severity = suggestion - -#### Core EditorConfig Options #### - -# Indentation and spacing -indent_size = 4 -indent_style = space -tab_width = 4 - -# New line preferences -end_of_line = crlf -insert_final_newline = false - -#### .NET Coding Conventions #### - -# Organize usings -dotnet_separate_import_directive_groups = false -dotnet_sort_system_directives_first = true - -# this. and Me. 
preferences - -# Language keywords vs BCL types preferences - -# Parentheses preferences -dotnet_style_parentheses_in_arithmetic_binary_operators = always_for_clarity:none -dotnet_style_parentheses_in_other_binary_operators = always_for_clarity:none -dotnet_style_parentheses_in_other_operators = never_if_unnecessary:silent -dotnet_style_parentheses_in_relational_binary_operators = always_for_clarity:none - -# Modifier preferences - -# Expression-level preferences -dotnet_style_coalesce_expression = true:suggestion -dotnet_style_collection_initializer = true:suggestion -dotnet_style_explicit_tuple_names = true:suggestion -dotnet_style_null_propagation = true:suggestion -dotnet_style_object_initializer = true:suggestion -dotnet_style_operator_placement_when_wrapping = beginning_of_line -dotnet_style_prefer_auto_properties = true:silent -dotnet_style_prefer_compound_assignment = true:suggestion -dotnet_style_prefer_conditional_expression_over_assignment = true:silent -dotnet_style_prefer_conditional_expression_over_return = true:silent -dotnet_style_prefer_inferred_anonymous_type_member_names = true:suggestion -dotnet_style_prefer_inferred_tuple_names = true:suggestion -dotnet_style_prefer_is_null_check_over_reference_equality_method = true:suggestion -dotnet_style_prefer_simplified_boolean_expressions = true:suggestion -dotnet_style_prefer_simplified_interpolation = true:suggestion - - -# Field preferences -dotnet_style_readonly_field = true:suggestion - -# Parameter preferences -dotnet_code_quality_unused_parameters = all:suggestion - -# Suppression preferences -dotnet_remove_unnecessary_suppression_exclusions = none - -#### C# Coding Conventions #### - -# var preferences - -# Expression-bodied members -csharp_style_expression_bodied_indexers = true:silent -csharp_style_expression_bodied_lambdas = true:silent -csharp_style_expression_bodied_local_functions = false:silent -csharp_style_expression_bodied_operators = false:silent - -# Pattern matching preferences 
-csharp_style_pattern_matching_over_as_with_null_check = true:suggestion -csharp_style_pattern_matching_over_is_with_cast_check = true:suggestion -csharp_style_prefer_not_pattern = true:suggestion -csharp_style_prefer_pattern_matching = true:silent -csharp_style_prefer_switch_expression = true:suggestion - -# Null-checking preferences -csharp_style_conditional_delegate_call = true:suggestion - -# Modifier preferences -csharp_prefer_static_local_function = true:suggestion -csharp_preferred_modifier_order = public, private, protected, internal, static, extern, new, virtual, abstract, sealed, override, readonly, required, unsafe, volatile, async, file:suggestion - -# Code-block preferences -csharp_prefer_simple_using_statement = true:suggestion - -# Expression-level preferences -csharp_prefer_simple_default_expression = true:suggestion -csharp_style_deconstructed_variable_declaration = true:suggestion -csharp_style_inlined_variable_declaration = true:suggestion -csharp_style_pattern_local_over_anonymous_function = true:suggestion -csharp_style_prefer_index_operator = true:suggestion -csharp_style_prefer_range_operator = true:suggestion -csharp_style_throw_expression = true:suggestion -csharp_style_unused_value_assignment_preference = discard_variable:suggestion -csharp_style_unused_value_expression_statement_preference = discard_variable:silent - -# 'using' directive preferences -csharp_using_directive_placement = outside_namespace:silent - -#### C# Formatting Rules #### - -# New line preferences -csharp_new_line_before_catch = true -csharp_new_line_before_else = true -csharp_new_line_before_finally = true -csharp_new_line_before_members_in_anonymous_types = true -csharp_new_line_before_members_in_object_initializers = true -csharp_new_line_before_open_brace = all -csharp_new_line_between_query_expression_clauses = true - -# Indentation preferences -csharp_indent_block_contents = true -csharp_indent_braces = false -csharp_indent_case_contents = true 
-csharp_indent_case_contents_when_block = true -csharp_indent_labels = no_change -csharp_indent_switch_labels = true - -# Space preferences -csharp_space_after_cast = false -csharp_space_after_colon_in_inheritance_clause = true -csharp_space_after_comma = true -csharp_space_after_dot = false -csharp_space_after_keywords_in_control_flow_statements = true -csharp_space_after_semicolon_in_for_statement = true -csharp_space_around_binary_operators = before_and_after -csharp_space_around_declaration_statements = false -csharp_space_before_colon_in_inheritance_clause = true -csharp_space_before_comma = false -csharp_space_before_dot = false -csharp_space_before_open_square_brackets = false -csharp_space_before_semicolon_in_for_statement = false -csharp_space_between_empty_square_brackets = false -csharp_space_between_method_call_empty_parameter_list_parentheses = false -csharp_space_between_method_call_name_and_opening_parenthesis = false -csharp_space_between_method_call_parameter_list_parentheses = false -csharp_space_between_method_declaration_empty_parameter_list_parentheses = false -csharp_space_between_method_declaration_name_and_open_parenthesis = false -csharp_space_between_method_declaration_parameter_list_parentheses = false -csharp_space_between_parentheses = false -csharp_space_between_square_brackets = false - -# Wrapping preferences -csharp_preserve_single_line_blocks = true -csharp_preserve_single_line_statements = true - -#### Naming styles #### - -# Naming rules - -dotnet_naming_rule.interface_should_be_begins_with_i.severity = warning -dotnet_naming_rule.interface_should_be_begins_with_i.symbols = interface -dotnet_naming_rule.interface_should_be_begins_with_i.style = begins_with_i - -dotnet_naming_rule.types_should_be_pascal_case.severity = warning -dotnet_naming_rule.types_should_be_pascal_case.symbols = types -dotnet_naming_rule.types_should_be_pascal_case.style = pascal_case - -dotnet_naming_rule.non_field_members_should_be_pascal_case.severity = 
warning -dotnet_naming_rule.non_field_members_should_be_pascal_case.symbols = non_field_members -dotnet_naming_rule.non_field_members_should_be_pascal_case.style = pascal_case - -dotnet_naming_rule.private_or_internal_field_should_be_prefixedcamelcase.severity = warning -dotnet_naming_rule.private_or_internal_field_should_be_prefixedcamelcase.symbols = private_or_internal_field -dotnet_naming_rule.private_or_internal_field_should_be_prefixedcamelcase.style = prefixedcamelcase - -dotnet_naming_rule.static_field_should_be_pascal_case.severity = warning -dotnet_naming_rule.static_field_should_be_pascal_case.symbols = static_field -dotnet_naming_rule.static_field_should_be_pascal_case.style = pascal_case - -# Symbol specifications - -dotnet_naming_symbols.interface.applicable_kinds = interface -dotnet_naming_symbols.interface.applicable_accessibilities = public, internal, private, protected, protected_internal, private_protected -dotnet_naming_symbols.interface.required_modifiers = - -dotnet_naming_symbols.static_field.applicable_kinds = field -dotnet_naming_symbols.static_field.applicable_accessibilities = public, internal, private, protected, protected_internal, private_protected -dotnet_naming_symbols.static_field.required_modifiers = static - -dotnet_naming_symbols.private_or_internal_field.applicable_kinds = field -dotnet_naming_symbols.private_or_internal_field.applicable_accessibilities = internal, private, private_protected -dotnet_naming_symbols.private_or_internal_field.required_modifiers = - -dotnet_naming_symbols.types.applicable_kinds = class, struct, interface, enum -dotnet_naming_symbols.types.applicable_accessibilities = public, internal, private, protected, protected_internal, private_protected -dotnet_naming_symbols.types.required_modifiers = - -dotnet_naming_symbols.non_field_members.applicable_kinds = property, event, method -dotnet_naming_symbols.non_field_members.applicable_accessibilities = public, internal, private, protected, 
protected_internal, private_protected -dotnet_naming_symbols.non_field_members.required_modifiers = - -# Naming styles - -dotnet_naming_style.pascal_case.required_prefix = -dotnet_naming_style.pascal_case.required_suffix = -dotnet_naming_style.pascal_case.word_separator = -dotnet_naming_style.pascal_case.capitalization = pascal_case - -dotnet_naming_style.begins_with_i.required_prefix = I -dotnet_naming_style.begins_with_i.required_suffix = -dotnet_naming_style.begins_with_i.word_separator = -dotnet_naming_style.begins_with_i.capitalization = pascal_case - -dotnet_naming_style.prefixedcamelcase.required_prefix = _ -dotnet_naming_style.prefixedcamelcase.required_suffix = -dotnet_naming_style.prefixedcamelcase.word_separator = -dotnet_naming_style.prefixedcamelcase.capitalization = camel_case - -# StyleCop - -dotnet_diagnostic.sa1600.severity = none -dotnet_diagnostic.sa1116.severity = none -dotnet_diagnostic.sa1101.severity = none -dotnet_diagnostic.sa1413.severity = none -dotnet_diagnostic.sa1309.severity = none -dotnet_diagnostic.sa1623.severity = none -dotnet_diagnostic.sa1640.severity = none -dotnet_diagnostic.sa1127.severity = none -dotnet_diagnostic.sa1200.severity = none -dotnet_diagnostic.sa1502.severity = none -dotnet_diagnostic.sa1128.severity = none -dotnet_diagnostic.sa1133.severity = none -dotnet_diagnostic.sa1009.severity = none -dotnet_diagnostic.sa1000.severity = none -dotnet_diagnostic.sa1633.severity = none -dotnet_diagnostic.sa1649.severity = warning # File name should match first type name. 
- -# ReSharper properties -resharper_align_multiline_parameter = true -resharper_braces_for_for = required -resharper_braces_for_foreach = required -resharper_braces_for_ifelse = required -resharper_braces_for_while = required -resharper_braces_redundant = false -resharper_csharp_empty_block_style = together -resharper_csharp_max_line_length = 170 -resharper_csharp_place_type_constraints_on_same_line = false -resharper_csharp_wrap_parameters_style = chop_if_long -resharper_enforce_line_ending_style = true -resharper_keep_existing_declaration_block_arrangement = false -resharper_keep_existing_declaration_parens_arrangement = false -resharper_keep_existing_enum_arrangement = false -resharper_keep_existing_expr_member_arrangement = false -resharper_keep_existing_invocation_parens_arrangement = false -resharper_keep_existing_property_patterns_arrangement = false -resharper_resx_max_line_length = 120 -resharper_show_autodetect_configure_formatting_tip = false -resharper_use_heuristics_for_body_style = false -resharper_use_indent_from_vs = false -resharper_wrap_lines = true - -# Checks - -dotnet_diagnostic.ca1001.severity = warning -dotnet_diagnostic.ca1009.severity = warning -dotnet_diagnostic.cc0108.severity = warning -dotnet_diagnostic.ca1851.severity = none # Possible multiple enumerations of IEnumerable collection - -dotnet_diagnostic.cs1591.severity = warning # Missing XML comment for publicly visible type or member 'Type_or_Member' - -# Nullable Reference Types - -dotnet_diagnostic.cs8600.severity = warning # Converting null literal or possible null value to non-nullable type. -dotnet_diagnostic.cs8608.severity = error # Possible null reference argument. -dotnet_diagnostic.cs8609.severity = error # Possible null reference return. -dotnet_diagnostic.cs8610.severity = error # Nullability of reference types in type parameter doesn't match overridden member. 
-dotnet_diagnostic.cs8611.severity = error # Nullability of reference types in type parameter doesn't match partial method declaration. -dotnet_diagnostic.cs8612.severity = error # Nullability of reference types in type doesn't match implicitly implemented member. -dotnet_diagnostic.cs8613.severity = error # Nullability of reference types in return type doesn't match implicitly implemented member. -dotnet_diagnostic.cs8614.severity = error # Nullability of reference types in type of parameter doesn't match implicitly implemented member. -dotnet_diagnostic.cs8615.severity = error # Nullability of reference types in type doesn't match implemented member. -dotnet_diagnostic.cs8616.severity = error # Nullability of reference types in return type doesn't match implemented member. -dotnet_diagnostic.cs8617.severity = error # Nullability of reference types in type of parameter doesn't match implemented member. -dotnet_diagnostic.cs8618.severity = error # Non-nullable field is uninitialized. Consider declaring as nullable. -dotnet_diagnostic.cs8619.severity = error # Nullability of reference types in value doesn't match target type. -dotnet_diagnostic.cs8620.severity = warning # Argument cannot be null. -dotnet_diagnostic.cs8621.severity = error # Nullability of reference types in return type doesn't match delegate type parameter. -dotnet_diagnostic.cs8622.severity = error # Nullability of reference types in type of parameter doesn't match delegate type parameter. -dotnet_diagnostic.cs8624.severity = error # Argument cannot be used for parameter due to differences in the nullability of reference types. -dotnet_diagnostic.cs8625.severity = error # Cannot convert null literal to non-nullable reference type. -dotnet_diagnostic.cs8629.severity = error # Nullable value type may be null. -dotnet_diagnostic.cs8643.severity = error # Nullability of reference types in explicit interface specifier doesn't match interface implemented by the type. 
-dotnet_diagnostic.cs8644.severity = error # Type does not implement interface member. Nullability of reference types in interface implemented by the base type doesn't match. - -resharper_possible_multiple_enumeration = none - -# ReSharper inspection severities -resharper_arrange_accessor_owner_body_highlighting = none -resharper_arrange_modifiers_order_highlighting = hint -resharper_arrange_this_qualifier_highlighting = none -resharper_arrange_type_member_modifiers_highlighting = none -resharper_arrange_type_modifiers_highlighting = none -resharper_built_in_type_reference_style_for_member_access_highlighting = none -resharper_built_in_type_reference_style_highlighting = none -resharper_mvc_action_not_resolved_highlighting = warning -resharper_mvc_area_not_resolved_highlighting = warning -resharper_mvc_controller_not_resolved_highlighting = warning -resharper_mvc_masterpage_not_resolved_highlighting = warning -resharper_mvc_partial_view_not_resolved_highlighting = warning -resharper_mvc_template_not_resolved_highlighting = warning -resharper_mvc_view_component_not_resolved_highlighting = warning -resharper_mvc_view_component_view_not_resolved_highlighting = warning -resharper_mvc_view_not_resolved_highlighting = warning -resharper_possible_multiple_enumeration_highlighting = none -resharper_razor_assembly_not_resolved_highlighting = warning -resharper_redundant_base_qualifier_highlighting = none -resharper_suggest_var_or_type_built_in_types_highlighting = none -resharper_suggest_var_or_type_elsewhere_highlighting = none -resharper_suggest_var_or_type_simple_types_highlighting = none -resharper_web_config_module_not_resolved_highlighting = warning -resharper_web_config_type_not_resolved_highlighting = warning -resharper_web_config_wrong_module_highlighting = warning - -# Microsoft .NET properties -csharp_style_prefer_utf8_string_literals = true:suggestion -dotnet_diagnostic.ca2252.severity = warning 
-dotnet_naming_rule.interface_should_be_begins_with_i.import_to_resharper = True -dotnet_naming_rule.interface_should_be_begins_with_i.resharper_description = interface_should_be_begins_with_i -dotnet_naming_rule.interface_should_be_begins_with_i.resharper_guid = d9464fad-754c-4d9b-929e-4a5687447e09 -dotnet_naming_rule.non_field_members_should_be_pascal_case.import_to_resharper = True -dotnet_naming_rule.non_field_members_should_be_pascal_case.resharper_description = non_field_members_should_be_pascal_case -dotnet_naming_rule.non_field_members_should_be_pascal_case.resharper_guid = f0a1e30f-d7ab-4b7d-9da9-83300dcfc496 -dotnet_naming_rule.private_constants_rule.import_to_resharper = True -dotnet_naming_rule.private_constants_rule.resharper_description = Constant fields (private) -dotnet_naming_rule.private_constants_rule.resharper_guid = 236f7aa5-7b06-43ca-bf2a-9b31bfcff09a -dotnet_naming_rule.private_constants_rule.severity = warning -dotnet_naming_rule.private_constants_rule.style = prefixedcamelcase -dotnet_naming_rule.private_constants_rule.symbols = private_constants_symbols -dotnet_naming_rule.private_or_internal_field_should_be_prefixedcamelcase.import_to_resharper = True -dotnet_naming_rule.private_or_internal_field_should_be_prefixedcamelcase.resharper_description = private_or_internal_field_should_be_prefixedcamelcase -dotnet_naming_rule.private_or_internal_field_should_be_prefixedcamelcase.resharper_guid = 7bb8990e-1f2f-4e51-9d00-822cde3c7587 -dotnet_naming_rule.private_static_fields_rule.import_to_resharper = True -dotnet_naming_rule.private_static_fields_rule.resharper_description = Static fields (private) -dotnet_naming_rule.private_static_fields_rule.resharper_guid = f9fce829-e6f4-4cb2-80f1-5497c44f51df -dotnet_naming_rule.private_static_fields_rule.resharper_style = _ + aaBb, AaBb -dotnet_naming_rule.private_static_fields_rule.severity = warning -dotnet_naming_rule.private_static_fields_rule.style = prefixedcamelcase 
-dotnet_naming_rule.private_static_fields_rule.symbols = private_static_fields_symbols -dotnet_naming_rule.private_static_readonly_rule.import_to_resharper = True -dotnet_naming_rule.private_static_readonly_rule.resharper_description = Static readonly fields (private) -dotnet_naming_rule.private_static_readonly_rule.resharper_guid = 15b5b1f1-457c-4ca6-b278-5615aedc07d3 -dotnet_naming_rule.private_static_readonly_rule.resharper_style = _ + aaBb, AaBb -dotnet_naming_rule.private_static_readonly_rule.severity = warning -dotnet_naming_rule.private_static_readonly_rule.style = prefixedcamelcase -dotnet_naming_rule.private_static_readonly_rule.symbols = private_static_readonly_symbols -dotnet_naming_rule.static_field_should_be_pascal_case.import_to_resharper = True -dotnet_naming_rule.static_field_should_be_pascal_case.resharper_description = static_field_should_be_pascal_case -dotnet_naming_rule.static_field_should_be_pascal_case.resharper_guid = 58a1ad23-c8d0-4001-a3e6-ad4cc29e6c4d -dotnet_naming_rule.types_should_be_pascal_case.import_to_resharper = True -dotnet_naming_rule.types_should_be_pascal_case.resharper_description = types_should_be_pascal_case -dotnet_naming_rule.types_should_be_pascal_case.resharper_guid = 24e38638-ec23-46b7-9261-ee35e8e211bd -dotnet_naming_rule.unity_serialized_field_rule.import_to_resharper = True -dotnet_naming_rule.unity_serialized_field_rule.resharper_description = Unity serialized field -dotnet_naming_rule.unity_serialized_field_rule.resharper_guid = 5f0fdb63-c892-4d2c-9324-15c80b22a7ef -dotnet_naming_rule.unity_serialized_field_rule.severity = warning -dotnet_naming_rule.unity_serialized_field_rule.style = lower_camel_case_style -dotnet_naming_rule.unity_serialized_field_rule.symbols = unity_serialized_field_symbols -dotnet_naming_style.lower_camel_case_style.capitalization = camel_case -dotnet_naming_symbols.private_constants_symbols.applicable_accessibilities = private 
-dotnet_naming_symbols.private_constants_symbols.applicable_kinds = field -dotnet_naming_symbols.private_constants_symbols.required_modifiers = const -dotnet_naming_symbols.private_constants_symbols.resharper_applicable_kinds = constant_field -dotnet_naming_symbols.private_constants_symbols.resharper_required_modifiers = any -dotnet_naming_symbols.private_static_fields_symbols.applicable_accessibilities = private -dotnet_naming_symbols.private_static_fields_symbols.applicable_kinds = field -dotnet_naming_symbols.private_static_fields_symbols.required_modifiers = static -dotnet_naming_symbols.private_static_fields_symbols.resharper_applicable_kinds = field -dotnet_naming_symbols.private_static_fields_symbols.resharper_required_modifiers = static -dotnet_naming_symbols.private_static_readonly_symbols.applicable_accessibilities = private -dotnet_naming_symbols.private_static_readonly_symbols.applicable_kinds = field -dotnet_naming_symbols.private_static_readonly_symbols.required_modifiers = readonly,static -dotnet_naming_symbols.private_static_readonly_symbols.resharper_applicable_kinds = readonly_field -dotnet_naming_symbols.private_static_readonly_symbols.resharper_required_modifiers = static -dotnet_naming_symbols.unity_serialized_field_symbols.applicable_accessibilities = * -dotnet_naming_symbols.unity_serialized_field_symbols.applicable_kinds = -dotnet_naming_symbols.unity_serialized_field_symbols.resharper_applicable_kinds = unity_serialised_field -dotnet_naming_symbols.unity_serialized_field_symbols.resharper_required_modifiers = instance - -# Test projects overrides -[**/*Tests.cs] - -dotnet_diagnostic.ca1707.severity = none # Identifiers should not contain underscores -dotnet_diagnostic.ca1851.severity = none # Possible multiple enumerations of IEnumerable collection - -dotnet_diagnostic.sa1402.severity = none # File may only contain a single type -dotnet_diagnostic.sa0001.severity = none # XML comment analysis is disabled due to project configuration 
-dotnet_diagnostic.s4487.severity = none -# dotnet_diagnostic.sa1206.severity = none # Keyword order (modifiers order) give wrong result for required - -resharper_possible_multiple_enumeration_highlighting = none -resharper_lambda_expression_can_be_made_static_highlighting = none - -[*.{appxmanifest,asax,ascx,aspx,axaml,blockshader,build,c,c++,c++m,cc,ccm,cginc,compute,cp,cpp,cppm,cs,cshtml,cu,cuh,cxx,cxxm,dtd,feature,fs,fsi,fsscript,fsx,fx,fxh,h,h++,hh,hlsl,hlsli,hlslinc,hp,hpp,hxx,icc,inc,inl,ino,ipp,ixx,master,ml,mli,mpp,mq4,mq5,mqh,mxx,nuspec,paml,razor,resw,resx,shader,shaderFoundry,skin,tcc,tpp,urtshader,usf,ush,uxml,vb,xaml,xamlx,xoml,xsd}] -indent_style = space -indent_size = 4 -tab_width = 4 diff --git a/.github/ISSUE_TEMPLATE/01_bug_report.yml b/.github/ISSUE_TEMPLATE/01_bug_report.yml new file mode 100644 index 0000000..4f4d052 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/01_bug_report.yml @@ -0,0 +1,149 @@ +name: Bug Report +description: Report a bug or unexpected behaviour in a Ploch.Data package. +title: "[Bug]: " +labels: [ "bug", "triage" ] +body: + - type: checkboxes + id: prerequisites + attributes: + label: Before you submit + options: + - label: I have searched [existing issues](https://github.com/mrploch/ploch-data/issues) and this has not been reported before. + required: true + - label: I have read the [documentation](https://github.com/mrploch/ploch-data#readme) and this is not covered there. + required: true + - label: I am not reporting a security vulnerability (use the [security advisory](https://github.com/mrploch/ploch-data/security/advisories/new) instead). + required: true + + - type: dropdown + id: affected-package + attributes: + label: Affected Package + description: Which Ploch.Data package is affected? 
+ options: + - Ploch.Data.Model + - Ploch.Data.EFCore (GenericRepository / Unit of Work) + - Ploch.Data.EFCore.SqLite + - Ploch.Data.EFCore.SqlServer + - Ploch.Data.EFCore.IntegrationTesting + - Ploch.Data.StandardDataSets + - Ploch.Data.Utilities + - Multiple packages + - Not sure + validations: + required: true + + - type: textarea + id: description + attributes: + label: Bug Description + description: A clear and concise description of the bug. + placeholder: What happened? What did you expect to happen instead? + validations: + required: true + + - type: textarea + id: reproduction + attributes: + label: Steps to Reproduce + description: Minimal steps or code to reproduce the behaviour. The more specific, the faster we can investigate. + placeholder: | + 1. Register services with... + 2. Call repository method... + 3. Observe error... + validations: + required: true + + - type: textarea + id: code + attributes: + label: Reproduction Code + description: Minimal code sample that demonstrates the issue. + render: csharp + + - type: textarea + id: expected + attributes: + label: Expected Behaviour + description: What you expected to happen. + validations: + required: true + + - type: textarea + id: actual + attributes: + label: Actual Behaviour + description: What actually happened. + validations: + required: true + + - type: textarea + id: stacktrace + attributes: + label: Exception / Stack Trace + description: Paste the full exception and stack trace, if applicable. + render: text + + - type: dropdown + id: regression + attributes: + label: Regression? + description: Did this work in a previous version? + options: + - "Yes — it worked in a previous version" + - "No — this is new functionality or I haven't tested older versions" + - "Not sure" + validations: + required: false + + - type: textarea + id: workarounds + attributes: + label: Known Workarounds + description: Have you found any workarounds? This helps other users while we investigate. 
+ + - type: input + id: package-version + attributes: + label: Package Version + description: The version of the affected Ploch.Data package. + placeholder: "3.0.0" + validations: + required: true + + - type: input + id: dotnet-version + attributes: + label: .NET Version + description: Target framework or `dotnet --version` output. + placeholder: "net8.0 / net10.0 / 10.0.100" + validations: + required: true + + - type: dropdown + id: database-provider + attributes: + label: Database Provider + description: Which database provider are you using? + options: + - SQLite + - SQL Server + - PostgreSQL (Npgsql) + - In-Memory (for testing) + - Other + - Not applicable + validations: + required: false + + - type: input + id: os + attributes: + label: Operating System + description: OS and version (e.g. Windows 11, Ubuntu 24.04, macOS 15). + placeholder: "Windows 11" + + - type: textarea + id: additional + attributes: + label: Additional Context + description: Any other context, screenshots, or log output that might help. diff --git a/.github/ISSUE_TEMPLATE/02_feature_request.yml b/.github/ISSUE_TEMPLATE/02_feature_request.yml new file mode 100644 index 0000000..2bd2560 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/02_feature_request.yml @@ -0,0 +1,76 @@ +name: Feature Request +description: Suggest a new feature or improvement for a Ploch.Data package. +title: "[Feature]: " +labels: [ "enhancement", "triage" ] +body: + - type: checkboxes + id: prerequisites + attributes: + label: Before you submit + options: + - label: I have searched [existing issues](https://github.com/mrploch/ploch-data/issues) and this has not been requested before. + required: true + + - type: dropdown + id: affected-package + attributes: + label: Related Package + description: Which package would this feature belong to? 
+ options: + - Ploch.Data.Model + - Ploch.Data.EFCore (GenericRepository / Unit of Work) + - Ploch.Data.EFCore.SqLite + - Ploch.Data.EFCore.SqlServer + - Ploch.Data.EFCore.IntegrationTesting + - Ploch.Data.StandardDataSets + - Ploch.Data.Utilities + - New package + - Not sure + validations: + required: false + + - type: textarea + id: problem + attributes: + label: Problem Statement + description: What problem are you trying to solve? Focus on the problem, not the solution. + placeholder: "I am trying to [...] but [...]" + validations: + required: true + + - type: textarea + id: solution + attributes: + label: Proposed Solution + description: Describe the solution you'd like. Include API shape, usage examples, or pseudocode if you have ideas. + + - type: textarea + id: api-usage + attributes: + label: Example Usage + description: How would the feature be used in code? + render: csharp + + - type: textarea + id: alternatives + attributes: + label: Alternatives Considered + description: What alternatives have you considered? Why are they insufficient? + + - type: dropdown + id: breaking-change + attributes: + label: Would this be a breaking change? + description: Would this require changes to existing public API or behaviour? + options: + - "No" + - "Yes" + - "Not sure" + validations: + required: false + + - type: textarea + id: additional + attributes: + label: Additional Context + description: Any other context, mockups, links to similar features in other libraries, etc. diff --git a/.github/ISSUE_TEMPLATE/03_api_proposal.yml b/.github/ISSUE_TEMPLATE/03_api_proposal.yml new file mode 100644 index 0000000..5119bfa --- /dev/null +++ b/.github/ISSUE_TEMPLATE/03_api_proposal.yml @@ -0,0 +1,95 @@ +name: API Proposal +description: Propose a new public API addition or change to a Ploch.Data package. 
+title: "[API Proposal]: " +labels: [ "api-proposal", "triage" ] +body: + - type: checkboxes + id: prerequisites + attributes: + label: Before you submit + options: + - label: I have searched [existing issues](https://github.com/mrploch/ploch-data/issues) for similar API proposals. + required: true + + - type: dropdown + id: affected-package + attributes: + label: Target Package + description: Which package would this API belong to? + options: + - Ploch.Data.Model + - Ploch.Data.EFCore (GenericRepository / Unit of Work) + - Ploch.Data.EFCore.SqLite + - Ploch.Data.EFCore.SqlServer + - Ploch.Data.EFCore.IntegrationTesting + - Ploch.Data.StandardDataSets + - Ploch.Data.Utilities + - New package + validations: + required: true + + - type: textarea + id: background + attributes: + label: Background and Motivation + description: Why is this API needed? What scenario does it enable? + validations: + required: true + + - type: textarea + id: api-proposal + attributes: + label: Proposed API + description: | + Define the new or modified public API surface. Include types, methods, interfaces, + and extension methods. Mark new additions clearly. + value: | + ```csharp + namespace Ploch.Data; + + // New interface / class / extension method: + public interface IExample + { + // TODO: Define API surface + } + ``` + render: csharp + validations: + required: true + + - type: textarea + id: usage + attributes: + label: Usage Examples + description: Show how a consumer would use the proposed API. + value: | + ```csharp + // Example: How a consumer would use this API + ``` + render: csharp + validations: + required: true + + - type: textarea + id: alternatives + attributes: + label: Alternative Designs + description: What other approaches did you consider? Why is this design preferred? + + - type: dropdown + id: breaking-change + attributes: + label: Breaking Change? + description: Does this modify or remove existing public API? 
+ options: + - "No — purely additive" + - "Yes — modifies existing API" + - "Yes — removes existing API" + validations: + required: true + + - type: textarea + id: risks + attributes: + label: Risks and Considerations + description: Any risks, edge cases, performance concerns, or compatibility issues to consider. diff --git a/.github/ISSUE_TEMPLATE/04_task.yml b/.github/ISSUE_TEMPLATE/04_task.yml new file mode 100644 index 0000000..c792b2f --- /dev/null +++ b/.github/ISSUE_TEMPLATE/04_task.yml @@ -0,0 +1,65 @@ +name: Task +description: Internal task for maintenance, refactoring, CI/CD, documentation, or other non-feature work. +title: "[Task]: " +labels: [ "task" ] +body: + - type: textarea + id: description + attributes: + label: Description + description: What needs to be done and why? + validations: + required: true + + - type: textarea + id: acceptance-criteria + attributes: + label: Acceptance Criteria + description: Define what "done" looks like. + value: | + - [ ] + - [ ] + - [ ] + validations: + required: true + + - type: dropdown + id: affected-area + attributes: + label: Affected Area + description: What part of the project does this task affect? + options: + - Source code + - Tests + - CI/CD pipeline + - Documentation + - Build configuration + - Dependencies + - Multiple areas + validations: + required: false + + - type: dropdown + id: affected-repos + attributes: + label: Affected Repositories + description: Does this task affect other repositories in the MrPloch workspace? + multiple: true + options: + - ploch-data only + - ploch-common + - ploch-lists + - ploch-endpoints + - ploch-groupmatters + - mrploch-development + - ploch-github-actions + - ploch-templates-dotnet-repository + - Other + validations: + required: false + + - type: textarea + id: additional + attributes: + label: Additional Context + description: Any related issues, PRs, documentation, or context. 
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000..129abd2 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,11 @@ +blank_issues_enabled: false +contact_links: + - name: Security Vulnerability + url: https://github.com/mrploch/ploch-data/security/advisories/new + about: Report a security vulnerability privately. Do not open a public issue. + - name: Question or Discussion + url: https://github.com/mrploch/ploch-data/discussions + about: Ask questions, get help, or discuss ideas. Issues are for bugs and feature requests only. + - name: Issue in Ploch.Common + url: https://github.com/mrploch/ploch-common/issues + about: If your issue is in a Ploch.Common package (not Ploch.Data), report it there instead. diff --git a/.github/agents/plan-critic.agent.md b/.github/agents/plan-critic.agent.md new file mode 100644 index 0000000..2e761aa --- /dev/null +++ b/.github/agents/plan-critic.agent.md @@ -0,0 +1,46 @@ +--- +name: plan-critic +description: Critique a remediation or implementation plan for completeness, risk, missing validations, missing PR comment coverage, weak assumptions, and CI blind spots. Use this programmatically from another custom agent before finalizing a non-trivial plan. +target: github-copilot +tools: ["read", "search", "github/*"] +model: claude-opus-4.6 +disable-model-invocation: true +user-invocable: false +--- + +You are an independent plan reviewer. + +Your job is to challenge a draft plan before code changes begin. + +Review for: + +1. Missing review findings or risk areas. +2. Missing handling for PR comments, conversations, and review threads. +3. Missing validation steps, especially tests, sample-app validation, and CI checks. +4. Weak assumptions about tickets, linked issues, related PRs, or historical behavior. +5. Gaps between the proposed fixes and the stated pass criteria. + +Rules: + +- Do not write code. +- Do not soften criticism for the sake of tone. 
+- Prefer precise, actionable objections. +- If the plan is acceptable, say why it is acceptable and what remains highest risk. + +Output format: + +## Verdict + +- `approve` or `revise` + +## Required changes + +- Every gap that must be fixed before implementation + +## Optional improvements + +- Useful but non-blocking refinements + +## Residual risk + +- What could still go wrong even if the plan is followed diff --git a/.github/agents/pr-pipeline-orchestrator.agent.md b/.github/agents/pr-pipeline-orchestrator.agent.md new file mode 100644 index 0000000..996b9ca --- /dev/null +++ b/.github/agents/pr-pipeline-orchestrator.agent.md @@ -0,0 +1,47 @@ +--- +name: pr-pipeline-orchestrator +description: Run the full PR investigation, review-planning, and remediation pipeline for a specified pull request. Use this when you want one agent to coordinate the whole process while delegating stage-specific work to specialized agents. +target: github-copilot +tools: ["read", "search", "edit", "execute", "agent", "github/*"] +model: gpt-5.3-codex +disable-model-invocation: true +user-invocable: true +--- + +You are the pipeline orchestrator for deep PR work. + +You coordinate a staged pipeline. Because GitHub.com cloud agent does not support YAML `handoffs`, you must sequence the stages explicitly and treat each stage result as a checkpoint before continuing. + +Pipeline: + +1. Invoke `repo-investigator` to gather repository-specific context. +2. Invoke `pr-review-planner` to produce an exhaustive remediation plan. +3. Ensure non-trivial plans are reviewed by `plan-critic` before implementation. If `pr-review-planner` already performed that review, verify the critique was incorporated. +4. Only after the plan is acceptable, invoke `pr-remediation`. +5. Re-check the final state. If new failures or unresolved PR feedback remain, loop back to planning instead of forcing completion. + +Rules: + +- Do not skip stages. +- Do not proceed to remediation without a written plan. 
+- Do not mark the pipeline complete while required CI checks are failing. +- If comment replies are required but write-capable GitHub tools are not configured, surface that as a configuration blocker. + +Output format: + +## Stage status + +- Investigation +- Review and planning +- Plan critique +- Remediation + +## Current blockers + +- Technical blockers +- Access or configuration blockers + +## Ready state + +- Whether the PR is ready now +- If not, what remains diff --git a/.github/agents/pr-remediation.agent.md b/.github/agents/pr-remediation.agent.md new file mode 100644 index 0000000..110d63a --- /dev/null +++ b/.github/agents/pr-remediation.agent.md @@ -0,0 +1,54 @@ +--- +name: pr-remediation +description: Execute an approved PR remediation plan, validate every change, address all valid review feedback, reply to false positives when write-capable GitHub tools are configured, and keep iterating until the PR is in a fully passing state. Use this after the PR review plan exists. +target: github-copilot +tools: ["read", "search", "edit", "execute", "agent", "github/*"] +model: gpt-5.3-codex +disable-model-invocation: true +user-invocable: true +--- + +You are the PR remediation specialist. + +You take an existing plan and drive the PR to a clean state. + +Required workflow: + +1. Re-open the PR, the approved plan, and all relevant review context before changing code. +2. Implement the required fixes in a controlled order. +3. Validate after each meaningful batch of changes using the most relevant tests first, then broader validation before you finish. +4. Re-check PR comments, conversations, and CI after changes land. +5. For every valid review item, make the required code change. +6. For every false positive, reply with concise evidence if write-capable GitHub tools are available. +7. If the current plan becomes invalid because of new failures, regressions, or misunderstood requirements, stop and return to planning. 
If the revised plan is non-trivial, invoke `plan-critic`. + +Hard requirements: + +- Do not declare success while any required CI check is failing. +- Do not skip comments or conversations. +- Do not assume reply capability exists. If the repository is still using the default read-only GitHub MCP setup, report that comment-reply automation is blocked and explain the missing configuration. +- If a change can affect SampleApp package consumption, validate the relevant SampleApp build path as well. +- If you cannot fully verify a fix, say exactly what remains unverified. + +Output format: + +## Changes made + +- What was changed and why + +## Validation + +- Commands run +- Results + +## Comment and conversation resolution + +- One line per item: + - `code changed` + - `replied with evidence` + - `blocked by missing write access` + +## Final status + +- Whether the PR is ready +- Any remaining blockers diff --git a/.github/agents/pr-review-planner.agent.md b/.github/agents/pr-review-planner.agent.md new file mode 100644 index 0000000..0a105e1 --- /dev/null +++ b/.github/agents/pr-review-planner.agent.md @@ -0,0 +1,76 @@ +--- +name: pr-review-planner +description: Review a specified pull request without editing code, research all linked context, inspect every review comment and conversation, check CI, and produce a complete remediation plan. Use this when you need an exhaustive PR review and a plan for what must change before the PR can be considered ready. +target: github-copilot +tools: ["read", "search", "execute", "agent", "github/*"] +model: gpt-5.3-codex +disable-model-invocation: true +user-invocable: true +--- + +# PR Review Planner Agent + +You are the PR review and remediation planner. + +You do not change code. You create the best possible plan for the next implementation stage. + +Required workflow: + +1. Open the specified PR and understand the intent, changed files, commits, and current branch state. +2. 
Read all available PR discussion: + - top-level PR conversation + - review summaries + - review comments + - unresolved and resolved threads + - follow-up conversations on prior commits when relevant +3. Read the associated issue or ticket. If the PR, issue, commits, or comments reference related issues or pull requests, inspect those too, including closed ones when they matter. +4. Research the touched code in the repository so you understand the implementation, not just the diff. +5. Check CI status and every check run that applies to the PR. +6. Build a remediation plan that covers: + - defects or risks you identify in the implementation + - every valid PR comment that requires a code change + - every false positive that needs a reply with evidence + - every CI failure, flaky test, or missing validation that must be addressed +7. If the draft plan is non-trivial, invoke `plan-critic` before you finalize it. Treat a plan as non-trivial if any of the following are true: + - more than one project is affected + - more than five files are touched + - multiple review threads need different responses + - CI is failing or incomplete + - the change touches shared abstractions, provider-selection behavior, or public APIs + - the change can affect the SampleApp consumer experience +8. Incorporate the critique and produce the final plan. + +Coverage requirements: + +- No PR comment or conversation may be skipped. +- If you cannot inspect a conversation because tooling or permissions are insufficient, say so explicitly and mark the plan incomplete. +- Separate "must change", "must reply", and "verify again" work clearly. 
+ +Output format: + +## PR understanding + +- What the PR is trying to do +- What changed technically + +## Findings + +- Defects, risks, or regressions you found + +## Comment disposition + +- One line per PR comment or thread: + - `change required` + - `reply only` + - `blocked by missing access` + +## CI and checks + +- Current state +- What must pass before merge + +## Remediation plan + +- Ordered implementation steps +- Validation after each major step +- Final pass criteria diff --git a/.github/agents/repo-investigator.agent.md b/.github/agents/repo-investigator.agent.md new file mode 100644 index 0000000..c469447 --- /dev/null +++ b/.github/agents/repo-investigator.agent.md @@ -0,0 +1,43 @@ +--- +name: repo-investigator +description: Investigate this repository and build project-specific understanding before deep PR review or implementation. Use this when a task needs architecture research, conventions, validation commands, likely impact areas, or expert repository context before planning or changing code. +target: github-copilot +tools: ["read", "search", "execute", "github/*"] +model: gpt-5.3-codex +disable-model-invocation: true +user-invocable: true +--- + +You are the repository investigator for `ploch-data`. + +Your job is to build expert understanding of the repository before review or implementation work starts. + +Process: + +1. Read the repository-level instructions first, including `.github/copilot-instructions.md` and any agent guidance files that are present. +2. Build a concise mental model of the solution structure, package boundaries, architecture patterns, sample application constraints, CI workflows, validation commands, and repository conventions. +3. When a PR, issue, or feature area is specified, identify the most relevant projects, workflows, files, abstractions, and likely regression surfaces. +4. Prefer repository evidence over guesses. If something is unclear, state the uncertainty and the fastest way to verify it. +5. 
Do not edit code. + +Output format: + +## Repository model + +- Key projects and patterns +- Important conventions +- Relevant CI or release constraints + +## Task-specific context + +- Files, projects, and abstractions most likely to matter +- Risks or coupling to watch + +## Validation map + +- Commands to run +- Which tests or workflows matter most + +## Open questions + +- Unknowns that must be resolved before implementation diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md index f3fac37..6f9c7e4 100644 --- a/.github/copilot-instructions.md +++ b/.github/copilot-instructions.md @@ -1,25 +1,526 @@ + +# Workspace: MrPloch +# Workspace ID: 57db5f34-e7f0-42c0-86c4-bb981f96c880 + +# ContextStream Rules + +**MANDATORY STARTUP:** On the first message of EVERY session call `init(...)` then `context(user_message="...")`. On subsequent messages, call `context(user_message="...")` first by default. A narrow bypass is allowed only for immediate read-only ContextStream calls when prior context is still fresh and no state-changing tool has run. + +## Required Tool Calls + +1. **First message in session**: Call `init(folder_path="")` then `context(user_message="...", session_id="")` +2. **Subsequent messages (default)**: Call `context(user_message="...", session_id="")` first. Narrow bypass: immediate read-only ContextStream calls with fresh context + no state changes. +3. 
**Before file search**: Call `search(mode="auto", query="...")` before local tools + +**Read-only examples** (default: call `context(...)` first; narrow bypass only for immediate read-only ContextStream calls when context is fresh and no state-changing tool has run): `workspace(action="list"|"get"|"create")`, `memory(action="list_docs"|"list_events"|"list_todos"|"list_tasks"|"list_transcripts"|"list_nodes"|"decisions"|"get_doc"|"get_event"|"get_task"|"get_todo"|"get_transcript")`, `session(action="get_lessons"|"get_plan"|"list_plans"|"recall")`, `help(action="version"|"tools"|"auth")`, `project(action="list"|"get"|"index_status")`, `reminder(action="list"|"active")`, any read-only data query + +**Common queries — use these exact tool calls:** + +- "list lessons" / "show lessons" → `session(action="get_lessons")` +- "save lesson" / "remember this lesson" / "lesson learned" / "I made a mistake" → `session(action="capture_lesson", title="...", trigger="...", impact="...", prevention="...", severity="low|medium|high|critical")` — **NEVER store lessons in local files** (e.g. `~/.claude/.../memory/`, `.cursorrules`, scratch markdown). Lessons live in ContextStream so they auto-surface as `[LESSONS_WARNING]` on future turns and across sessions. 
+- "list decisions" / "show decisions" / "how many decisions" → `memory(action="decisions")` +- "save decision" / "decided to" → `session(action="capture", event_type="decision", title="...", content="...")` +- "list docs" → `memory(action="list_docs")` +- "list tasks" → `memory(action="list_tasks")` +- "list todos" → `memory(action="list_todos")` +- "list plans" → `session(action="list_plans")` +- "list events" → `memory(action="list_events")` +- "show snapshots" / "list snapshots" → `memory(action="list_events", event_type="session_snapshot")` +- "save snapshot" → `session(action="capture", event_type="session_snapshot", title="...", content="...")` +- "what did we do last session" / "past sessions" / "previous work" / "pick up where we left off" → `session(action="recall", query="...")` (ranked context) OR `memory(action="list_transcripts", limit=10)` (chronological list) +- "search past sessions" / "find in past transcripts" / "when did we discuss X" → `memory(action="search_transcripts", query="...")` — full-text search over saved conversation transcripts +- "show transcript" / "read session " → `memory(action="get_transcript", transcript_id="...")` +- "list skills" / "show my skills" → `skill(action="list")` +- "create a skill" → `skill(action="create", name="...", instruction_body="...", project_id="", trigger_patterns=[...])` +- "update a skill" → `skill(action="update", name="...", instruction_body="...", change_summary="...")` +- "run skill" / "use skill" → `skill(action="run", name="...")` +- "import skills" / "import my CLAUDE.md" → `skill(action="import", file_path="...", format="auto")` + +Use `context(user_message="...", mode="fast")` for quick turns. +Use `context(user_message="...")` for deeper analysis and coding tasks. +If the `instruct` tool is available, run `instruct(action="get", session_id="...")` before `context(...)` on each turn, then `instruct(action="ack", session_id="...", ids=[...])` after using entries. 
+ +**Plan-mode guardrail:** Entering plan mode does NOT bypass search-first. Do NOT use Explore, Task subagents, Grep, Glob, Find, SemanticSearch, `code_search`, `grep_search`, `find_by_name`, or shell search commands (`grep`, `find`, `rg`, `fd`). Start with `search(mode="auto", query="...")` — it handles glob patterns, regex, exact text, file paths, and semantic queries. Only Read narrowed files/line ranges returned by search. + +## Why These Rules? + +- `context()` returns task-specific rules, lessons from past mistakes, and relevant decisions +- `search()` uses semantic understanding to find relevant code faster than file scanning +- Transcript capture is optional and OFF by default. Enable per session with `save_exchange=true` (and `session_id`), disable with `save_exchange=false`. +- Default context-first keeps state reliable; the narrow read-only bypass avoids unnecessary repeats + +## Finding Information — Search ContextStream Knowledge, Not Just Code + +**Auto-grounding:** Every `context(user_message="...")` call may include a `[GROUNDING]` block — pre-ranked prior work (transcripts, snapshots, docs, decisions, lessons) for **this** message. When you see it, read those hits **before** fanning out into code search; skipping search entirely is often correct. Outside `context()`, use `session(action="ground", user_message="...")` for the same one-shot bundle (recall + docs + decisions + lessons + skills + git). + +When you need information, do not default to code search or trial-and-error. ContextStream stores far more than source — docs, decisions, lessons, preferences, plans, tasks, todos, skills, memory nodes, and full session transcripts all live behind dedicated tools. 
Pick the right knowledge surface by what you're looking for: + +- **Source code / symbol / file** → `search(mode="auto", query="...")` +- **Why we did X / past decisions** → `memory(action="decisions", query="...")` +- **Architecture / spec / design doc** → `memory(action="list_docs")` then `memory(action="get_doc", doc_id="title or UUID")` +- **Prior mistakes ("never do X again")** → `session(action="get_lessons", query="...")` +- **User preferences / conventions / constraints** → already surfaced as `[PREFERENCE]`; also `memory(action="list_nodes", node_type="preference")` or `memory(action="list_nodes", node_type="constraint")` +- **Open work / tasks / todos** → `memory(action="list_tasks")` / `memory(action="list_todos")` +- **Active or past plans** → `session(action="list_plans")` then `session(action="get_plan", plan_id="...")` +- **Reusable workflows / skills** → `skill(action="list")` then `skill(action="run", name="...")` +- **"What did we do before?" (continuation work)** → `session(action="recall", query="...")` — see the Past Sessions ladder below +- **Unsure which surface** → `memory(action="search", query="...")` — hybrid across memory nodes + docs; falls back to `session(action="recall", query="...")` for transcript/snapshot coverage + +Default assumption: if the user asks "how do we do X?", "why did we choose Y?", "what's the pattern for Z?", or "did we already decide about Q?" — the answer is likely in a doc, decision, lesson, plan, or skill, NOT in the code. Check the right knowledge surface BEFORE reading source files or re-deriving the answer. 
+ +Before guessing, improvising, or struggling through a workflow you don't fully know: + +- Start with `context(...)` and obey `[GROUNDING]` (prior-work anchors), `[MATCHED_SKILLS]`, `[LESSONS_WARNING]`, `[PREFERENCE]`, `[DECISIONS]`, `[MEMORY]`, and `` output — those are already filtered to the current task +- Treat `[LESSONS_WARNING]` as active working instructions for the current task, not optional background context; apply them immediately and keep them in mind until the task is done +- Prefer surfaced ContextStream knowledge over inventing a new workflow from memory + +## Past Sessions Are Queryable — USE THEM + +### Auto-Grounding (in `context()`) + +When `context()` returns `[GROUNDING]`, those lines are **pre-ranked prior work for your current message** — read them first (transcript/snapshot/doc/decision/lesson entry points). Skipping code search is often correct. For the same bundle **outside** `context()`, call `session(action="ground", user_message="...")`. + +Transcripts for every turn of every session are captured and indexed automatically. Session snapshots bookmark turning points. **Before asking the user what you did last time, or re-deriving context you built together previously, check the transcript + snapshot layer.** It's fast, it's complete, and the user is paying for it. + +Triggers to query past sessions: + +- User says "last time", "previous", "yesterday", "earlier", "we decided", "we talked about", "pick up where we left off", "what were we working on" +- You have a task that's clearly a continuation (e.g. finishing a refactor that's half-done on disk) +- You're about to ask a clarifying question whose answer is likely in a prior session +- You're unsure whether a decision or approach has already been made + +Escalation ladder — walk it in order and stop at the first step that answers the question: + +1. **`session(action="recall", query="")`** — always the first call. Ranked fusion across transcripts, snapshots, docs, and decisions. 
Covers 80% of "what did we do before" questions. + +2. **`memory(action="search_transcripts", query="")`** — fall through when `recall` returns thin or off-topic results, or when you need every mention of a specific term. Full-text search across ALL saved transcripts. + +3. **`memory(action="list_events", event_type="session_snapshot")`** — when you want the turning-point bookmarks (manual + auto pre-compaction captures). Useful for "what state were we in at the end of " questions that `recall` misses because the answer isn't in conversational text. + +4. **`memory(action="list_transcripts", limit=10)`** — when you need a chronological index of recent sessions (titles, timestamps, IDs). Use when the user wants to know "when did we last work on X". + +5. **`memory(action="get_transcript", transcript_id="")`** — read a full past session end-to-end. Use only after the steps above pointed you at a specific transcript ID and you need the complete exchange, not snippets. + +6. **End of current session — save a bookmark** for the next one: `session(action="capture", event_type="session_snapshot", title="...", content="")`. 
+ +**Never answer "I don't know what we did before" without running at least step 1, then step 2 if step 1 was thin.** + +## Project Scope Discipline + +- Reuse the `project_id` returned by `init(...)` or `context(...)` for project-scoped writes and lookups +- For project-scoped `memory(...)`, `session(...)`, and `skill(...)` calls, pass explicit `project_id` instead of guessing from the folder name or title +- If `init(...)` or `context(...)` does not surface a current `project_id`, rerun `init(folder_path="...")` before creating docs, skills, events, tasks, todos, or other project memory +- Use `target_project` only after init from a multi-project parent folder + +## Response to Notices + +- `[GROUNDING]` → Read ranked prior-work hits (from `context()`) before broad code search; optional one-shot: `session(action="ground", user_message="...")` +- `[GROUNDING_AVAILABLE]` → Your editor may remind you when unread grounding exists — advisory only +- `[MATCHED_SKILLS]` → Run the surfaced skills before other work +- `[LESSONS_WARNING]` → Apply the lessons shown immediately and keep them active for the current task +- `[PREFERENCE]` → Follow user preferences exactly +- `[RULES_NOTICE]` → Run `generate_rules()` to update rules +- `[VERSION_NOTICE]` → Inform user about available updates + +## System Reminders + +`` tags in messages contain injected instructions from hooks. +These should be followed exactly as they contain real-time context. + +## Search Protocol + +**IMPORTANT: Indexing and ingest are ALWAYS available. NEVER claim that transport mode, HTTP mode, or remote mode prevents indexing/ingest.** + +1. Check project index: `project(action="index_status")` +2. If indexed & fresh: `search(mode="auto", query="...")` before local tools +3. If NOT indexed or stale: wait for background refresh (up to ~20s, configurable), retry `search(mode="auto", ...)`, then use local tools only after the grace window elapses +4. 
If search returns 0 results after refresh/retry: local tools are allowed + +### Search Mode Selection + +- `auto` (recommended): query-aware mode selection +- `hybrid`: mixed semantic + keyword retrieval for broad discovery +- `semantic`: conceptual/natural-language questions ("how does auth work?") +- `keyword`: exact text or quoted string +- `pattern`: glob/regex queries (`*.sql`, `foo\s+bar`) +- `refactor`: symbol usage / rename-safe lookup (`UserService`, `snake_case`) +- `exhaustive`: all occurrences / complete match sets +- `team`: cross-project team search + +### Output Format Hints + +- `output_format="paths"` for file lists and rename targets +- `output_format="count"` for "how many" queries + +### Two-Phase Search Playbook (recommended) + +1. **Discovery pass**: run `search(mode="auto", query="", output_format="paths", limit=10)` +2. **Precision pass**: use symbols from pass 1 with a specific mode: + - Exact symbol/text: `search(mode="keyword", query="\"my_symbol\"", include_content=true, file_types=["rs"], limit=20)` + - Symbol usage/rename-safe lookup: `search(mode="refactor", query="MySymbol", output_format="paths")` + - Complete usage sweep: `search(mode="exhaustive", query="my_symbol", file_types=["rs"])` +3. **Read locally only after narrowing**: use Read/Grep on returned paths, not the full repo. + +## Plans and Tasks + +**ALWAYS** use ContextStream for plans and tasks — do NOT create markdown plan files or use built-in todo tools: + +- Plans: `session(action="capture_plan", title="...", steps=[...])` +- Tasks: `memory(action="create_task", title="...", description="...")` +- Link tasks to plans: `memory(action="create_task", plan_id="...")` + +## Memory, Docs & Todos + +**ALWAYS** use ContextStream for memory, lessons, decisions, documents, and todos — NOT editor built-in tools, `~/.claude/.../memory/`, `.cursorrules`, or local files. 
Local-file storage is invisible to the lesson/preference/skill auto-surfacing pipeline that fires on every future turn. + +- Lessons (mistakes, corrections, "never do X again"): `session(action="capture_lesson", title="...", trigger="...", impact="...", prevention="...", severity="low|medium|high|critical", category="...")` +- Decisions: `session(action="capture", event_type="decision", title="...", content="...")` +- Notes/insights: `session(action="capture", event_type="note|insight", title="...", content="...")` +- Facts/preferences: `memory(action="create_node", node_type="fact|preference", title="...", content="...")` +- Documents: `memory(action="create_doc", title="...", content="...", doc_type="spec|general")` +- Todos: `memory(action="create_todo", title="...", todo_priority="high|medium|low")` +Do NOT use `create_memory`, `TodoWrite`, `todo_list`, or local file writes for persistence. + +## Skills (IMPORTANT — Do Not Ignore Matched Skills) + +When `context()` returns `[MATCHED_SKILLS]`, you **MUST run** the listed skills via `skill(action="run", name="...")`. + +- Skills marked ⚡ (high-priority, priority ≥ 80) are **mandatory** — run them immediately before other work +- Skills marked ▶ (recommended, priority ≥ 60) should be run unless clearly irrelevant +- Skills marked ○ (available) are optional but often helpful + +Reusable instruction + action bundles that persist across projects and sessions: + +- Browse: `skill(action="list")` or `skill(action="list", scope="team")` +- Create: `skill(action="create", name="...", instruction_body="...", trigger_patterns=[...])` +- Update: `skill(action="update", name="...", instruction_body="...", change_summary="...")` (name or `skill_id`) +- Run: `skill(action="run", name="...")` — executes the skill's action pipeline +- Import: `skill(action="import", file_path="CLAUDE.md", format="auto")` — imports from any rules file +- Skills auto-activate when their trigger keywords match the user's message. 
The `context()` response surfaces them. + +## Code Search + +**ALWAYS** use ContextStream `search()` before Glob, Grep, Read, SemanticSearch, `code_search`, `grep_search`, or `find_by_name`. +Do NOT launch Task/explore subagents for code search — use `search(mode="auto", query="...")` directly. +ContextStream search results contain **real file paths, line numbers, and code content** — they ARE code results. +**NEVER** dismiss ContextStream results as "non-code" — use the returned file paths to `read_file` the relevant code. +Use `search(include_content=true)` to get inline code snippets in results. + +## Context Pressure + +When `context()` returns `context_pressure.level: "high"`: + +- Save a session snapshot before compaction +- `session(action="capture", event_type="session_snapshot", title="...", content="...")` +- After compaction: `init(folder_path="...", is_post_compact=true)` to restore + +--- + +## IMPORTANT: No Hooks Available + +**This editor does NOT have hooks to enforce ContextStream behavior.** +You MUST follow these rules manually - there is no automatic enforcement. + +## ContextStream Knowledge First + +**Before guessing or struggling through an unfamiliar workflow, check ContextStream first.** + +- Start with `context(...)` and follow `[MATCHED_SKILLS]`, `[LESSONS_WARNING]`, `[PREFERENCE]`, and `` output +- Treat `[LESSONS_WARNING]` as active working instructions for the current task, not optional background context +- If the task is unfamiliar, process-heavy, or likely documented already, inspect `skill(action="list")`, `memory(action="list_docs")`, `session(action="get_lessons")`, or `memory(action="decisions")` before trial-and-error +- If `context()` returns `[MATCHED_SKILLS]`, run the listed skills before other work + +--- + +## SESSION START PROTOCOL + +**On EVERY new session, you MUST:** + +1. 
**Call `init(folder_path="")`** FIRST + - This triggers project indexing + - Check response for `indexing_status` + - If `"started"` or `"refreshing"`: wait before searching + +2. **Generate a unique session_id** (e.g., `"session-" + timestamp` or a UUID) + - Use this SAME session_id for ALL `context()` calls in this conversation + +3. **Call `context(user_message="", session_id="")`** + - Gets task-specific rules, lessons, and preferences + - Check for [LESSONS_WARNING], [PREFERENCE], [RULES_NOTICE] + - If [LESSONS_WARNING] appears, treat those lessons as mandatory instructions for the task until it is finished + +4. **Default behavior:** call `context(...)` first on each message. Narrow bypass is allowed only for immediate read-only ContextStream calls when previous context is still fresh and no state-changing tool has run. + +5. **Instruction alignment (if tool is exposed):** call `instruct(action="get", session_id="")` before `context(...)` each turn, and `instruct(action="ack", session_id="", ids=[...])` after using entries. + +--- + +## TRANSCRIPT SAVING (OPTIONAL) + +Transcripts are OFF by default. + +### Enable for this chat + +``` +context(user_message="", save_exchange=true, session_id="") +``` + +### Disable for this chat + +``` +context(user_message="", save_exchange=false, session_id="") +``` + +### Default policy via MCP config env + +- `CONTEXTSTREAM_TRANSCRIPTS_ENABLED="true|false"` +- `CONTEXTSTREAM_HOOK_TRANSCRIPTS_ENABLED="true|false"` + +### Session ID Guidelines + +- Generate ONCE at the start of the conversation +- Use a unique identifier (UUID or timestamp-based) +- Keep the SAME session_id for ALL context() calls +- Different sessions = different transcript preference state + +--- + +## FILE INDEXING (CRITICAL) + +**There is NO automatic file indexing in this editor.** +You MUST manage indexing manually: + +**IMPORTANT: Indexing and ingest are ALWAYS available. 
NEVER claim that transport mode, HTTP mode, or remote mode prevents indexing/ingest operations. Both `project(action="index")` and `project(action="ingest_local")` work in all configurations.** + +### After Creating/Editing Files + +``` +project(action="index") +``` + +If folder context is active, this resolves the current repo and uses the local ingest path automatically. + +### To Target A Specific Folder Or Recover From Stale Scope + +``` +project(action="ingest_local", path="") +``` + +### Signs You Need to Re-index + +- Search doesn't find code you just wrote +- Search returns old versions of functions +- New files don't appear in search results + +--- + +## SEARCH-FIRST (No PreToolUse Hook) + +**There is NO hook to redirect local tools.** You MUST self-enforce: + +### Before ANY Search, Check Index Status + +``` +project(action="index_status") +``` + +### Search Protocol + +- **IF indexed & fresh:** `search(mode="auto", query="...")` before local tools +- **IF NOT indexed or stale (>7 days):** wait up to ~20s for background refresh, retry `search(mode="auto", ...)`, then allow local tools only after the grace window elapses +- **IF search returns 0 results after retry/window:** local tools are allowed + +### Choose Search Mode Intelligently + +- `auto` (recommended): query-aware mode selection +- `hybrid`: mixed semantic + keyword retrieval for broad discovery +- `semantic`: conceptual questions ("how does X work?") +- `keyword`: exact text / quoted string +- `pattern`: glob or regex (`*.ts`, `foo\s+bar`) +- `refactor`: symbol usage / rename-safe lookup +- `exhaustive`: all occurrences / complete match coverage +- `team`: cross-project team search + +### Output Format Hints (Search-First Section) + +- Use `output_format="paths"` for file listings and rename targets +- Use `output_format="count"` for "how many" queries + +### Two-Phase Search Pattern (for precision) + +- Pass 1 (discovery): `search(mode="auto", query="", output_format="paths", limit=10)` +- 
Pass 2 (precision): use one of: + - exact text/symbol: `search(mode="keyword", query="\"exact_text\"", include_content=true)` + - symbol usage: `search(mode="refactor", query="SymbolName", output_format="paths")` + - all occurrences: `search(mode="exhaustive", query="symbol_or_text")` +- Then use local Read/Grep only on paths returned by ContextStream. + +### When Local Tools Are OK + +- The stale/not-indexed grace window has elapsed (~20s default, configurable) +- ContextStream search still returns 0 results or errors after retry +- User explicitly requests local tools + +--- + +## CONTEXT COMPACTION (No PreCompact Hook) + +**There is NO automatic state saving before compaction.** +You MUST save state manually when the conversation gets long: + +### When to Save State + +- After completing a major task +- Before the conversation might be compacted +- If `context()` returns `context_pressure.level: "high"` + +### How to Save State + +``` +session(action="capture", event_type="session_snapshot", + title="Session checkpoint", + content="{ \"summary\": \"what we did\", \"active_files\": [...], \"next_steps\": [...] }") +``` + +### After Compaction (if context seems lost) + +``` +init(folder_path="...", is_post_compact=true) +``` + +--- + +## PLANS & TASKS (CRITICAL) + +**NEVER create markdown plan files** — they vanish across sessions and are not searchable. +**NEVER use built-in todo/plan tools** (e.g., `TodoWrite`, `todo_list`, `plan_mode_respond`) — use ContextStream instead. + +**ALWAYS use ContextStream for planning:** + +``` +session(action="capture_plan", title="...", steps=[...]) +memory(action="create_task", title="...", plan_id="...") +``` + +Plans and tasks in ContextStream persist across sessions, are searchable, and auto-surface in context. + +--- + +## MEMORY & DOCS (CRITICAL) + +**NEVER use built-in memory tools** (e.g., `create_memory`) — use ContextStream instead. +**NEVER write docs/specs/notes to local files** — use ContextStream docs instead. 
+ +**ALWAYS use ContextStream for persistence:** + +``` +session(action="capture", event_type="decision|insight|operation|uncategorized", title="...", content="...") +memory(action="create_node", node_type="fact|preference", title="...", content="...") +memory(action="create_doc", title="...", content="...", doc_type="spec|general") +memory(action="create_todo", title="...", todo_priority="high|medium|low") +``` + +ContextStream memory, docs, and todos persist across sessions, are searchable, and auto-surface in context. + +--- + +## VERSION UPDATES + +**Check for updates periodically** using `help(action="version")`. + +If the response includes [VERSION_NOTICE] or [VERSION_CRITICAL], tell the user about the available update. + +### Update Commands + +```bash +# macOS/Linux +curl -fsSL https://contextstream.io/scripts/setup-beta.sh | bash +# npm +npm install -g @contextstream/mcp-server@latest +``` + +--- + +--- + +## VS Code Copilot Notes + +- Keep this file concise; put detailed workflows in `.github/skills/contextstream-workflow/SKILL.md` +- Use ContextStream plans/tasks as the persistent record of work +- Before code discovery, use `search(mode="auto", query="...")` + + + # GitHub Copilot Instructions — Ploch.Data +## Repository overview + +This repository contains the Ploch.Data family of .NET packages for data models, EF Core helpers, provider-specific configuration, generic repositories, Unit of Work, and integration-testing support. 
+ +- Primary solution: `Ploch.Data.slnx` +- Standalone sample solution: `Ploch.Data.SampleApp.slnx` +- Key package families: + - `Ploch.Data.Model` + - `Ploch.Data.EFCore`, `Ploch.Data.EFCore.SqLite`, `Ploch.Data.EFCore.SqlServer` + - `Ploch.Data.GenericRepository`, `Ploch.Data.GenericRepository.EFCore`, provider-specific variants, and specification support + - integration-testing packages for EF Core and Generic Repository + +## Build and test commands + +- Restore: `dotnet restore` +- Build whole solution: `dotnet build Ploch.Data.slnx` +- Build whole solution with SampleApp switched to local project references: `dotnet build Ploch.Data.slnx -p:UsePlochProjectReferences=true` +- Build sample app in standalone consumer mode: `dotnet build Ploch.Data.SampleApp.slnx` +- Run all tests: `dotnet test` +- Run a specific test project: `dotnet test ` +- Run filtered tests: `dotnet test --filter "FullyQualifiedName~SomeTestName"` + +## Quality bar + +- Preserve the separation between provider-agnostic interfaces and EF Core implementations. +- Keep business-facing abstractions repository-provider agnostic where the design already intends that. +- Avoid architecture drift between the core packages, provider packages, and integration-testing packages. +- Prefer targeted changes over broad repository-wide refactors unless the task genuinely spans package boundaries. +- If shared abstractions or DI registration points change, validate downstream impact carefully. + ## Sample Application Rules -The `samples/SampleApp/` directory contains a Knowledge Base sample application that demonstrates how an **external consumer** would use the Ploch.Data libraries from published NuGet packages. It supports two build modes: +The `samples/SampleApp/` directory contains a Knowledge Base sample application that demonstrates how an external consumer would use the Ploch.Data libraries from published NuGet packages. 
It supports two build modes: -- **Standalone**: `dotnet build Ploch.Data.SampleApp.slnx` — uses PackageReference (external consumer experience) -- **Solution mode**: `dotnet build Ploch.Data.slnx -p:UsePlochProjectReferences=true` — automatically switches to ProjectReference via `ProjectReferences.props` to catch breaking changes +- Standalone: `dotnet build Ploch.Data.SampleApp.slnx` +- Solution mode: `dotnet build Ploch.Data.slnx -p:UsePlochProjectReferences=true` ### Critical constraints -- **Never manually edit csproj files to swap references** — The switching is automatic via `ProjectReferences.props`. csproj files must only contain `PackageReference` for Ploch.Data packages. -- **Standalone build configuration** — The SampleApp's `Directory.Build.props` and `Directory.Packages.props` are self-contained. They must not import from parent directories. -- **Independent package versions** — The SampleApp defines its own `PlochDataPackagesVersion` in `Directory.Packages.props`. Update this after publishing new Ploch.Data package versions. -- **Update ProjectReferences.props** when adding new Ploch.Data library packages. +- Never manually edit csproj files to swap references. The switching is automatic via `ProjectReferences.props`. SampleApp csproj files must only contain `PackageReference` for Ploch.Data packages. +- The SampleApp `Directory.Build.props` and `Directory.Packages.props` are self-contained and must not import from parent directories. +- The SampleApp defines its own `PlochDataPackagesVersion` in `Directory.Packages.props`. Update that after publishing new Ploch.Data package versions. +- Update `ProjectReferences.props` when adding new Ploch.Data library packages. ### Do not -- Replace `PackageReference` with `ProjectReference` in csproj files (the switch is automatic). -- Add `` directives referencing files outside `samples/SampleApp/` (except the conditional `ProjectReferences.props` import in `Directory.Build.props`). 
+- Replace `PackageReference` with `ProjectReference` in SampleApp csproj files. +- Add `<Import>` directives referencing files outside `samples/SampleApp/` except the existing conditional `ProjectReferences.props` import in `Directory.Build.props`. ### Do - Treat SampleApp csproj files as if they were in a separate repository. -- Update `PlochDataPackagesVersion` after publishing new package versions. +- Validate both normal solution behavior and SampleApp behavior when a change can affect external consumers. + +## Testing conventions + +- Use xUnit and FluentAssertions. +- Prefer `[Theory]` whenever practical. +- Keep test names in the style `MethodName_should_explain_what_it_should_do()`. +- Favor both repository-level tests and integration tests when behavior crosses EF Core, repositories, or DI registration. + +## Documentation + +- Use XML documentation comments for all public methods. Try to provide examples where it makes sense. +- Always keep the documentation markdown files in `docs` folder in the repository root [docs/](../docs/) up to date. If new features are being added, then those docs need to be extended to include the new feature usage documentation. If anything changes, then the docs need to be updated. Always provide examples in the docs when discussing a feature. + +## Validation expectations + +- Before finishing, run the most relevant tests for the changed projects. +- If a change affects shared repository abstractions, provider selection, or SampleApp packaging behavior, broaden validation beyond a single project. +- If you cannot run a needed validation step, say exactly what remains unverified. diff --git a/.github/git-commit-instructions.md b/.github/git-commit-instructions.md new file mode 100644 index 0000000..eab4cc8 --- /dev/null +++ b/.github/git-commit-instructions.md @@ -0,0 +1,145 @@ +# Commit Message Standards + +All commit messages **must** follow the [Conventional Commits](https://www.conventionalcommits.org/) specification. 
+ +## Format + +``` +<type>(<scope>): <subject> + +<body> + +[BREAKING CHANGE: <description>] +Refs: #<issue-number> +``` + +## Issue Number + +The issue number can be found in the PR - PRs are associated with issues. +It can also be obtained (usually) from the branch name. For example the current one: `test/13-improve-code-coverage` specifies +that the issue number is `13`. +In this case the footer would be: + +``` +Refs: #13 +``` + +## Structure Rules + +- **Header** (`<type>(<scope>): <subject>`): Required. Max 72 characters. +- **Body**: Include when the change is non-trivial. Briefly describe *what* changed and *why*. Wrap at 72 characters. +- **Footer**: Always include `Refs: #<issue-number>`. This is **mandatory** — every commit must reference a GitHub issue. See [Associated issue](#associated-issue) for how to find the right issue number. Do not fabricate issue numbers. +- **Breaking changes**: If any change breaks backward compatibility (public API signature change, removed/renamed public member, configuration key change, behavioural contract change), add a `BREAKING CHANGE:` footer with a description of what consumers must change. Also add `!` after the type/scope in the header: `feat(api)!: ...`. + +## Types + +| Type | When to use | |------------|------------------------------------------------------| | `feat` | New feature or capability | | `fix` | Bug fix | | `docs` | Documentation only | | `style` | Formatting, whitespace, semicolons — no logic change | | `refactor` | Code restructuring without behaviour change | | `perf` | Performance improvement | | `test` | Adding or updating tests | | `build` | Build system, CI, or dependency changes | | `chore` | Maintenance tasks (tooling, config, housekeeping) | | `ci` | CI/CD pipeline changes | | `revert` | Reverting a previous commit | + +## Scope + +- Use the **project or module name** affected (e.g. `common`, `data`, `lists-api`, `solution`, `ci`). +- For changes spanning the entire repo or solution, use `solution` or the repo short name. 
+- Keep scope lowercase, hyphen-separated if multi-word. + +## Subject Line + +- Use **imperative mood** ("Add feature", not "Added feature" or "Adds feature"). +- Start with a capital letter. +- No trailing period. + +## Detecting Breaking Changes + +Before writing the commit message, analyse the staged changes for: + +- Removed or renamed public types, methods, properties, or interfaces. +- Changed method signatures (parameter types, return types, parameter order). +- Removed or renamed configuration keys, environment variables, or connection string names. +- Changed default behaviour that existing consumers rely on. +- Removed or renamed NuGet package IDs. +- Changed serialisation format of persisted data. + +If any of these are detected, the commit **must** include the `BREAKING CHANGE:` footer. + +## Associated Issue + +Every commit **must** include a `Refs: #` footer linking to a GitHub issue. Follow this lookup order: + +1. **Check the open PR** for the current branch (`gh pr view`). If the PR body or linked issues reference an issue, use that. +2. **Search repository issues** (`gh issue list` or the GitHub MCP tools) for an existing issue that matches the change. If there is a clear candidate, use it — and if there is an open PR without an issue link, associate the issue with the PR. +3. **Ask the user** if no matching issue is found. The user may want to create a new issue for the changes. Do not guess or omit the `Refs` footer — always ask rather than commit without an issue reference. + +## Examples + +### Simple feature + +``` +feat(common): Add StringExtensions.ContainsAny method + +Added a new extension method that checks whether a string contains +any of the specified substrings. + +Refs: #162 +``` + +### Breaking change + +``` +chore(solution)!: Update ContainsAny namespace + +Moved the public API method Strings.ContainsAny to the +StringExtensions class under a new namespace. 
+ +BREAKING CHANGE: Ploch.Common.Strings.ContainsAny moved to +Ploch.Common.Extensions.StringExtensions.ContainsAny. Update +using directives accordingly. +Refs: #162 +``` + +### Bug fix + +``` +fix(data): Prevent duplicate entity on concurrent upsert + +Added optimistic concurrency check in the upsert path to avoid +inserting a duplicate when two requests race on the same key. + +Refs: #187 +``` + +### Multi-scope refactor + +``` +refactor(solution): Extract shared audit timestamp logic + +Moved SetAuditTimestamps from individual DbContext overrides into +a shared base class to reduce duplication across Data projects. + +Refs: #205 +``` + +### Change Log updates + +If a commit contains information that should go to the change log, make sure you put it there. Don't put things like styling changes or minor things there. This is especially important for the breaking changes and new features. + +### CI/build change + +``` +ci(github-actions): Add fetch-depth 0 for NBGV versioning + +NBGV requires full git history to calculate commit height. +Updated all checkout steps across workflows. + +Refs: #210 +``` diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index a6618b7..e807913 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,9 +1,28 @@ -## Describe your changes +# Pull Request Description ## Issue ticket number and link +## Pull Request Changes Summary + +### :boom: Breaking Changes + +### :dart: New Features + +### :beetle: Fixes + +### :book: Docs + +### :herb: Other + +## Describe your changes + ## Checklist before requesting a review + - [ ] I have performed a self-review of my code -- [ ] If it is a core feature, I have added thorough tests. -- [ ] Do we need to implement analytics? -- [ ] Will this be part of a product update? If yes, please write one phrase about this update. +- [ ] I have added thorough tests. +- [ ] I have updated documentation. 
+- [ ] If applicable, I have updated the sample application + +## :triangular_ruler: Design Decisions + +## Testing diff --git a/.github/workflows/build-dotnet.yml b/.github/workflows/build-dotnet.yml index cb3eb45..73fd78c 100644 --- a/.github/workflows/build-dotnet.yml +++ b/.github/workflows/build-dotnet.yml @@ -87,9 +87,49 @@ jobs: - name: Install mono run: | - sudo apt install -y ca-certificates gnupg - sudo gpg --homedir /tmp --no-default-keyring --keyring /usr/share/keyrings/mono-official-archive-keyring.gpg --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF - echo "deb [signed-by=/usr/share/keyrings/mono-official-archive-keyring.gpg] https://download.mono-project.com/repo/ubuntu stable-focal main" | sudo tee /etc/apt/sources.list.d/mono-official-stable.list + sudo apt install -y ca-certificates gnupg curl + KEYRING=/usr/share/keyrings/mono-official-archive-keyring.gpg + KEY_FINGERPRINT=3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF + IMPORTED=0 + # Primary: download the key directly from the mono project's HTTPS + # endpoint. This avoids GPG keyservers entirely, which have been + # unreliable (keyserver.ubuntu.com / pgp.mit.edu unresponsive, + # keys.openpgp.org strips UIDs). + if curl -fsSL --max-time 30 https://download.mono-project.com/repo/xamarin.gpg -o /tmp/mono.gpg.raw; then + # The file may be either ASCII-armored or binary; --dearmor + # handles the armored case, and a binary keyring can be copied + # as-is. Try dearmor first; if it fails, treat the file as binary. 
+ if sudo gpg --dearmor < /tmp/mono.gpg.raw > /tmp/mono.gpg.bin 2>/dev/null && [ -s /tmp/mono.gpg.bin ]; then + sudo cp /tmp/mono.gpg.bin "$KEYRING" + else + sudo cp /tmp/mono.gpg.raw "$KEYRING" + fi + sudo chmod 644 "$KEYRING" + if sudo gpg --homedir /tmp --no-default-keyring --keyring "$KEYRING" --list-keys "$KEY_FINGERPRINT" 2>/dev/null | grep -q '^uid'; then + echo "Imported mono signing key directly from download.mono-project.com" + IMPORTED=1 + fi + fi + # Fallback: keyserver loop (each capped at 30s so an unresponsive + # server cannot hang the build). + if [ "$IMPORTED" -ne 1 ]; then + echo "Direct download failed or yielded no usable key; falling back to keyservers..." + sudo rm -f "$KEYRING" + for ks in hkp://keyserver.ubuntu.com:80 hkp://pgp.mit.edu; do + sudo timeout 30 gpg --homedir /tmp --no-default-keyring --keyring "$KEYRING" --keyserver "$ks" --recv-keys "$KEY_FINGERPRINT" || true + if sudo gpg --homedir /tmp --no-default-keyring --keyring "$KEYRING" --list-keys "$KEY_FINGERPRINT" 2>/dev/null | grep -q '^uid'; then + echo "Imported mono signing key (with UID) from $ks" + IMPORTED=1 + break + fi + echo "Keyserver $ks did not yield a usable key, trying next..." + done + fi + if [ "$IMPORTED" -ne 1 ]; then + echo "::error::Failed to import mono signing key from any source" + exit 1 + fi + echo "deb [signed-by=$KEYRING] https://download.mono-project.com/repo/ubuntu stable-focal main" | sudo tee /etc/apt/sources.list.d/mono-official-stable.list sudo apt update sudo apt install -y mono-devel - name: Add GitHub Packages Source @@ -107,6 +147,9 @@ jobs: continue-on-error: true - name: Install dotnet-coverage run: dotnet tool install --global dotnet-coverage + # Note: analysis settings (coverage paths, exclusions) are passed inline + # via /d: arguments. The .NET edition of the SonarScanner does not read + # sonar-project.properties — that file would actively fail post-processing. 
- name: SonarScanner Begin id: sonar-begin env: @@ -116,12 +159,10 @@ jobs: /k:"${{ env.SONAR_PROJECT_KEY }}" /o:"${{ env.SONAR_ORGANIZATION }}" /d:sonar.login="$SONAR_TOKEN" - /d:sonar.host.url="https://sonarcloud.io" /d:sonar.projectBaseDir="${{ github.workspace }}" - /d:sonar.scm.provider=git - /d:sonar.cs.opencover.reportsPaths=**/CoverageResults/coverage.opencover.xml - /d:sonar.coverage.exclusions="**/*.ps1,**/*.Tests/**,**/*.Tests.csproj,**/*.IntegrationTests/**,**/*.IntegrationTesting/**,**/samples/**" - /d:sonar.exclusions="**/*.ps1,**/docs/**,**/DocumentationSite/**,**/*.md,**/workload-install.ps1,**/prepare-repo.ps1,**/.github/**,**/*.yml,**/*.yaml" + /d:sonar.cs.opencover.reportsPaths="**/CoverageResults/coverage.opencover.xml" + /d:sonar.exclusions="**/tests/**,**/*.Tests/**,**/*.IntegrationTests/**,**/*.IntegrationTesting/**,**/Migrations/**,**/*.ps1,**/docs/**,**/DocumentationSite/**,**/*.md,**/.github/**,**/*.yml,**/*.yaml" + /d:sonar.coverage.exclusions="**/tests/**,**/*.Tests/**,**/*.IntegrationTests/**,**/*.IntegrationTesting/**,**/samples/**,**/*.ps1" continue-on-error: true # Build and Test (always runs regardless of SonarCloud status) @@ -130,13 +171,14 @@ jobs: - name: Test Coverage run: dotnet test ./Ploch.Data.slnx --verbosity normal --no-build --logger "trx;LogFileName=TestOutputResults.xml" /p:CollectCoverage=true /p:CoverletOutput=./CoverageResults/ "/p:CoverletOutputFormat=cobertura%2copencover" -p:UsePlochProjectReferences=true - # SonarCloud end (runs even after test failures, only if begin succeeded) + # SonarCloud end (runs even after test failures, only if begin succeeded). + # Fails the job loudly if post-processing fails — silent failures here led + # to ~3 weeks of unanalysed builds when sonar-project.properties broke End. 
- name: SonarScanner End if: always() && steps.sonar-begin.outcome == 'success' env: SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} run: dotnet sonarscanner end /d:sonar.login="$SONAR_TOKEN" - continue-on-error: true - name: Upload Test Results if: always() diff --git a/.github/workflows/copilot-pr-pipeline.yml b/.github/workflows/copilot-pr-pipeline.yml new file mode 100644 index 0000000..066dfc0 --- /dev/null +++ b/.github/workflows/copilot-pr-pipeline.yml @@ -0,0 +1,380 @@ +name: Copilot PR Pipeline + +on: + workflow_dispatch: + inputs: + pr_number: + description: Existing pull request number to analyze + required: true + type: string + mode: + description: Pipeline mode + required: true + type: choice + default: plan-only + options: + - plan-only + - full-followup-pr + model: + description: Model for the top-level task + required: true + type: choice + default: gpt-5.3-codex + options: + - gpt-5.3-codex + - gpt-5.4 + - claude-sonnet-4.6 + - claude-opus-4.6 + custom_agent: + description: Optional override for the custom agent identifier + required: false + type: string + wait_for_completion: + description: Poll the task until it reaches a terminal state or timeout + required: true + type: boolean + default: false + timeout_minutes: + description: Poll timeout in minutes when wait_for_completion is true + required: true + type: number + default: 30 + +permissions: + contents: read + pull-requests: read + actions: read + +concurrency: + group: copilot-pr-pipeline-${{ github.repository }}-${{ inputs.pr_number }}-${{ inputs.mode }} + cancel-in-progress: false + +jobs: + launch-task: + runs-on: ubuntu-latest + env: + GH_TOKEN: ${{ secrets.COPILOT_AGENT_PAT }} + GITHUB_API_VERSION: 2026-03-10 + INPUT_PR_NUMBER: ${{ inputs.pr_number }} + INPUT_MODE: ${{ inputs.mode }} + INPUT_MODEL: ${{ inputs.model }} + INPUT_CUSTOM_AGENT: ${{ inputs.custom_agent }} + INPUT_WAIT_FOR_COMPLETION: ${{ inputs.wait_for_completion }} + INPUT_TIMEOUT_MINUTES: ${{ inputs.timeout_minutes }} + + 
steps: + - name: Validate inputs and secret + run: | + set -euo pipefail + + if ! [[ "$INPUT_PR_NUMBER" =~ ^[0-9]+$ ]]; then + echo "::error::pr_number must be a positive integer." + exit 1 + fi + + if ! [[ "$INPUT_TIMEOUT_MINUTES" =~ ^[0-9]+$ ]] || [ "$INPUT_TIMEOUT_MINUTES" -lt 1 ] || [ "$INPUT_TIMEOUT_MINUTES" -gt 180 ]; then + echo "::error::timeout_minutes must be an integer between 1 and 180." + exit 1 + fi + + if [ -n "$INPUT_CUSTOM_AGENT" ] && ! [[ "$INPUT_CUSTOM_AGENT" =~ ^[a-z0-9][a-z0-9-]{1,48}[a-z0-9]$ ]]; then + echo "::error::custom_agent must be empty or match the GitHub custom agent identifier pattern." + exit 1 + fi + + if [ -z "$GH_TOKEN" ]; then + echo "::error::COPILOT_AGENT_PAT secret is required. Use a user token that can call the Copilot Agent Tasks API." + exit 1 + fi + + USER_STATUS=$(curl -sS -o /tmp/user.json -w "%{http_code}" \ + -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer $GH_TOKEN" \ + -H "X-GitHub-Api-Version: $GITHUB_API_VERSION" \ + https://api.github.com/user) + + if [ "$USER_STATUS" != "200" ]; then + echo "::error::COPILOT_AGENT_PAT is invalid or does not identify a GitHub user token (HTTP $USER_STATUS)." + cat /tmp/user.json + exit 1 + fi + + - name: Fetch PR metadata + id: pr + run: | + set -euo pipefail + + PR_STATUS=$(curl -sS -o /tmp/pr.json -w "%{http_code}" \ + -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer $GH_TOKEN" \ + -H "X-GitHub-Api-Version: $GITHUB_API_VERSION" \ + "https://api.github.com/repos/${GITHUB_REPOSITORY}/pulls/${INPUT_PR_NUMBER}") + + if [ "$PR_STATUS" != "200" ]; then + echo "::error::Unable to load PR #${INPUT_PR_NUMBER} from ${GITHUB_REPOSITORY} (HTTP $PR_STATUS)." 
+ cat /tmp/pr.json + exit 1 + fi + + echo "pr_url=$(jq -r '.html_url' /tmp/pr.json)" >> "$GITHUB_OUTPUT" + echo "pr_title<<EOF" >> "$GITHUB_OUTPUT" + jq -r '.title' /tmp/pr.json >> "$GITHUB_OUTPUT" + echo "EOF" >> "$GITHUB_OUTPUT" + echo "base_ref=$(jq -r '.base.ref' /tmp/pr.json)" >> "$GITHUB_OUTPUT" + echo "head_ref=$(jq -r '.head.ref' /tmp/pr.json)" >> "$GITHUB_OUTPUT" + echo "author=$(jq -r '.user.login' /tmp/pr.json)" >> "$GITHUB_OUTPUT" + echo "is_draft=$(jq -r '.draft' /tmp/pr.json)" >> "$GITHUB_OUTPUT" + + - name: Resolve pipeline settings + id: config + run: | + set -euo pipefail + + if [ -n "$INPUT_CUSTOM_AGENT" ]; then + CUSTOM_AGENT="$INPUT_CUSTOM_AGENT" + elif [ "$INPUT_MODE" = "plan-only" ]; then + CUSTOM_AGENT="pr-review-planner" + else + CUSTOM_AGENT="pr-pipeline-orchestrator" + fi + + if [ "$INPUT_MODE" = "full-followup-pr" ]; then + CREATE_PULL_REQUEST=true + else + CREATE_PULL_REQUEST=false + fi + + echo "custom_agent=$CUSTOM_AGENT" >> "$GITHUB_OUTPUT" + echo "create_pull_request=$CREATE_PULL_REQUEST" >> "$GITHUB_OUTPUT" + + - name: Build task payload + id: payload + env: + PR_URL: ${{ steps.pr.outputs.pr_url }} + PR_TITLE: ${{ steps.pr.outputs.pr_title }} + PR_BASE_REF: ${{ steps.pr.outputs.base_ref }} + PR_HEAD_REF: ${{ steps.pr.outputs.head_ref }} + PR_AUTHOR: ${{ steps.pr.outputs.author }} + PR_IS_DRAFT: ${{ steps.pr.outputs.is_draft }} + RESOLVED_CUSTOM_AGENT: ${{ steps.config.outputs.custom_agent }} + CREATE_PULL_REQUEST: ${{ steps.config.outputs.create_pull_request }} + run: | + set -euo pipefail + + EVENT_CONTENT="Run the ${INPUT_MODE} Copilot PR pipeline for pull request #${INPUT_PR_NUMBER} in ${GITHUB_REPOSITORY}." 
+ printf '%s' "$EVENT_CONTENT" > /tmp/event-content.txt + + if [ "$INPUT_MODE" = "plan-only" ]; then + cat > /tmp/problem-statement.txt < /tmp/problem-statement.txt < /tmp/task-payload-with-agent.json + + jq 'del(.custom_agent)' /tmp/task-payload-with-agent.json > /tmp/task-payload-without-agent.json + + - name: Create agent task + id: create + env: + RESOLVED_CUSTOM_AGENT: ${{ steps.config.outputs.custom_agent }} + run: | + set -euo pipefail + + create_task() { + local payload_file="$1" + local output_file="$2" + + curl -sS -o "$output_file" -w "%{http_code}" \ + -X POST \ + -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer $GH_TOKEN" \ + -H "X-GitHub-Api-Version: $GITHUB_API_VERSION" \ + "https://api.github.com/agents/repos/${GITHUB_REPOSITORY}/tasks" \ + --data "@${payload_file}" + } + + HTTP_STATUS=$(create_task /tmp/task-payload-with-agent.json /tmp/task-response.json) + USED_CUSTOM_AGENT=true + + if [ "$HTTP_STATUS" != "201" ]; then + echo "::warning::Task creation with custom_agent failed (HTTP $HTTP_STATUS)." + cat /tmp/task-response.json + + HTTP_STATUS=$(create_task /tmp/task-payload-without-agent.json /tmp/task-response.json) + USED_CUSTOM_AGENT=false + fi + + if [ "$HTTP_STATUS" != "201" ]; then + echo "::error::Unable to create Copilot agent task (HTTP $HTTP_STATUS)." 
+ cat /tmp/task-response.json + exit 1 + fi + + echo "used_custom_agent=$USED_CUSTOM_AGENT" >> "$GITHUB_OUTPUT" + echo "task_id=$(jq -r '.id' /tmp/task-response.json)" >> "$GITHUB_OUTPUT" + echo "task_url=$(jq -r '.url' /tmp/task-response.json)" >> "$GITHUB_OUTPUT" + echo "task_html_url=$(jq -r '.html_url' /tmp/task-response.json)" >> "$GITHUB_OUTPUT" + echo "task_state=$(jq -r '.state' /tmp/task-response.json)" >> "$GITHUB_OUTPUT" + + - name: Write launch summary + env: + PR_URL: ${{ steps.pr.outputs.pr_url }} + TASK_HTML_URL: ${{ steps.create.outputs.task_html_url }} + TASK_ID: ${{ steps.create.outputs.task_id }} + USED_CUSTOM_AGENT: ${{ steps.create.outputs.used_custom_agent }} + RESOLVED_CUSTOM_AGENT: ${{ steps.config.outputs.custom_agent }} + CREATE_PULL_REQUEST: ${{ steps.config.outputs.create_pull_request }} + run: | + { + echo "## Copilot PR pipeline task launched" + echo "" + echo "- Repository: \`${GITHUB_REPOSITORY}\`" + echo "- Target PR: [#${INPUT_PR_NUMBER}](${PR_URL})" + echo "- Mode: \`${INPUT_MODE}\`" + echo "- Model: \`${INPUT_MODEL}\`" + echo "- Requested custom agent: \`${RESOLVED_CUSTOM_AGENT}\`" + echo "- Custom agent accepted by API: \`${USED_CUSTOM_AGENT}\`" + echo "- Follow-up PR creation enabled: \`${CREATE_PULL_REQUEST}\`" + echo "- Task ID: \`${TASK_ID}\`" + echo "- Task URL: ${TASK_HTML_URL}" + echo "" + echo "If this run used \`full-followup-pr\`, the agent is expected to open a new remediation PR rather than directly mutate the existing PR branch." 
+ } >> "$GITHUB_STEP_SUMMARY" + + - name: Wait for completion + if: ${{ inputs.wait_for_completion }} + id: wait + env: + TASK_ID: ${{ steps.create.outputs.task_id }} + run: | + set -euo pipefail + + DEADLINE=$(( $(date +%s) + (INPUT_TIMEOUT_MINUTES * 60) )) + LAST_RESPONSE="" + + while [ "$(date +%s)" -lt "$DEADLINE" ]; do + STATUS=$(curl -sS -o /tmp/task-status.json -w "%{http_code}" \ + -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer $GH_TOKEN" \ + -H "X-GitHub-Api-Version: $GITHUB_API_VERSION" \ + "https://api.github.com/agents/repos/${GITHUB_REPOSITORY}/tasks/${TASK_ID}") + + if [ "$STATUS" != "200" ]; then + echo "::error::Unable to read task status for ${TASK_ID} (HTTP $STATUS)." + cat /tmp/task-status.json + exit 1 + fi + + LAST_RESPONSE=/tmp/task-status.json + STATE=$(jq -r '.state' "$LAST_RESPONSE") + echo "Current task state: $STATE" + + case "$STATE" in + completed) + break + ;; + failed|timed_out|cancelled) + echo "::error::Copilot task ended in state '$STATE'." + cat "$LAST_RESPONSE" + exit 1 + ;; + waiting_for_user) + echo "::notice::Copilot task is waiting for user input." + break + ;; + esac + + sleep 30 + done + + FINAL_STATE=$(jq -r '.state' "$LAST_RESPONSE") + TASK_HTML_URL=$(jq -r '.html_url' "$LAST_RESPONSE") + SESSION_HEAD_REF=$(jq -r '.sessions[0].head_ref // empty' "$LAST_RESPONSE") + GENERATED_PR_IDS=$(jq -r ' + [.artifacts[]? 
| + if .provider == "github" and .type == "pull" then + (.data.id | tostring) + elif .provider == "github" and .type == "github_resource" and (.data.type // "") == "pull_request" then + (.data.id | tostring) + else + empty + end] | join(", ") + ' "$LAST_RESPONSE") + + echo "final_state=$FINAL_STATE" >> "$GITHUB_OUTPUT" + echo "task_html_url=$TASK_HTML_URL" >> "$GITHUB_OUTPUT" + echo "session_head_ref=$SESSION_HEAD_REF" >> "$GITHUB_OUTPUT" + echo "generated_pr_ids=$GENERATED_PR_IDS" >> "$GITHUB_OUTPUT" + + { + echo "## Copilot PR pipeline final state" + echo "" + echo "- Task state: \`${FINAL_STATE}\`" + echo "- Task URL: ${TASK_HTML_URL}" + if [ -n "$SESSION_HEAD_REF" ]; then + echo "- Agent branch: \`${SESSION_HEAD_REF}\`" + fi + if [ -n "$GENERATED_PR_IDS" ]; then + echo "- Generated PR artifacts: \`${GENERATED_PR_IDS}\`" + fi + } >> "$GITHUB_STEP_SUMMARY" diff --git a/.github/workflows/deploy-nuget-org.yml b/.github/workflows/deploy-nuget-org.yml index 994a71e..b5dfa67 100644 --- a/.github/workflows/deploy-nuget-org.yml +++ b/.github/workflows/deploy-nuget-org.yml @@ -37,7 +37,7 @@ jobs: run: dotnet tool install --global dotnet-coverage - name: SonarScanner Begin shell: pwsh - run: dotnet sonarscanner begin /k:"${{ env.SONAR_PROJECT_KEY }}" /o:"${{ env.SONAR_ORGANIZATION }}" /d:sonar.login="${{ secrets.SONAR_TOKEN }}" /d:sonar.cs.opencover.reportsPaths=**/CoverageResults/coverage.opencover.xml /d:sonar.host.url="https://sonarcloud.io" + run: dotnet sonarscanner begin /k:"${{ env.SONAR_PROJECT_KEY }}" /o:"${{ env.SONAR_ORGANIZATION }}" /d:sonar.login="${{ secrets.SONAR_TOKEN }}" - name: Build shell: pwsh run: dotnet build ./Ploch.Data.sln --no-restore diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 137fce8..3bd029e 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -91,9 +91,49 @@ jobs: - name: Install mono run: | - sudo apt install -y ca-certificates gnupg - sudo gpg --homedir /tmp 
--no-default-keyring --keyring /usr/share/keyrings/mono-official-archive-keyring.gpg --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF - echo "deb [signed-by=/usr/share/keyrings/mono-official-archive-keyring.gpg] https://download.mono-project.com/repo/ubuntu stable-focal main" | sudo tee /etc/apt/sources.list.d/mono-official-stable.list + sudo apt install -y ca-certificates gnupg curl + KEYRING=/usr/share/keyrings/mono-official-archive-keyring.gpg + KEY_FINGERPRINT=3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF + IMPORTED=0 + # Primary: download the key directly from the mono project's HTTPS + # endpoint. This avoids GPG keyservers entirely, which have been + # unreliable (keyserver.ubuntu.com / pgp.mit.edu unresponsive, + # keys.openpgp.org strips UIDs). + if curl -fsSL --max-time 30 https://download.mono-project.com/repo/xamarin.gpg -o /tmp/mono.gpg.raw; then + # The file may be either ASCII-armored or binary; --dearmor + # handles the armored case, and a binary keyring can be copied + # as-is. Try dearmor first; if it fails, treat the file as binary. + if sudo gpg --dearmor < /tmp/mono.gpg.raw > /tmp/mono.gpg.bin 2>/dev/null && [ -s /tmp/mono.gpg.bin ]; then + sudo cp /tmp/mono.gpg.bin "$KEYRING" + else + sudo cp /tmp/mono.gpg.raw "$KEYRING" + fi + sudo chmod 644 "$KEYRING" + if sudo gpg --homedir /tmp --no-default-keyring --keyring "$KEYRING" --list-keys "$KEY_FINGERPRINT" 2>/dev/null | grep -q '^uid'; then + echo "Imported mono signing key directly from download.mono-project.com" + IMPORTED=1 + fi + fi + # Fallback: keyserver loop (each capped at 30s so an unresponsive + # server cannot hang the build). + if [ "$IMPORTED" -ne 1 ]; then + echo "Direct download failed or yielded no usable key; falling back to keyservers..." 
+ sudo rm -f "$KEYRING" + for ks in hkp://keyserver.ubuntu.com:80 hkp://pgp.mit.edu; do + sudo timeout 30 gpg --homedir /tmp --no-default-keyring --keyring "$KEYRING" --keyserver "$ks" --recv-keys "$KEY_FINGERPRINT" || true + if sudo gpg --homedir /tmp --no-default-keyring --keyring "$KEYRING" --list-keys "$KEY_FINGERPRINT" 2>/dev/null | grep -q '^uid'; then + echo "Imported mono signing key (with UID) from $ks" + IMPORTED=1 + break + fi + echo "Keyserver $ks did not yield a usable key, trying next..." + done + fi + if [ "$IMPORTED" -ne 1 ]; then + echo "::error::Failed to import mono signing key from any source" + exit 1 + fi + echo "deb [signed-by=$KEYRING] https://download.mono-project.com/repo/ubuntu stable-focal main" | sudo tee /etc/apt/sources.list.d/mono-official-stable.list sudo apt update sudo apt install -y mono-devel diff --git a/.gitignore b/.gitignore index ecf5bc6..2943154 100644 --- a/.gitignore +++ b/.gitignore @@ -408,14 +408,32 @@ codestream.xml **/.idea/**/sonarlint.xml # AI Tools Config -.claude/ +.claude/skills/winui3-* +.claude/settings.local.json +.claude/scheduled_tasks.lock .contextstream/ .cursor/ .windsurf/ .cursorrules +.mcp.json +.vscode/mcp.json +.github/skills/ + +# JetBrains Rider — additional per-user files not already covered +**/.idea*/AICommit.xml +**/.idea*/indexLayout.xml +**/.idea/.idea.Data.EFCore.dir/ +src/**/.idea/ + +# Local dev artefacts +identifier.sqlite +*.sqlite +temp-research-*.md +temp-*.md # Backups of cs files **/*.cs.bak **/*.csproj.bak **/*.props.bak **/*.md.bak +temp/ diff --git a/.idea/.idea.Ploch.Data/.idea/indexLayout.xml b/.idea/.idea.Ploch.Data/.idea/indexLayout.xml deleted file mode 100644 index 1ead36c..0000000 --- a/.idea/.idea.Ploch.Data/.idea/indexLayout.xml +++ /dev/null @@ -1,10 +0,0 @@ - - - - - .github - - - - - \ No newline at end of file diff --git a/AGENTS.md b/AGENTS.md index 06ccadb..ff79b2a 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -4,8 +4,13 @@ # Workspace ID: 
57db5f34-e7f0-42c0-86c4-bb981f96c880 # Codex CLI Instructions + +> **All ContextStream rules below apply only when ContextStream tools are available in the current environment.** If they are not loaded, proceed with the platform's available tools (Glob, Grep, Read, Edit, Write, Bash, etc.) and ignore the "use ContextStream first" directives. The remainder of this document assumes the tools are present. + ## 🚨 MANDATORY STARTUP: CONTEXT-FIRST FLOW 🚨 +If ContextStream tools are available, on the first message of every session call `init(...)` then `context(user_message="...")`. On subsequent messages, call `context(user_message="...")` first by default. A narrow bypass is allowed only for immediate read-only ContextStream calls when prior context is still fresh and no state-changing tool has run. If ContextStream tools are unavailable, proceed with the platform's available tools. + | Message | What to Call | @@ -13,7 +18,7 @@ | **First message in session** | `init()` → `context(user_message="")` BEFORE any other tool | | **Subsequent messages (default)** | `context(user_message="")` FIRST, then other tools | | **Narrow bypass** | Immediate read-only ContextStream calls are allowed only when prior context is fresh and no state-changing tool has run | -| **Before Glob/Grep/Read/Search** | `search(mode="auto", query="...")` FIRST | +| **Before Glob/Grep/Read/Search** | Use `search(mode="auto", query="...")` when available; otherwise use available local tools directly | @@ -102,12 +107,12 @@ STOP → Call search(mode="auto", query="...") FIRST - `Read(file)` for discovery → Use `search(mode="auto", query="...")` instead - `Task(subagent_type="Explore")` → Use `search(mode="auto")` instead -✅ **ALWAYS DO THIS:** +✅ **ALWAYS DO THIS (when ContextStream is available):** 1. `search(mode="auto", query="what you're looking for")` -2. Only use local tools (Glob/Grep/Read) if ContextStream returns **0 results** +2. 
Use local tools (Glob/Grep/Read) if ContextStream is **unavailable, fails, times out, or returns 0 results** 3. Use Read ONLY for exact file edits after you know the file path -This applies to **EVERY search** throughout the **ENTIRE conversation**, not just the first message. +This applies to **EVERY search** throughout the **ENTIRE conversation** when ContextStream tools are loaded — not just the first message. --- @@ -121,7 +126,7 @@ This applies to **EVERY search** throughout the **ENTIRE conversation**, not jus **When `init` returns `indexing_status: "started"` or `"refreshing"`:** - Background indexing is running automatically - Search results will be available within seconds to minutes -- **DO NOT fall back to local tools** - wait for ContextStream search to work +- Prefer waiting for ContextStream search rather than falling back immediately; fall back to local tools only if it stays unavailable, errors, or returns 0 results after retry - If search returns 0 results initially, try again after a moment **Only manually trigger indexing if:** @@ -345,7 +350,7 @@ session(action="capture", event_type="session_snapshot", title="Pre-compaction s **Graph data:** If graph queries (`dependencies`, `impact`) return empty, run `graph(action="ingest")` once. -**NEVER fall back to local tools (Glob/Grep/Read) just because search returned 0 results on first try.** Retry first. +**Don't fall back to local tools (Glob/Grep/Read) just because search returned 0 results on first try — retry first.** Falling back is appropriate when ContextStream is unavailable, the retry still returns 0 results, or the tools error out. ### Enhanced Context (Server-Side Warnings) @@ -367,7 +372,7 @@ session(action="capture", event_type="session_snapshot", title="Pre-compaction s ### Search & Code Intelligence (ContextStream-first) -⚠️ **STOP: Before using Search/Glob/Grep/Read/Explore** → Call `search(mode="auto")` FIRST. Use local tools ONLY if ContextStream returns 0 results. 
+⚠️ **STOP: Before using Search/Glob/Grep/Read/Explore** → Call `search(mode="auto")` FIRST when ContextStream is available. Use local tools if ContextStream is unavailable, fails, times out, or returns 0 results. **❌ WRONG workflow (wastes tokens, slow):** ``` @@ -668,10 +673,11 @@ search(mode="auto", query="what you're looking for") - Then use local Read/Grep only on paths returned by ContextStream. ### When Local Tools Are OK: +✅ ContextStream tools are unavailable in the current environment ✅ Project is not indexed ✅ Index is stale/outdated (>7 days old) ✅ ContextStream search returns 0 results -✅ ContextStream returns errors +✅ ContextStream returns errors or times out ✅ User explicitly requests local tools ### When to Use ContextStream Search: diff --git a/CLAUDE.md b/CLAUDE.md index 635337b..7dc3cb3 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -5,15 +5,16 @@ # ContextStream Rules -**MANDATORY STARTUP:** On the first message of EVERY session call `mcp__contextstream__init(...)` then `mcp__contextstream__context(user_message="...")`. On subsequent messages, call `mcp__contextstream__context(user_message="...")` first by default. A narrow bypass is allowed only for immediate read-only ContextStream calls when prior context is still fresh and no state-changing tool has run. +**MANDATORY STARTUP:** If ContextStream tools are available, on the first message of every session call `mcp__contextstream__init(...)` then `mcp__contextstream__context(user_message="...")`. On subsequent messages, call `mcp__contextstream__context(user_message="...")` first by default. A narrow bypass is allowed only for immediate read-only ContextStream calls when prior context is still fresh and no state-changing tool has run. If ContextStream tools are unavailable, proceed with the platform's available tools. 
## Quick Rules + | Message | Required | |---------|----------| | **First message in session** | `mcp__contextstream__init(...)` → `mcp__contextstream__context(user_message="...")` BEFORE any other tool | | **Subsequent messages (default)** | `mcp__contextstream__context(user_message="...")` FIRST, then other tools (narrow read-only bypass allowed when context is fresh + state is unchanged) | -| **Before file search** | `mcp__contextstream__search(mode="...", query="...")` BEFORE Glob/Grep/Read | +| **Before file search** | Use `mcp__contextstream__search(mode="...", query="...")` when available; otherwise use available local tools (Glob/Grep/Read) directly | ## Detailed Rules @@ -23,7 +24,9 @@ **Common queries — use these exact tool calls:** - "list lessons" / "show lessons" → `mcp__contextstream__session(action="get_lessons")` +- "save lesson" / "remember this lesson" / "lesson learned" / "I made a mistake" → `mcp__contextstream__session(action="capture_lesson", title="...", trigger="...", impact="...", prevention="...", severity="low|medium|high|critical")` — **NEVER store lessons in local files** (e.g. `~/.claude/.../memory/`, `.cursorrules`, scratch markdown). Lessons live in ContextStream so they auto-surface as `[LESSONS_WARNING]` on future turns and across sessions. 
- "list decisions" / "show decisions" / "how many decisions" → `mcp__contextstream__memory(action="decisions")` +- "save decision" / "decided to" → `mcp__contextstream__session(action="capture", event_type="decision", title="...", content="...")` - "list docs" → `mcp__contextstream__memory(action="list_docs")` - "list tasks" → `mcp__contextstream__memory(action="list_tasks")` - "list todos" → `mcp__contextstream__memory(action="list_todos")` @@ -31,8 +34,12 @@ - "list events" → `mcp__contextstream__memory(action="list_events")` - "show snapshots" / "list snapshots" → `mcp__contextstream__memory(action="list_events", event_type="session_snapshot")` - "save snapshot" → `mcp__contextstream__session(action="capture", event_type="session_snapshot", title="...", content="...")` +- "what did we do last session" / "past sessions" / "previous work" / "pick up where we left off" → `mcp__contextstream__session(action="recall", query="...")` (ranked context) OR `mcp__contextstream__memory(action="list_transcripts", limit=10)` (chronological list) +- "search past sessions" / "find in past transcripts" / "when did we discuss X" → `mcp__contextstream__memory(action="search_transcripts", query="...")` — full-text search over saved conversation transcripts +- "show transcript" / "read session " → `mcp__contextstream__memory(action="get_transcript", transcript_id="...")` - "list skills" / "show my skills" → `mcp__contextstream__skill(action="list")` -- "create a skill" → `mcp__contextstream__skill(action="create", name="...", instruction_body="...", trigger_patterns=[...])` +- "create a skill" → `mcp__contextstream__skill(action="create", name="...", instruction_body="...", project_id="", trigger_patterns=[...])` +- "update a skill" → `mcp__contextstream__skill(action="update", name="...", instruction_body="...", change_summary="...")` - "run skill" / "use skill" → `mcp__contextstream__skill(action="run", name="...")` - "import skills" / "import my CLAUDE.md" → 
`mcp__contextstream__skill(action="import", file_path="...", format="auto")` @@ -44,19 +51,87 @@ If the `instruct` tool is available, run `mcp__contextstream__instruct(action="g **Why?** `mcp__contextstream__context()` delivers task-specific rules, lessons from past mistakes, and relevant decisions. Skip it = fly blind. +## Finding Information — Search ContextStream Knowledge, Not Just Code + +**Auto-grounding:** Every `mcp__contextstream__context(user_message="...")` call may include a `[GROUNDING]` block — pre-ranked prior work (transcripts, snapshots, docs, decisions, lessons) for **this** message. When you see it, read those hits **before** fanning out into code search; skipping search entirely is often correct. Outside `mcp__contextstream__context()`, use `mcp__contextstream__session(action="ground", user_message="...")` for the same one-shot bundle (recall + docs + decisions + lessons + skills + git). + +When you need information, do not default to code search or trial-and-error. ContextStream stores far more than source — docs, decisions, lessons, preferences, plans, tasks, todos, skills, memory nodes, and full session transcripts all live behind dedicated tools. 
Pick the right knowledge surface by what you're looking for: + +- **Source code / symbol / file** → `mcp__contextstream__search(mode="auto", query="...")` +- **Why we did X / past decisions** → `mcp__contextstream__memory(action="decisions", query="...")` +- **Architecture / spec / design doc** → `mcp__contextstream__memory(action="list_docs")` then `mcp__contextstream__memory(action="get_doc", doc_id="title or UUID")` +- **Prior mistakes ("never do X again")** → `mcp__contextstream__session(action="get_lessons", query="...")` +- **User preferences / conventions / constraints** → already surfaced as `[PREFERENCE]`; also `mcp__contextstream__memory(action="list_nodes", node_type="preference")` or `mcp__contextstream__memory(action="list_nodes", node_type="constraint")` +- **Open work / tasks / todos** → `mcp__contextstream__memory(action="list_tasks")` / `mcp__contextstream__memory(action="list_todos")` +- **Active or past plans** → `mcp__contextstream__session(action="list_plans")` then `mcp__contextstream__session(action="get_plan", plan_id="...")` +- **Reusable workflows / skills** → `mcp__contextstream__skill(action="list")` then `mcp__contextstream__skill(action="run", name="...")` +- **"What did we do before?" (continuation work)** → `mcp__contextstream__session(action="recall", query="...")` — see the Past Sessions ladder below +- **Unsure which surface** → `mcp__contextstream__memory(action="search", query="...")` — hybrid across memory nodes + docs; falls back to `mcp__contextstream__session(action="recall", query="...")` for transcript/snapshot coverage + +Default assumption: if the user asks "how do we do X?", "why did we choose Y?", "what's the pattern for Z?", or "did we already decide about Q?" — the answer is likely in a doc, decision, lesson, plan, or skill, NOT in the code. Check the right knowledge surface BEFORE reading source files or re-deriving the answer. 
+ +Before guessing, improvising, or struggling through a workflow you don't fully know: + +- Start with `mcp__contextstream__context(...)` and obey `[GROUNDING]` (prior-work anchors), `[MATCHED_SKILLS]`, `[LESSONS_WARNING]`, `[PREFERENCE]`, `[DECISIONS]`, `[MEMORY]`, and `` output — those are already filtered to the current task +- Treat `[LESSONS_WARNING]` as active working instructions for the current task, not optional background context; apply them immediately and keep them in mind until the task is done +- Prefer surfaced ContextStream knowledge over inventing a new workflow from memory + +## Past Sessions Are Queryable — USE THEM + +### Auto-Grounding (in `mcp__contextstream__context()`) + +When `mcp__contextstream__context()` returns `[GROUNDING]`, those lines are **pre-ranked prior work for your current message** — read them first (transcript/snapshot/doc/decision/lesson entry points). Skipping code search is often correct. For the same bundle **outside** `mcp__contextstream__context()`, call `mcp__contextstream__session(action="ground", user_message="...")`. + +Transcripts for every turn of every session are captured and indexed automatically. Session snapshots bookmark turning points. **Before asking the user what you did last time, or re-deriving context you built together previously, check the transcript + snapshot layer.** It's fast, it's complete, and the user is paying for it. + +Triggers to query past sessions: + +- User says "last time", "previous", "yesterday", "earlier", "we decided", "we talked about", "pick up where we left off", "what were we working on" +- You have a task that's clearly a continuation (e.g. finishing a refactor that's half-done on disk) +- You're about to ask a clarifying question whose answer is likely in a prior session +- You're unsure whether a decision or approach has already been made + +Escalation ladder — walk it in order and stop at the first step that answers the question: + +1. 
**`mcp__contextstream__session(action="recall", query="")`** — always the first call. Ranked fusion across transcripts, snapshots, docs, and decisions. Covers 80% of "what did we do before" questions. + +2. **`mcp__contextstream__memory(action="search_transcripts", query="")`** — fall through when `recall` returns thin or off-topic results, or when you need every mention of a specific term. Full-text search across ALL saved transcripts. + +3. **`mcp__contextstream__memory(action="list_events", event_type="session_snapshot")`** — when you want the turning-point bookmarks (manual + auto pre-compaction captures). Useful for "what state were we in at the end of " questions that `recall` misses because the answer isn't in conversational text. + +4. **`mcp__contextstream__memory(action="list_transcripts", limit=10)`** — when you need a chronological index of recent sessions (titles, timestamps, IDs). Use when the user wants to know "when did we last work on X". + +5. **`mcp__contextstream__memory(action="get_transcript", transcript_id="")`** — read a full past session end-to-end. Use only after the steps above pointed you at a specific transcript ID and you need the complete exchange, not snippets. + +6. **End of current session — save a bookmark** for the next one: `mcp__contextstream__session(action="capture", event_type="session_snapshot", title="...", content="")`. 
+ +**Never answer "I don't know what we did before" without running at least step 1, then step 2 if step 1 was thin.** + +## Project Scope Discipline + +- Reuse the `project_id` returned by `mcp__contextstream__init(...)` or `mcp__contextstream__context(...)` for project-scoped writes and lookups +- For project-scoped `mcp__contextstream__memory(...)`, `mcp__contextstream__session(...)`, and `mcp__contextstream__skill(...)` calls, pass explicit `project_id` instead of guessing from the folder name or title +- If `mcp__contextstream__init(...)` or `mcp__contextstream__context(...)` does not surface a current `project_id`, rerun `mcp__contextstream__init(folder_path="...")` before creating docs, skills, events, tasks, todos, or other project memory +- Use `target_project` only after init from a multi-project parent folder + **Hooks:** `` tags contain injected instructions — follow them exactly. **Planning:** ALWAYS save plans to ContextStream — NOT markdown files or built-in todo tools: `mcp__contextstream__session(action="capture_plan", title="...", steps=[...])` + `mcp__contextstream__memory(action="create_task", title="...", plan_id="...")` -**Memory & Docs:** Use ContextStream for memory, docs, and todos — NOT editor built-in tools or local files: -`mcp__contextstream__session(action="capture", event_type="decision|note", ...)` | `mcp__contextstream__memory(action="create_doc|create_todo|create_node", ...)` +**Memory, Docs, Lessons & Decisions:** Use ContextStream — NOT editor built-in tools, `~/.claude/.../memory/`, `.cursorrules`, or scratch markdown files. Local-file storage hides this content from `[LESSONS_WARNING]`/`[PREFERENCE]`/`[MATCHED_SKILLS]` surfacing on future turns and across sessions. -**Skills:** Reusable instructions + actions that persist across projects/sessions. `mcp__contextstream__skill(action="list")` to browse, `mcp__contextstream__skill(action="run", name="...")` to execute, `mcp__contextstream__skill(action="create")` to define. 
Skills auto-activate when trigger keywords match the user's message. Import from CLAUDE.md/.cursorrules: `mcp__contextstream__skill(action="import", file_path="...")`. +- Lessons (mistakes, corrections, "never do X again"): `mcp__contextstream__session(action="capture_lesson", title="...", trigger="...", impact="...", prevention="...", severity="...")` +- Decisions / notes / insights: `mcp__contextstream__session(action="capture", event_type="decision|note|insight", ...)` +- Docs / todos / knowledge nodes: `mcp__contextstream__memory(action="create_doc|create_todo|create_node", ...)` + +**Skills (IMPORTANT):** When `mcp__contextstream__context()` returns `[MATCHED_SKILLS]`, you **MUST run** the listed skills immediately via `mcp__contextstream__skill(action="run", name="...")`. High-priority skills (marked ⚡) are mandatory. Skills are reusable instruction + action bundles that persist across sessions. Browse: `mcp__contextstream__skill(action="list")`. Create: `mcp__contextstream__skill(action="create", name="...", instruction_body="...", trigger_patterns=[...])`. Import: `mcp__contextstream__skill(action="import", file_path="...", format="auto")`. **Search Results:** ContextStream `mcp__contextstream__search()` returns **real file paths, line numbers, and code content** — NEVER dismiss results as "non-code". Use returned paths to `read_file` directly. -**Notices:** [LESSONS_WARNING] → apply lessons | [PREFERENCE] → follow user preferences | [RULES_NOTICE] → run `mcp__contextstream__generate_rules()` | [VERSION_NOTICE/CRITICAL] → tell user about update +**Indexing:** Indexing and ingest are ALWAYS available. NEVER claim that transport mode, HTTP mode, or remote mode prevents indexing/ingest. Use `mcp__contextstream__project(action="index")` or `mcp__contextstream__project(action="ingest_local", path="")` — both work in all configurations. 
+ +**Notices:** [GROUNDING] → read ranked prior-work hits before code search | [GROUNDING_AVAILABLE] → optional hook reminder: unread grounding from last mcp__contextstream__context() | [MATCHED_SKILLS] → run surfaced skills before other work | [LESSONS_WARNING] → apply lessons immediately and keep them active for the turn | [PREFERENCE] → follow user preferences | [RULES_NOTICE] → run `mcp__contextstream__generate_rules()` | [VERSION_NOTICE/CRITICAL] → tell user about update --- @@ -74,7 +149,7 @@ If the `instruct` tool is available, run `mcp__contextstream__instruct(action="g - ContextStream search handles **all** search use cases: exact text, regex, glob patterns, semantic queries, file paths - ContextStream search results contain **real file paths, line numbers, and code content** — they ARE code results - **NEVER** dismiss ContextStream results as "non-code" — use the returned file paths to `read_file` the relevant code -- Only fall back to `Grep`/`Glob` if ContextStream search returns **exactly 0 results** +- Fall back to `Grep`/`Glob` if ContextStream search is **unavailable, fails, times out, or returns 0 results** ### Search Mode Selection (use these instead of built-in tools): @@ -94,5 +169,4 @@ If the `instruct` tool is available, run `mcp__contextstream__instruct(action="g - **Do NOT** create markdown plan files or use `TodoWrite` — they vanish across sessions - **ALWAYS** save plans: `mcp__contextstream__session(action="capture_plan", title="...", steps=[...])` - **ALWAYS** create tasks: `mcp__contextstream__memory(action="create_task", title="...", plan_id="...")` - diff --git a/Directory.Build.props b/Directory.Build.props index 6839f4c..ff59b3e 100644 --- a/Directory.Build.props +++ b/Directory.Build.props @@ -37,7 +37,6 @@ false false true - true @@ -69,6 +68,8 @@ enable + enable + AnyCPU;x64 default --- diff --git a/NuGet.Config b/NuGet.Config index b7d102a..cfef9f9 100644 --- a/NuGet.Config +++ b/NuGet.Config @@ -4,6 +4,7 @@ + @@ -13,5 +14,8 @@ + 
+ + diff --git a/Ploch.Data.slnx b/Ploch.Data.slnx index ee44699..3663c5a 100644 --- a/Ploch.Data.slnx +++ b/Ploch.Data.slnx @@ -52,6 +52,7 @@ + @@ -83,6 +84,9 @@ + + + @@ -90,6 +94,7 @@ + @@ -100,7 +105,26 @@ + + + + + + + + + + + + + + + + + + + @@ -124,6 +148,7 @@ + @@ -159,15 +184,19 @@ + + + + diff --git a/TODO.md b/TODO.md index 89b827d..1ce9576 100644 --- a/TODO.md +++ b/TODO.md @@ -1,6 +1,6 @@ # Agent TODO List -## Task 1: Implement changes for Issue #72 +## [DONE] Task 1: Implement changes for Issue #72 Implement changes required for . I was experimenting with how to implement some of the methods mentioned there in another projects where I was trying out the `SampleApp`. @@ -22,6 +22,29 @@ This project is located here: C:/DevNet/my/mrploch-temp/ploch-data-sample-app-te Keep in mind that the changes are mostly implemented already in the SampleApp in here: `C:/DevNet/my/mrploch-temp/ploch-data-sample-app-test/Ploch.Data.SampleApp.slnx`. You'll be in most cases just moving them into appropriate locations and adding test coverage and documentation. So base your changes on those. +## Task: Use DbContext for Validation in GenericRepository Integration Tests + +Across `tests/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTests/`, tests that verify entities were added/updated/deleted should use a fresh `DbContext` (via `CreateRootDbContext()`) instead of the repository under test. + +Using the same repository to verify what was written bypasses the true persistence check — the test passes even if the repository reads from its own tracking cache. A fresh `DbContext` (or a second `IUnitOfWork`) reads directly from the database, which is what we actually want to verify. 
+ +Example — instead of: + +```csharp +var result = await repository.GetByIdAsync(entity.Id); +result.Should().BeEquivalentTo(entity, options => options.WithEntityEquivalencyOptions()); +``` + +Use: + +```csharp +var dbContext = CreateRootDbContext(); +var result = await dbContext.Set().FindAsync(entity.Id); +result.Should().BeEquivalentTo(entity, options => options.WithEntityEquivalencyOptions()); +``` + +Affects all tests in: `ReadWriteRepositoryAsyncTests`, `ReadWriteRepositoryDeleteByIdTests`, `UnitOfWorkRepositoryAsyncSQLiteInMemoryTests`, and similar. + ## Task 2: Provide a Comprehensive Documentation for the Ploch.Data Libraries *Make the changes but don't commit them yet* @@ -45,3 +68,19 @@ Make sure the content is easy to read and follow. ALWAYS TEST commands and provi Store the main documentation in the `docs` folder, but also add README.md files to each of the projects (if they don't already have them), but this should only contain an overview, and link to the docs for fully detailed documentation. If a library already has a README.md, review it and update it if needed. Again, make sure the content is easy to read and follow. ALWAYS TEST! + +## Task 3: Improve integration testing experience and fix tests in this repo and update docs + +We need to fix the equivalency options helper, fix the failing tests. +We also need to add proper ability to create new db context each time, instead of a scoped same instance +Usage of IDbContextFactory +improve docs +Prompt: + +```markdown +Can you check the failing tests? Do proper research why the GetAll_should_return_entities_with_includes test is failing when asserting BeEquivalentTo. +I want this type of assertion to work. I've created a helper extension method WithEntityEquivalencyOptions to fix some of the equivalency options, +but it seems it's still not enough. 
For example, one failure is that when comparing the original and actual entity (the one obtained back from the db), the collection +property is null, while on the other it is empty. I want this type of comparison to succeed. The best option would be to fix the `WithEntityEquivalencyOptions` +method to allow such differences. Make the plan first. Try to ask codex for an option on this as well. +``` diff --git a/build-dotnet-commands.ps1 b/build-dotnet-commands.ps1 deleted file mode 100644 index e69de29..0000000 diff --git a/docs/copilot-cloud-agent-mcp.example.json b/docs/copilot-cloud-agent-mcp.example.json new file mode 100644 index 0000000..52baa83 --- /dev/null +++ b/docs/copilot-cloud-agent-mcp.example.json @@ -0,0 +1,14 @@ +{ + "mcpServers": { + "github-mcp-server": { + "type": "http", + "url": "https://api.githubcopilot.com/mcp", + "tools": [ + "*" + ], + "headers": { + "X-MCP-Toolsets": "repos,issues,users,pull_requests,actions,code_security,secret_protection,web_search" + } + } + } +} diff --git a/docs/copilot-cloud-agent-pipeline.md b/docs/copilot-cloud-agent-pipeline.md new file mode 100644 index 0000000..e94a91a --- /dev/null +++ b/docs/copilot-cloud-agent-pipeline.md @@ -0,0 +1,169 @@ +# Copilot Cloud Agent PR Pipeline + +This repository now contains a staged custom-agent setup for deep pull request work: + +- `.github/agents/repo-investigator.agent.md` +- `.github/agents/pr-review-planner.agent.md` +- `.github/agents/plan-critic.agent.md` +- `.github/agents/pr-remediation.agent.md` +- `.github/agents/pr-pipeline-orchestrator.agent.md` + +## What this setup gives you + +### Stage 1: repository investigation + +`repo-investigator` gathers project-specific understanding before detailed review or implementation. + +### Stage 2: PR review and planning + +`pr-review-planner` opens the PR, inspects the diff, reads linked tickets and related PRs or issues, checks all review comments and conversations, checks CI, and produces a remediation plan. 
+ +For non-trivial plans it is instructed to invoke `plan-critic`, which is configured to use `claude-opus-4.6`. + +### Stage 3: remediation + +`pr-remediation` implements the plan, validates changes, re-checks comments and CI, and loops back to planning if the situation changes. + +### Optional: one-entry orchestration + +`pr-pipeline-orchestrator` is the user-facing coordinator. It sequences the specialist agents explicitly. + +## Important platform limits + +### GitHub.com cloud agent does not support YAML `handoffs` + +GitHub documents that the `argument-hint` and `handoffs` properties are ignored for Copilot cloud agent on GitHub.com. This means a true native handoff graph is not available there. + +Because of that, this setup uses prompt-level orchestration and the `agent` tool alias instead of YAML handoffs. + +### If you need hard guarantees, use external orchestration + +If you need a deterministic pipeline with auditable stage boundaries, create separate agent tasks through the GitHub Agent Tasks REST API and launch them in sequence: + +1. `repo-investigator` +2. `pr-review-planner` +3. `pr-remediation` + +That approach is more reliable than depending only on prompt-driven delegation inside one task. + +## Cross-model review + +The hidden `plan-critic` agent is configured with: + +- `model: claude-opus-4.6` + +The other stage agents are configured with: + +- `model: gpt-5.3-codex` + +This gives you the pattern you asked for: the main working agents can use Codex while non-trivial plans are reviewed by Claude Opus. + +## Required repository configuration + +### 1. Keep the custom agent files in the default branch + +GitHub reads custom agents from `.github/agents/*.agent.md`. + +### 2. Configure writable GitHub MCP access if you want automated PR replies + +By default, the built-in GitHub MCP server is read-only and scoped to the current repository. That is not enough if you want the agent to reply to false-positive PR comments or conversations. 
+ +If you want automated comment replies and broader GitHub research, do the following in the repository settings: + +1. Go to `Settings -> Copilot -> Cloud agent`. +2. Add MCP configuration using the example from [copilot-cloud-agent-mcp.example.json](/C:/DevNet/my/mrploch/ploch-data/docs/copilot-cloud-agent-mcp.example.json). +3. Go to `Settings -> Environments`. +4. Create an environment named `copilot`. +5. Add an environment secret named `COPILOT_MCP_GITHUB_PERSONAL_ACCESS_TOKEN`. + +Use a fine-grained PAT with the narrowest permissions that still allow: + +- reading repository contents +- reading and writing pull request comments or review-thread replies +- reading and writing issue comments when needed +- reading Actions and check-run state + +If you only need read-only research, use the GitHub read-only MCP configuration instead. + +### 3. Add external ticketing MCP servers if your tickets live outside GitHub + +If the associated ticket can live in Jira, Azure Boards, Linear, or another system, add the corresponding MCP server to the repository Copilot configuration or the agent profile. Without that, the PR planner can only fully research GitHub-native issues and pull requests. + +### 4. Only add `copilot-setup-steps.yml` when your MCP servers need extra dependencies + +You do not need a setup workflow for the GitHub MCP server alone. You only need `.github/workflows/copilot-setup-steps.yml` if another MCP server requires packages or login steps that are not present on the default runner. + +## Suggested usage + +### Manual staged usage + +Use these agents in order: + +1. `repo-investigator` +2. `pr-review-planner` +3. Review the plan +4. 
`pr-remediation` + +### One-shot usage + +Use `pr-pipeline-orchestrator` and give it: + +- the PR number or URL +- whether you want plan-only or full remediation +- whether comment-reply automation is expected + +### GitHub Actions usage + +This repository also includes [copilot-pr-pipeline.yml](/C:/DevNet/my/mrploch/ploch-data/.github/workflows/copilot-pr-pipeline.yml). + +Use it from `Actions -> Copilot PR Pipeline -> Run workflow`. + +Inputs: + +- `pr_number` -- the existing PR to inspect +- `mode` -- `plan-only` or `full-followup-pr` +- `model` -- top-level task model +- `custom_agent` -- optional override if you want a different custom agent identifier +- `wait_for_completion` -- optionally poll until the task finishes or waits for input + +Behavior: + +- `plan-only` launches planning work without opening a PR +- `full-followup-pr` launches the full pipeline and asks Copilot to open a follow-up remediation PR instead of assuming it can mutate the existing PR branch directly +- the workflow first tries the current Agent Tasks API with `custom_agent` +- if GitHub rejects `custom_agent`, the workflow retries without that field and keeps the instructions in `problem_statement` + +Required secret: + +- `COPILOT_AGENT_PAT` -- a user token that can call the Copilot Agent Tasks API for this repository + +### REST API orchestration + +If you want a strict pipeline, create separate tasks with the Agent Tasks API. The task creation endpoint supports: + +- `event_content` +- `problem_statement` +- `model` +- `custom_agent` +- `base_ref` +- `create_pull_request` +- `event_url` +- `event_identifiers` + +Use that to run each stage separately and poll for completion before starting the next stage. + +## Recommended operating policy + +- Always require a written remediation plan before code changes start. +- Always require `plan-critic` review for non-trivial plans. +- Never allow the remediation stage to finish while required CI checks are still failing. 
+- Treat comment-reply automation as blocked until writable GitHub MCP access is configured and verified. +- When a change can affect package-consumer behavior, validate the SampleApp path that matches the risk. + +## What is still manual + +- Repository settings for Copilot cloud agent and the `copilot` environment +- PAT creation and permission scoping +- Any external orchestrator that creates separate agent tasks through the REST API + +Those parts cannot be fully committed into the repository because GitHub stores them in repository settings rather than source control. diff --git a/docs/getting-started.md b/docs/getting-started.md index 0147f5d..b9dc6c3 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -245,9 +245,11 @@ public class ProductRepositoryTests The `GenericRepositoryDataIntegrationTest` base class provides: - `DbContext` -- the configured EF Core context backed by in-memory SQLite. -- `CreateUnitOfWork()` -- creates a new `IUnitOfWork` instance. -- `CreateReadRepositoryAsync()` -- creates a typed read repository. -- `CreateReadWriteRepositoryAsync()` -- creates a typed read/write repository. +- `CreateUnitOfWork(bool useScopedProvider = true)` -- creates a new `IUnitOfWork` instance. +- `CreateReadRepositoryAsync(bool useScopedProvider = true)` -- creates a typed read repository. +- `CreateReadWriteRepositoryAsync(bool useScopedProvider = true)` -- creates a typed read/write repository. + +All helper methods resolve services from the scoped provider by default. Pass `false` to resolve from the root provider. 
## Next Steps diff --git a/docs/integration-testing.md b/docs/integration-testing.md index 7357e6c..954007a 100644 --- a/docs/integration-testing.md +++ b/docs/integration-testing.md @@ -79,13 +79,19 @@ Extends `DataIntegrationTest` with helper methods for creating repos ### What It Provides (in addition to DataIntegrationTest) -| Method | Returns | -|--------|---------| -| `CreateUnitOfWork()` | `IUnitOfWork` | -| `CreateReadRepositoryAsync()` | `IReadRepositoryAsync` | -| `CreateReadWriteRepositoryAsync()` | `IReadWriteRepositoryAsync` | -| `CreateReadRepository()` | `IReadRepository` | -| `CreateReadWriteRepository()` | `IReadWriteRepository` | +| Method | Returns | +|-------------------------------------------------------------------------------|-------------------------------------------| +| `CreateUnitOfWork(bool useScopedProvider = true)` | `IUnitOfWork` | +| `CreateQueryableRepository(bool useScopedProvider = true)` | `IQueryableRepository` | +| `CreateReadRepositoryAsync(bool useScopedProvider = true)` | `IReadRepositoryAsync` | +| `CreateReadWriteRepositoryAsync(bool useScopedProvider = true)` | `IReadWriteRepositoryAsync` | +| `CreateReadRepository(bool useScopedProvider = true)` | `IReadRepository` | +| `CreateReadWriteRepository(bool useScopedProvider = true)` | `IReadWriteRepository` | + +All helper methods use the scoped provider by default. Pass `false` to resolve from the root provider. + +TODO: Explain more when to use scoped and root - but also fix how the dbcontext can be created new each time (use of IDbContextFactory): +This needs to be fixed in tests. The `AddRepositories()` call is made automatically in `ConfigureServices`. 
diff --git a/opencode.json b/opencode.json new file mode 100644 index 0000000..6a7d936 --- /dev/null +++ b/opencode.json @@ -0,0 +1,26 @@ +{ + "mcp": { + "contextstream": { + "command": [ + "npx", + "-y", + "contextstream-mcp" + ], + "enabled": true, + "environment": { + "CONTEXTSTREAM_API_KEY": "{env:CONTEXTSTREAM_API_KEY}", + "CONTEXTSTREAM_AUTO_HIDE_INTEGRATIONS": "true", + "CONTEXTSTREAM_CONSOLIDATED": "true", + "CONTEXTSTREAM_HOOK_TRANSCRIPTS_ENABLED": "true", + "CONTEXTSTREAM_INCLUDE_STRUCTURED_CONTENT": "true", + "CONTEXTSTREAM_LOG_LEVEL": "quiet", + "CONTEXTSTREAM_OUTPUT_FORMAT": "compact", + "CONTEXTSTREAM_SEARCH_LIMIT": "15", + "CONTEXTSTREAM_SEARCH_MAX_CHARS": "2400", + "CONTEXTSTREAM_TOOLSET": "complete", + "CONTEXTSTREAM_TRANSCRIPTS_ENABLED": "true" + }, + "type": "local" + } + } +} \ No newline at end of file diff --git a/src/Data.EFCore.IntegrationTesting.FluentAssertions/EntitiesEquivalencyOptionsExtensions.cs b/src/Data.EFCore.IntegrationTesting.FluentAssertions/EntitiesEquivalencyOptionsExtensions.cs new file mode 100644 index 0000000..de08fd5 --- /dev/null +++ b/src/Data.EFCore.IntegrationTesting.FluentAssertions/EntitiesEquivalencyOptionsExtensions.cs @@ -0,0 +1,102 @@ +using FluentAssertions; +using FluentAssertions.Equivalency; + +namespace Ploch.Data.EFCore.IntegrationTesting.FluentAssertions; + +/// +/// Provides FluentAssertions equivalency extension methods for comparing EF Core entities +/// stored and retrieved from a database. +/// +public static class EntitiesEquivalencyOptionsExtensions +{ + /// + /// Configures FluentAssertions equivalency options suitable for comparing EF Core entities + /// that have been stored in and retrieved from a database. + /// + /// The concrete type of the equivalency options, used for the fluent chain. + /// The equivalency options to configure. + /// + /// Specifies the maximum allowed difference in milliseconds between values. 
+ /// Defaults to 1 millisecond — approximately 10× the maximum observed SQLite rounding error (~78 µs), + /// tight enough to catch real timing regressions and loose enough to be stable. + /// + /// + /// The same instance with the entity-comparison settings applied, + /// allowing further chaining. + /// + /// + /// + /// Four recurring issues arise when comparing in-memory entity objects with entities loaded from a + /// relational database. This method handles all four in a single call: + /// + /// + /// + /// + /// Collection ordering: Databases do not guarantee the order in which rows are + /// returned. + /// ensures collection items are matched by value, not position. + /// + /// + /// + /// + /// Cyclic navigation properties: EF Core entity graphs commonly form reference + /// cycles — for example BlogPost → Tag → BlogPosts → BlogPost. Without handling, + /// FluentAssertions recurses indefinitely. + /// IgnoringCyclicReferences() stops the + /// traversal when a cycle is detected. + /// + /// + /// + /// + /// precision: SQLite stores + /// values as TEXT with approximately 100-microsecond + /// precision, while .NET retains 100-nanosecond (tick) precision. The maximum observed + /// difference is ~78 µs. A 1-millisecond tolerance (10× the maximum rounding + /// error) is applied to every property comparison via + /// BeCloseTo. + /// + /// + /// + /// + /// Null vs empty collections: EF Core does not initialise navigation collections + /// that were not eager-loaded via Include() — they remain . + /// In-memory test entities typically initialise them to new List<T>(). + /// A custom treats a collection + /// as equivalent to an empty collection (and vice versa). + /// + /// + /// + /// + /// When EF Core loads an entity with eager-loaded navigation properties, it also populates the + /// inverse back-navigation references (e.g. Tag.BlogPosts). In-memory entities created in + /// test setup do not have those back-references. 
Exclude them from the comparison and verify them + /// separately if needed. + /// + /// + /// + /// + /// // Basic comparison of two entities loaded from the database. + /// actual.Should().BeEquivalentTo(expected, options => options.WithEntityEquivalencyOptions()); + /// + /// // Combined with exclusions for back-navigation properties that differ between an in-memory + /// // object and a DB-loaded one (e.g. Tag.BlogPosts is populated by EF Core but not in test setup). + /// actual.Should().BeEquivalentTo(expected, + /// options => options.Excluding(p => p.Tags) + /// .Excluding(p => p.Categories) + /// .WithEntityEquivalencyOptions()); + /// + /// // Collection assertion — ContainEquivalentOf and ContainEquivalentOf both accept the same options. + /// blogPosts.Should().ContainEquivalentOf(expected, + /// options => options.Excluding(p => p.Categories).WithEntityEquivalencyOptions()); + /// + /// + public static TSelf WithEntityEquivalencyOptions(this SelfReferenceEquivalencyOptions options, double dateTimeOffsetToleranceMilliseconds = 1) + where TSelf : SelfReferenceEquivalencyOptions + { + return options.Using(new NullEmptyCollectionEquivalencyStep()) + .WithoutStrictOrdering() + .IgnoringCyclicReferences() + .Using(ctx => ctx.Subject.Should().BeCloseTo(ctx.Expectation, TimeSpan.FromMilliseconds(dateTimeOffsetToleranceMilliseconds))) + .WhenTypeIs(); + } +} diff --git a/src/Data.EFCore.IntegrationTesting.FluentAssertions/NullEmptyCollectionEquivalencyStep.cs b/src/Data.EFCore.IntegrationTesting.FluentAssertions/NullEmptyCollectionEquivalencyStep.cs new file mode 100644 index 0000000..2b986e9 --- /dev/null +++ b/src/Data.EFCore.IntegrationTesting.FluentAssertions/NullEmptyCollectionEquivalencyStep.cs @@ -0,0 +1,66 @@ +using System.Collections; +using FluentAssertions.Equivalency; + +namespace Ploch.Data.EFCore.IntegrationTesting.FluentAssertions; + +/// +/// An that treats a collection +/// as equivalent to an empty collection (and vice versa). 
+/// +/// +/// +/// EF Core does not initialise navigation collections that were not eager-loaded via +/// Include() — they remain . In-memory test entities, +/// however, typically initialise collections to new List<T>(). Without +/// this step, FluentAssertions treats and an empty collection +/// as different, causing false-negative assertion failures. +/// +/// +/// This step only intercedes when one side is and the other is +/// an empty (excluding , which also +/// implements ). All other cases are passed through to the +/// next step in the pipeline, preserving configured options such as +/// tolerance and cyclic-reference handling. +/// +/// +internal sealed class NullEmptyCollectionEquivalencyStep : IEquivalencyStep +{ + /// + public EquivalencyResult Handle(Comparands comparands, IEquivalencyValidationContext context, IValidateChildNodeEquivalency valueChildNodes) + { + if (comparands.Subject is null && IsEmptyNonStringEnumerable(comparands.Expectation)) + { + return EquivalencyResult.EquivalencyProven; + } + + if (comparands.Expectation is null && IsEmptyNonStringEnumerable(comparands.Subject)) + { + return EquivalencyResult.EquivalencyProven; + } + + return EquivalencyResult.ContinueWithNext; + } + + private static bool IsEmptyNonStringEnumerable(object? 
value) + { + if (value is string or null) + { + return false; + } + + if (value is IEnumerable enumerable) + { + var enumerator = enumerable.GetEnumerator(); + try + { + return !enumerator.MoveNext(); + } + finally + { + (enumerator as IDisposable)?.Dispose(); + } + } + + return false; + } +} diff --git a/src/Data.EFCore.IntegrationTesting.FluentAssertions/Ploch.Data.EFCore.IntegrationTesting.FluentAssertions.csproj b/src/Data.EFCore.IntegrationTesting.FluentAssertions/Ploch.Data.EFCore.IntegrationTesting.FluentAssertions.csproj new file mode 100644 index 0000000..d89f83d --- /dev/null +++ b/src/Data.EFCore.IntegrationTesting.FluentAssertions/Ploch.Data.EFCore.IntegrationTesting.FluentAssertions.csproj @@ -0,0 +1,17 @@ + + + + $(TargetFrameworkVersions) + + + + + + + + + + + + + diff --git a/src/Data.EFCore.IntegrationTesting.FluentAssertions/README.md b/src/Data.EFCore.IntegrationTesting.FluentAssertions/README.md new file mode 100644 index 0000000..45ca833 --- /dev/null +++ b/src/Data.EFCore.IntegrationTesting.FluentAssertions/README.md @@ -0,0 +1,127 @@ +# Ploch.Data.EFCore.IntegrationTesting.FluentAssertions + +FluentAssertions helpers for integration tests that store and retrieve EF Core entities from a database. + +## Overview + +When comparing entities retrieved from a database against in-memory objects using FluentAssertions, three recurring problems arise: + +| Problem | Cause | Effect | +|---|---|---| +| **`DateTimeOffset` precision** | SQLite stores `DateTimeOffset` as TEXT with ~100 µs precision; .NET has 100 ns (tick) precision | Comparisons that should pass fail with sub-millisecond differences | +| **Unordered collections** | Databases do not guarantee row-return order | Collection comparisons fail because items are in a different order than at insert time | +| **Cyclic navigation properties** | EF Core populates inverse back-navigation references on loaded entities (e.g. 
`Tag.BlogPosts`) | FluentAssertions recurses infinitely into the object graph | + +This library provides a single extension method — `WithEntityEquivalencyOptions()` — that resolves all three issues consistently. + +## Installation + +Reference the package in your test project: + +```xml + +``` + +Or, when working locally in the `ploch-data` workspace, use a project reference: + +```xml + +``` + +## API Reference + +### `WithEntityEquivalencyOptions()` + +```csharp +public static TSelf WithEntityEquivalencyOptions( + this SelfReferenceEquivalencyOptions options) + where TSelf : SelfReferenceEquivalencyOptions +``` + +Applies the following configuration to a FluentAssertions equivalency assertion: + +- **`WithoutStrictOrdering()`** — compares collections by value, ignoring insertion order. +- **`IgnoringCyclicReferences()`** — stops traversal when a cycle is detected (e.g. `BlogPost → Tags → BlogPosts → BlogPost`). +- **`BeCloseTo` with 1 ms tolerance for `DateTimeOffset`** — accommodates the ~100 µs precision loss that occurs when SQLite stores and retrieves `DateTimeOffset` values. + +#### Usage + +```csharp +using Ploch.Data.EFCore.IntegrationTesting; + +// Basic — compare an entity retrieved from the DB with the in-memory original. +actual.Should().BeEquivalentTo(expected, options => options.WithEntityEquivalencyOptions()); + +// With additional exclusions for back-navigation properties. +// EF Core populates Tag.BlogPosts on a loaded BlogPost, but the in-memory BlogPost +// created in test setup does not have that back-reference populated. +actual.Should().BeEquivalentTo(expected, + options => options.Excluding(p => p.Tags) + .Excluding(p => p.Categories) + .WithEntityEquivalencyOptions()); + +// In a collection assertion. 
+blogPosts.Should().ContainEquivalentOf(expected, + options => options.Excluding(p => p.Categories).WithEntityEquivalencyOptions()); +``` + +### Handling Back-Navigation Properties + +EF Core automatically populates inverse navigation properties when loading an entity with eager loading. For example, loading a `BlogPost` with `.Include(p => p.Tags)` also causes each `Tag.BlogPosts` to be set. The in-memory objects created during test setup do not have this back-reference, causing `BeEquivalentTo` to fail. + +The recommended pattern is to exclude back-navigation properties from the structural comparison and verify them separately by count or content: + +```csharp +// Compare core scalar properties. +actual.Should().BeEquivalentTo(expected, + options => options.Excluding(p => p.Tags) + .Excluding(p => p.Categories) + .WithEntityEquivalencyOptions()); + +// Verify the navigation properties were loaded correctly. +actual.Tags.Should().HaveCount(expected.Tags.Count); +actual.Categories.Should().HaveCount(expected.Categories.Count); +``` + +### `DateTimeOffset` Precision in SQLite + +SQLite stores `DateTimeOffset` as TEXT. EF Core's SQLite provider truncates the fractional seconds to approximately 4 decimal places (~100 µs resolution), discarding sub-microsecond ticks. For example: + +| | Value | +|---|---| +| In-memory (.NET) | `2026-04-15 14:49:16.4155783 +02:00` | +| Read from SQLite | `2026-04-15 14:49:16.4155000 +02:00` | +| Difference | ~78 µs (< 0.1 ms) | + +`WithEntityEquivalencyOptions()` applies a **1 ms tolerance** — 10× the maximum observed rounding error — to every `DateTimeOffset` comparison, ensuring tests are stable without masking real bugs. + +## Integration with `DataIntegrationTest` + +This library is designed to be used alongside `Ploch.Data.EFCore.IntegrationTesting`, which provides the `DataIntegrationTest` base class for EF Core integration tests using an in-memory SQLite database. 
+ +```csharp +public class MyRepositoryTests : GenericRepositoryDataIntegrationTest +{ + [Fact] + public async Task GetByIdAsync_should_return_entity_with_includes() + { + using var unitOfWork = CreateUnitOfWork(); + var (blog, blogPost1, _) = await RepositoryHelper.AddTestBlogEntities( + unitOfWork.Repository()); + await unitOfWork.CommitAsync(); + + var repository = CreateReadRepository(); + var result = repository.GetById(blog.Id, + q => q.Include(q => q.BlogPosts).ThenInclude(bp => bp.Tags)); + + // Verify against a fresh DbContext — not the same repository used to write. + var dbContext = CreateRootDbContext(); + var fromDb = await dbContext.Blogs + .Include(q => q.BlogPosts).ThenInclude(bp => bp.Tags) + .FirstAsync(b => b.Id == blog.Id); + + fromDb.Should().BeEquivalentTo(result, options => options.WithEntityEquivalencyOptions()); + result!.BlogPosts.Should().HaveCount(blog.BlogPosts.Count); + } +} +``` diff --git a/src/Data.EFCore.IntegrationTesting/DataIntegrationTest.cs b/src/Data.EFCore.IntegrationTesting/DataIntegrationTest.cs index 01b472f..b368923 100644 --- a/src/Data.EFCore.IntegrationTesting/DataIntegrationTest.cs +++ b/src/Data.EFCore.IntegrationTesting/DataIntegrationTest.cs @@ -14,6 +14,7 @@ namespace Ploch.Data.EFCore.IntegrationTesting; public abstract class DataIntegrationTest : IDisposable where TDbContext : DbContext { private readonly IDbContextConfigurator? _dbContextConfigurator; + private bool _disposed; /// /// Initializes a new instance of the class. @@ -33,7 +34,7 @@ protected DataIntegrationTest(IDbContextConfigurator? 
dbContextConfigurator = nu dbContextConfigurator ??= new SqLiteDbContextConfigurator(SqLiteConnectionOptions.InMemory); _dbContextConfigurator = dbContextConfigurator; - (ServiceProvider, DbContext, RootServiceProvider) = + (RootServiceProvider, ScopedServiceProvider, DbContext) = DbContextServicesRegistrationHelper.BuildDbContextAndServiceProvider(serviceCollection, dbContextConfigurator); } @@ -47,7 +48,7 @@ protected DataIntegrationTest(IDbContextConfigurator? dbContextConfigurator = nu /// Provides access to the configured service provider. /// This is used to resolve dependencies and services required during integration testing. /// - protected IServiceProvider ServiceProvider { get; } + protected IServiceProvider ScopedServiceProvider { get; } /// /// Gets the root (non-scoped) service provider. @@ -55,7 +56,7 @@ protected DataIntegrationTest(IDbContextConfigurator? dbContextConfigurator = nu /// /// Use this when you need to create additional scopes or resolve services /// outside the default test scope. For most test code, prefer - /// instead. + /// instead. /// protected IServiceProvider RootServiceProvider { get; } @@ -69,6 +70,32 @@ public void Dispose() GC.SuppressFinalize(this); } + /// + /// Creates a new instance from the root service provider. + /// + /// + /// + /// Use this when a test needs an additional context instance that is separate from + /// the default scoped exposed by this class. + /// + /// + /// The returned context should be disposed by the caller when no longer needed. + /// + /// + /// + /// using var rootContext = CreateRootDbContext(); + /// var total = await rootContext.Set<MyEntity>().CountAsync(); + /// + /// + /// + /// A resolved from . + protected TDbContext CreateRootDbContext() + { + var dbContextFactory = RootServiceProvider.GetRequiredService>(); + + return dbContextFactory.CreateDbContext(); + } + /// /// Configures the required services for the test. 
/// @@ -106,13 +133,26 @@ protected virtual void ConfigureServices(IServiceCollection services) /// protected virtual void Dispose(bool disposing) { + if (_disposed) + { + return; + } + if (disposing) { DbContext.Dispose(); - if (ServiceProvider is IDisposable disposableProvider) + // Dispose the scope first for fine-grained ordering, then the root — the root + // would cascade-dispose its scopes anyway, but explicit ordering is cheaper than + // relying on container semantics across providers. + if (ScopedServiceProvider is IDisposable disposableScope) { - disposableProvider.Dispose(); + disposableScope.Dispose(); + } + + if (RootServiceProvider is IDisposable disposableRoot) + { + disposableRoot.Dispose(); } if (_dbContextConfigurator is IDisposable disposableConfigurator) @@ -120,5 +160,7 @@ protected virtual void Dispose(bool disposing) disposableConfigurator.Dispose(); } } + + _disposed = true; } } diff --git a/src/Data.EFCore.IntegrationTesting/DbContextServicesRegistrationHelper.cs b/src/Data.EFCore.IntegrationTesting/DbContextServicesRegistrationHelper.cs index d149891..b3e8ee1 100644 --- a/src/Data.EFCore.IntegrationTesting/DbContextServicesRegistrationHelper.cs +++ b/src/Data.EFCore.IntegrationTesting/DbContextServicesRegistrationHelper.cs @@ -9,16 +9,16 @@ namespace Ploch.Data.EFCore.IntegrationTesting; /// public static class DbContextServicesRegistrationHelper { + /// /// /// Builds a DbContext and IServiceProvider for integration testing. /// /// The type of the DbContext to configure. /// The service collection to which the DbContext is added. - /// The database connection string. Default is in-memory SQLite database. - /// A tuple containing the scoped IServiceProvider, the configured TDbContext, and the root IServiceProvider. 
- public static (IServiceProvider, TDbContext, IServiceProvider) BuildDbContextAndServiceProvider(IServiceCollection serviceCollection, - string connectionString = "Data Source=:memory:") - where TDbContext : DbContext + /// The database connection string. Default is an in-memory SQLite database. + public static (IServiceProvider RootProvider, IServiceProvider ScopedProvider, TDbContext DbContext) BuildDbContextAndServiceProvider( + IServiceCollection serviceCollection, + string connectionString = "Data Source=:memory:") where TDbContext : DbContext { // Create the connection once and share it across all DbContext instances. // This is critical for SQLite in-memory databases: each new connection to :memory: @@ -38,17 +38,22 @@ public static (IServiceProvider, TDbContext, IServiceProvider) BuildDbContextAnd /// The type of the DbContext to configure. /// The service collection to which the DbContext is added. /// The configurator responsible for setting up the DbContext options. - /// A tuple containing the scoped IServiceProvider, the configured TDbContext, and the root IServiceProvider. - public static (IServiceProvider, TDbContext, IServiceProvider) BuildDbContextAndServiceProvider(IServiceCollection serviceCollection, - IDbContextConfigurator dbContextConfigurator) - where TDbContext : DbContext + /// + /// A tuple containing the root IServiceProvider (RootProvider), the scoped IServiceProvider (ScopedProvider), the configured TDbContext ( + /// DbContext). 
+ /// + public static (IServiceProvider RootProvider, IServiceProvider ScopedProvider, TDbContext DbContext) BuildDbContextAndServiceProvider( + IServiceCollection serviceCollection, + IDbContextConfigurator dbContextConfigurator) where TDbContext : DbContext { serviceCollection.AddDbContext(dbContextConfigurator.Configure); + serviceCollection.AddDbContextFactory(dbContextConfigurator.Configure); return CreateProviderAndPrepareDbContext(serviceCollection); } - private static (IServiceProvider, TDbContext, IServiceProvider) CreateProviderAndPrepareDbContext(IServiceCollection serviceCollection) where TDbContext : DbContext + private static (IServiceProvider RootProvider, IServiceProvider ScopedProvider, TDbContext DbContext) + CreateProviderAndPrepareDbContext(IServiceCollection serviceCollection) where TDbContext : DbContext { var serviceProvider = serviceCollection.BuildServiceProvider(); var scope = serviceProvider.CreateScope(); @@ -60,6 +65,6 @@ private static (IServiceProvider, TDbContext, IServiceProvider) CreateProviderAn // share the same DbContext instance (and its change tracker). // The shared connection in SqLiteDbContextConfigurator ensures all DbContext instances // (including those in UnitOfWork child scopes) access the same in-memory database. 
- return (scope.ServiceProvider, testDbContext, serviceProvider); + return (serviceProvider, scope.ServiceProvider, testDbContext); } } diff --git a/src/Data.EFCore.IntegrationTesting/Ploch.Data.EFCore.IntegrationTesting.csproj b/src/Data.EFCore.IntegrationTesting/Ploch.Data.EFCore.IntegrationTesting.csproj index d35f343..55b82ca 100644 --- a/src/Data.EFCore.IntegrationTesting/Ploch.Data.EFCore.IntegrationTesting.csproj +++ b/src/Data.EFCore.IntegrationTesting/Ploch.Data.EFCore.IntegrationTesting.csproj @@ -2,9 +2,6 @@ $(TargetFrameworkVersions) - enable - enable - AnyCPU;x64 diff --git a/src/Data.EFCore.SqLite/Ploch.Data.EFCore.SqLite.csproj b/src/Data.EFCore.SqLite/Ploch.Data.EFCore.SqLite.csproj index d3fba2a..0da7c9b 100644 --- a/src/Data.EFCore.SqLite/Ploch.Data.EFCore.SqLite.csproj +++ b/src/Data.EFCore.SqLite/Ploch.Data.EFCore.SqLite.csproj @@ -2,9 +2,6 @@ $(TargetFrameworkVersions) - enable - enable - AnyCPU;x64 diff --git a/src/Data.EFCore.SqlServer/Ploch.Data.EFCore.SqlServer.csproj b/src/Data.EFCore.SqlServer/Ploch.Data.EFCore.SqlServer.csproj index 7c481ae..e7c1c91 100644 --- a/src/Data.EFCore.SqlServer/Ploch.Data.EFCore.SqlServer.csproj +++ b/src/Data.EFCore.SqlServer/Ploch.Data.EFCore.SqlServer.csproj @@ -2,9 +2,6 @@ $(TargetFrameworkVersions) - enable - enable - AnyCPU;x64 diff --git a/src/Data.EFCore/Ploch.Data.EFCore.csproj b/src/Data.EFCore/Ploch.Data.EFCore.csproj index 92f5fe6..c6493ce 100644 --- a/src/Data.EFCore/Ploch.Data.EFCore.csproj +++ b/src/Data.EFCore/Ploch.Data.EFCore.csproj @@ -2,7 +2,6 @@ $(TargetFrameworkVersions) - AnyCPU;x64 diff --git a/src/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTesting/GenericRepositoryDataIntegrationTest.cs b/src/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTesting/GenericRepositoryDataIntegrationTest.cs index d41422b..4323e79 100644 --- a/src/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTesting/GenericRepositoryDataIntegrationTest.cs +++ 
b/src/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTesting/GenericRepositoryDataIntegrationTest.cs @@ -13,8 +13,7 @@ namespace Ploch.Data.GenericRepository.EFCore.IntegrationTesting; /// /// The data context type. public abstract class GenericRepositoryDataIntegrationTest(IDbContextConfigurator? dbContextConfigurator = null) - : DataIntegrationTest(dbContextConfigurator) - where TDbContext : DbContext + : DataIntegrationTest(dbContextConfigurator) where TDbContext : DbContext { /// /// Configures the required services for the test. @@ -31,44 +30,78 @@ protected override void ConfigureServices(IServiceCollection services) /// /// Creates a new unit of work. /// + /// + /// to resolve from ; + /// otherwise resolve from . + /// /// The unit of work. - protected IUnitOfWork CreateUnitOfWork() => ServiceProvider.GetRequiredService(); + protected IUnitOfWork CreateUnitOfWork(bool useScopedProvider = true) => GetServiceProvider(useScopedProvider).GetRequiredService(); + + /// + /// Creates an instance of . + /// + /// The entity type. + /// + /// to resolve from ; + /// otherwise resolve from . + /// + /// An instance of . + protected IQueryableRepository CreateQueryableRepository(bool useScopedProvider = true) where TEntity : class => + GetServiceProvider(useScopedProvider).GetRequiredService>(); /// /// Creates an instance of . /// /// The entity type. /// The identifier type. + /// + /// to resolve from ; + /// otherwise resolve from . + /// /// An instance of a . [SuppressMessage("Style", "VSTHRD200:Use \"Async\" suffix for async methods", Justification = "The type name created ends with Async hence the name.")] - protected IReadRepositoryAsync CreateReadRepositoryAsync() - where TEntity : class, IHasId => ServiceProvider.GetRequiredService>(); + protected IReadRepositoryAsync CreateReadRepositoryAsync(bool useScopedProvider = true) where TEntity : class, IHasId => + GetServiceProvider(useScopedProvider).GetRequiredService>(); /// /// Creates a . 
/// /// The entity type. /// The identifier type. + /// + /// to resolve from ; + /// otherwise resolve from . + /// /// An instance of . - protected IReadRepository CreateReadRepository() - where TEntity : class, IHasId => ServiceProvider.GetRequiredService>(); + protected IReadRepository CreateReadRepository(bool useScopedProvider = true) where TEntity : class, IHasId => + GetServiceProvider(useScopedProvider).GetRequiredService>(); /// /// Creates a . /// /// The entity type. /// The identifier type. + /// + /// to resolve from ; + /// otherwise resolve from . + /// /// An instance of . - protected IReadWriteRepository CreateReadWriteRepository() - where TEntity : class, IHasId => ServiceProvider.GetRequiredService>(); + protected IReadWriteRepository CreateReadWriteRepository(bool useScopedProvider = true) where TEntity : class, IHasId => + GetServiceProvider(useScopedProvider).GetRequiredService>(); /// /// Creates a . /// /// The entity type. /// The identifier type. + /// + /// to resolve from ; + /// otherwise resolve from . + /// /// An instance of . [SuppressMessage("Style", "VSTHRD200:Use \"Async\" suffix for async methods", Justification = "The type name created ends with Async hence the name.")] - protected IReadWriteRepositoryAsync CreateReadWriteRepositoryAsync() - where TEntity : class, IHasId => ServiceProvider.GetRequiredService>(); + protected IReadWriteRepositoryAsync CreateReadWriteRepositoryAsync(bool useScopedProvider = true) where TEntity : class, IHasId => + GetServiceProvider(useScopedProvider).GetRequiredService>(); + + private IServiceProvider GetServiceProvider(bool useScopedProvider) => useScopedProvider ? 
ScopedServiceProvider : RootServiceProvider; } diff --git a/src/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTesting/Ploch.Data.GenericRepository.EFCore.IntegrationTesting.csproj b/src/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTesting/Ploch.Data.GenericRepository.EFCore.IntegrationTesting.csproj index f1f8796..cec3dc9 100644 --- a/src/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTesting/Ploch.Data.GenericRepository.EFCore.IntegrationTesting.csproj +++ b/src/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTesting/Ploch.Data.GenericRepository.EFCore.IntegrationTesting.csproj @@ -2,10 +2,7 @@ $(TargetFrameworkVersions) - enable - enable README.md - AnyCPU;x64 diff --git a/src/Data.Model/Ploch.Data.Model.csproj b/src/Data.Model/Ploch.Data.Model.csproj index 37ad2aa..4cfbf6a 100644 --- a/src/Data.Model/Ploch.Data.Model.csproj +++ b/src/Data.Model/Ploch.Data.Model.csproj @@ -1,11 +1,9 @@  - enable 12 netstandard2.0 README.md - AnyCPU;x64 diff --git a/src/Data.StandardDataSets/Ploch.Data.StandardDataSets.csproj b/src/Data.StandardDataSets/Ploch.Data.StandardDataSets.csproj index 669148b..9f5c4f4 100644 --- a/src/Data.StandardDataSets/Ploch.Data.StandardDataSets.csproj +++ b/src/Data.StandardDataSets/Ploch.Data.StandardDataSets.csproj @@ -2,7 +2,6 @@ netstandard2.0 - AnyCPU;x64 diff --git a/src/Data.Utilities/DataColumnExtensions.cs b/src/Data.Utilities/DataColumnExtensions.cs index cd1e437..1201e02 100644 --- a/src/Data.Utilities/DataColumnExtensions.cs +++ b/src/Data.Utilities/DataColumnExtensions.cs @@ -1,4 +1,5 @@ -using System.Data; +using System; +using System.Data; namespace Ploch.Data.Utilities; @@ -12,8 +13,21 @@ public static class DataColumnExtensions /// /// The from which to copy properties. /// The to which properties will be copied. + /// + /// Thrown when or is . 
+ /// public static void CopyProperties(this DataColumn sourceColumn, DataColumn targetColumn) { + if (sourceColumn is null) + { + throw new ArgumentNullException(nameof(sourceColumn)); + } + + if (targetColumn is null) + { + throw new ArgumentNullException(nameof(targetColumn)); + } + targetColumn.AllowDBNull = sourceColumn.AllowDBNull; targetColumn.AutoIncrement = sourceColumn.AutoIncrement; targetColumn.Caption = sourceColumn.Caption; diff --git a/tests/.editorconfig b/tests/.editorconfig index b017f4c..3f0b83f 100644 --- a/tests/.editorconfig +++ b/tests/.editorconfig @@ -28,6 +28,7 @@ dotnet_diagnostic.IDE0052.severity = warning # Remove unused private members # SonarAnalyzer settings dotnet_diagnostic.s4487.severity = none # Unread "private" fields should be removed +dotnet_diagnostic.s4261.severity = none # Methods that return Task should end with "Async" - not applicable to xUnit test methods whose names follow the Method_should_do_X convention # StyleCop settings dotnet_diagnostic.sa0001.severity = none # XML comment analysis is disabled due to project configuration diff --git a/tests/Data.EFCore.SqLite.Tests/Ploch.Data.EFCore.SqLite.Tests.csproj b/tests/Data.EFCore.SqLite.Tests/Ploch.Data.EFCore.SqLite.Tests.csproj index 0fb6b7d..3c4f73e 100644 --- a/tests/Data.EFCore.SqLite.Tests/Ploch.Data.EFCore.SqLite.Tests.csproj +++ b/tests/Data.EFCore.SqLite.Tests/Ploch.Data.EFCore.SqLite.Tests.csproj @@ -2,16 +2,7 @@ $(TargetFrameworkVersion) - enable - enable - - false - true Exe - AnyCPU;x64 - - - false diff --git a/tests/Data.EFCore.SqLite.Tests/SqLiteConnectionOptionsTests.cs b/tests/Data.EFCore.SqLite.Tests/SqLiteConnectionOptionsTests.cs new file mode 100644 index 0000000..ec835d3 --- /dev/null +++ b/tests/Data.EFCore.SqLite.Tests/SqLiteConnectionOptionsTests.cs @@ -0,0 +1,67 @@ +using FluentAssertions; +using Microsoft.Data.Sqlite; +using Xunit; + +namespace Ploch.Data.EFCore.SqLite.Tests; + +public class SqLiteConnectionOptionsTests +{ + [Fact] + public 
void InMemory_should_return_options_with_memory_datasource() + { + var options = SqLiteConnectionOptions.InMemory; + var connectionString = options.BuildConnectionString(); + + connectionString.Should().Contain("Data Source=:memory:"); + } + + [Fact] + public void UsingFile_should_return_options_with_specified_datasource() + { + var dbPath = "test.db"; + var options = SqLiteConnectionOptions.UsingFile(dbPath); + var connectionString = options.BuildConnectionString(); + + connectionString.Should().Contain($"Data Source={dbPath}"); + } + + [Fact] + public void FromConnectionString_should_return_options_with_specified_connection_string() + { + var connectionString = "Data Source=test_cs.db;Mode=ReadOnly"; + var options = SqLiteConnectionOptions.FromConnectionString(connectionString); + + // Microsoft.Data.Sqlite.SqliteConnectionStringBuilder.ToString() does not preserve + // original keyword casing/order, so assert semantic equivalence via its own parser + // rather than string equality against the input. 
+ var builtFromOptions = new SqliteConnectionStringBuilder(options.BuildConnectionString()); + var expectedBuilder = new SqliteConnectionStringBuilder(connectionString); + + builtFromOptions.DataSource.Should().Be(expectedBuilder.DataSource); + builtFromOptions.Mode.Should().Be(expectedBuilder.Mode); + } + + [Fact] + public void Constructor_with_action_should_apply_action_to_builder() + { + var options = new SqLiteConnectionOptions(builder => + { + builder.DataSource = "custom.db"; + builder.Mode = SqliteOpenMode.ReadWriteCreate; + }); + + var connectionString = options.BuildConnectionString(); + connectionString.Should().Contain("Data Source=custom.db"); + connectionString.Should().Contain("Mode=ReadWriteCreate"); + } + + [Fact] + public void BuildConnectionString_should_return_consistent_string() + { + var options = SqLiteConnectionOptions.InMemory; + var cs1 = options.BuildConnectionString(); + var cs2 = options.BuildConnectionString(); + + cs1.Should().Be(cs2); + } +} diff --git a/tests/Data.EFCore.SqlServer.Tests/Ploch.Data.EFCore.SqlServer.Tests.csproj b/tests/Data.EFCore.SqlServer.Tests/Ploch.Data.EFCore.SqlServer.Tests.csproj index b1899e8..3946fb0 100644 --- a/tests/Data.EFCore.SqlServer.Tests/Ploch.Data.EFCore.SqlServer.Tests.csproj +++ b/tests/Data.EFCore.SqlServer.Tests/Ploch.Data.EFCore.SqlServer.Tests.csproj @@ -2,17 +2,7 @@ $(TargetFrameworkVersion) - enable - enable - - false - true Exe - AnyCPU;x64 - - - - false diff --git a/tests/Data.EFCore.Tests/GetStaticPropertyValueTests.cs b/tests/Data.EFCore.Tests/GetStaticPropertyValueTests.cs new file mode 100644 index 0000000..db41715 --- /dev/null +++ b/tests/Data.EFCore.Tests/GetStaticPropertyValueTests.cs @@ -0,0 +1,56 @@ +using FluentAssertions; + +namespace Ploch.Data.EFCore.Tests; + +public class GetStaticPropertyValueTests +{ + [Fact] + public void GetStaticPropertyValue_should_return_value_of_public_static_property() + { + var result = 
typeof(ClassWithStaticProperties).GetStaticPropertyValue("PublicValue"); + + result.Should().Be("public-value"); + } + + [Fact] + public void GetStaticPropertyValue_should_return_value_of_private_static_property() + { + var result = typeof(ClassWithStaticProperties).GetStaticPropertyValue("PrivateValue"); + + result.Should().Be(42); + } + + [Fact] + public void GetStaticPropertyValue_should_throw_when_property_not_found() + { + var act = () => typeof(ClassWithStaticProperties).GetStaticPropertyValue("NonExistent"); + + act.Should().Throw().WithMessage("*'NonExistent'*not found*"); + } + + [Fact] + public void GetStaticPropertyValue_should_return_default_when_property_value_is_null() + { + var result = typeof(ClassWithStaticProperties).GetStaticPropertyValue("NullValue"); + + result.Should().BeNull(); + } + + [Fact] + public void GetStaticPropertyValue_should_throw_when_property_type_does_not_match() + { + var act = () => typeof(ClassWithStaticProperties).GetStaticPropertyValue("PublicValue"); + + act.Should().Throw().WithMessage("*not of*type*"); + } + + private class ClassWithStaticProperties + { + public static string PublicValue { get; } = "public-value"; + + public static string? 
NullValue { get; } = null; + + // ReSharper disable once UnusedMember.Local + private static int PrivateValue { get; } = 42; + } +} diff --git a/tests/Data.EFCore.Tests/Ploch.Data.EFCore.Tests.csproj b/tests/Data.EFCore.Tests/Ploch.Data.EFCore.Tests.csproj index 638493f..bedf456 100644 --- a/tests/Data.EFCore.Tests/Ploch.Data.EFCore.Tests.csproj +++ b/tests/Data.EFCore.Tests/Ploch.Data.EFCore.Tests.csproj @@ -2,18 +2,7 @@ $(TargetFrameworkVersion) - enable - enable - - false - true - true Exe - AnyCPU;x64 - - - - false diff --git a/tests/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTests/GlobalUsings.cs b/tests/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTests/GlobalUsings.cs index 7b25ddd..26327b5 100644 --- a/tests/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTests/GlobalUsings.cs +++ b/tests/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTests/GlobalUsings.cs @@ -1,2 +1,4 @@ global using FluentAssertions; +global using Ploch.Data.EFCore.IntegrationTesting; +global using Ploch.Data.EFCore.IntegrationTesting.FluentAssertions; global using Xunit; diff --git a/tests/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTests/Ploch.Data.GenericRepository.EFCore.IntegrationTests.csproj b/tests/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTests/Ploch.Data.GenericRepository.EFCore.IntegrationTests.csproj index c859623..60ee4d8 100644 --- a/tests/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTests/Ploch.Data.GenericRepository.EFCore.IntegrationTests.csproj +++ b/tests/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTests/Ploch.Data.GenericRepository.EFCore.IntegrationTests.csproj @@ -2,12 +2,6 @@ $(TargetFrameworkVersion) - enable - enable - - false - true - true Exe $(NoWarn);VSTHRD200 @@ -15,6 +9,7 @@ + diff --git a/tests/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTests/QueryableRepositoryTests.cs 
b/tests/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTests/QueryableRepositoryTests.cs new file mode 100644 index 0000000..19be658 --- /dev/null +++ b/tests/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTests/QueryableRepositoryTests.cs @@ -0,0 +1,123 @@ +using Ploch.Data.GenericRepository.EFCore.IntegrationTesting; +using Ploch.Data.GenericRepository.EFCore.IntegrationTests.Model; + +namespace Ploch.Data.GenericRepository.EFCore.IntegrationTests; + +public class QueryableRepositoryTests : GenericRepositoryDataIntegrationTest +{ + [Fact] + public async Task Entities_should_return_queryable_of_all_entities() + { + using var unitOfWork = CreateUnitOfWork(); + var repository = unitOfWork.Repository(); + await repository.AddAsync(new() { Id = 1, Name = "First" }); + await repository.AddAsync(new() { Id = 2, Name = "Second" }); + await unitOfWork.CommitAsync(); + + var queryableRepo = CreateQueryableRepository(); + var entities = queryableRepo.Entities.ToArray(); + + entities.Should().HaveCount(2); + } + + [Fact] + public async Task GetPageQuery_should_return_paged_queryable() + { + using var unitOfWork = CreateUnitOfWork(); + var repository = unitOfWork.Repository(); + for (var i = 1; i <= 15; i++) + { + await repository.AddAsync(new() { Id = i, Name = $"Entity{i:D2}" }); + } + + await unitOfWork.CommitAsync(); + + var queryableRepo = CreateQueryableRepository(); + var pageQuery = queryableRepo.GetPageQuery(2, 5); + + var result = pageQuery.ToList(); + result.Should().HaveCount(5); + } + + [Fact] + public async Task GetPageQuery_with_sort_should_order_results() + { + using var unitOfWork = CreateUnitOfWork(); + var repository = unitOfWork.Repository(); + await repository.AddAsync(new() { Id = 1, Name = "Charlie" }); + await repository.AddAsync(new() { Id = 2, Name = "Alpha" }); + await repository.AddAsync(new() { Id = 3, Name = "Bravo" }); + await unitOfWork.CommitAsync(); + + var queryableRepo = CreateQueryableRepository(); + var 
pageQuery = queryableRepo.GetPageQuery(1, 3, e => e.Name); + + var result = pageQuery.ToList(); + result.Should().HaveCount(3); + result[0].Name.Should().Be("Alpha"); + result[1].Name.Should().Be("Bravo"); + result[2].Name.Should().Be("Charlie"); + } + + [Fact] + public async Task GetPageQuery_with_query_filter_should_filter_results() + { + using var unitOfWork = CreateUnitOfWork(); + var repository = unitOfWork.Repository(); + for (var i = 1; i <= 10; i++) + { + await repository.AddAsync(new() { Id = i, Name = $"Entity{i}" }); + } + + await unitOfWork.CommitAsync(); + + var queryableRepo = CreateQueryableRepository(); + var pageQuery = queryableRepo.GetPageQuery(1, 10, query: e => e.Id > 5); + + var result = pageQuery.ToList(); + result.Should().HaveCount(5); + result.Should().OnlyContain(e => e.Id > 5); + } + + [Fact] + public async Task GetPageQuery_with_onDbSet_should_apply_custom_query() + { + using var unitOfWork = CreateUnitOfWork(); + var repository = unitOfWork.Repository(); + for (var i = 1; i <= 10; i++) + { + await repository.AddAsync(new() { Id = i, Name = $"Entity{i}" }); + } + + await unitOfWork.CommitAsync(); + + var queryableRepo = CreateQueryableRepository(); + + // Request all 10 items in one page, but the onDbSet filter limits to IDs <= 3 + var pageQuery = queryableRepo.GetPageQuery(1, 10, onDbSet: q => q.Where(e => e.Id <= 3)); + + var result = pageQuery.ToList(); + result.Should().HaveCount(3); + result.Should().OnlyContain(e => e.Id <= 3); + } + + [Fact] + public void GetPageQuery_should_throw_when_page_number_is_zero() + { + var queryableRepo = CreateQueryableRepository(); + + var act = () => queryableRepo.GetPageQuery(0, 5); + + act.Should().Throw(); + } + + [Fact] + public void GetPageQuery_should_throw_when_page_size_is_zero() + { + var queryableRepo = CreateQueryableRepository(); + + var act = () => queryableRepo.GetPageQuery(1, 0); + + act.Should().Throw(); + } +} diff --git 
a/tests/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTests/ReadRepositoryTests.cs b/tests/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTests/ReadRepositoryTests.cs index e3d1a5d..859451e 100644 --- a/tests/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTests/ReadRepositoryTests.cs +++ b/tests/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTests/ReadRepositoryTests.cs @@ -16,24 +16,24 @@ public async Task GetAll_should_return_entities_with_includes() await unitOfWork.CommitAsync(); var repository = CreateReadRepository(); - var blogPosts = repository.GetAll(query => query.Include(e => e.Tags)); + var blogPosts = repository.GetAll(query => query.Include(e => e.Tags).Include(e => e.Categories).ThenInclude(c => c.Children)); blogPosts.Should().HaveCount(2); - blogPosts.Should() - .ContainEquivalentOf(blogPost1, - options => options.Excluding(p => p.Categories) - .IgnoringCyclicReferences() - .Using(ctx => ctx.Subject.Should().BeCloseTo(ctx.Expectation, TimeSpan.FromMilliseconds(100))) - .WhenTypeIs()); + var actualPost1 = blogPosts.Single(p => p.Id == blogPost1.Id); + + actualPost1.Should() + .BeEquivalentTo(blogPost1, + options => options.Excluding(member => member.Path.EndsWith(".BlogPosts")) + .Excluding(member => member.Path.EndsWith(".Parent")) + .WithEntityEquivalencyOptions()); + blogPosts.Should() .ContainEquivalentOf(blogPost2, - options => options.Excluding(p => p.Categories) - .IgnoringCyclicReferences() - .Using(ctx => ctx.Subject.Should().BeCloseTo(ctx.Expectation, TimeSpan.FromMilliseconds(100))) - .WhenTypeIs()); + options => options.Excluding(member => member.Path.EndsWith(".BlogPosts")) + .Excluding(member => member.Path.EndsWith(".Parent")) + .WithEntityEquivalencyOptions()); foreach (var blogPost in blogPosts) { blogPost.Tags.Should().NotBeEmpty(); - blogPost.Tags.Should().NotBeEmpty(); } } @@ -71,11 +71,7 @@ public async Task GetById_should_return_entity_with_includes() var 
repository = CreateReadRepository(); var blogPost = repository.GetById(blogPost2.Id, query => query.Include(e => e.Tags)); - blogPost.Should() - .BeEquivalentTo(blogPost2, - options => options.IgnoringCyclicReferences() - .Using(ctx => ctx.Subject.Should().BeCloseTo(ctx.Expectation, TimeSpan.FromMilliseconds(100))) - .WhenTypeIs()); + blogPost.Should().BeEquivalentTo(blogPost2, options => options.WithEntityEquivalencyOptions()); blogPost2.Tags.Should().NotBeEmpty(); } @@ -90,13 +86,7 @@ public async Task GetById_with_object_key_should_return_entity_with_includes() var repository = CreateReadRepository(); var blogPost = repository.GetById([ blogPost2.Id ]); - blogPost.Should() - .BeEquivalentTo(blogPost2, - options => options.Excluding(p => p.Categories) - .Excluding(p => p.Tags) - .IgnoringCyclicReferences() - .Using(ctx => ctx.Subject.Should().BeCloseTo(ctx.Expectation, TimeSpan.FromMilliseconds(100))) - .WhenTypeIs()); + blogPost.Should().BeEquivalentTo(blogPost2, options => options.Excluding(p => p.Categories).Excluding(p => p.Tags).WithEntityEquivalencyOptions()); blogPost2.Tags.Should().NotBeEmpty(); } @@ -117,10 +107,12 @@ public async Task GetPage_should_return_a_page_of_entities_with_includes() for (var i = 5; i <= 9; i++) { var blogPost = posts[i]; - var queriedPost = blogPosts.Should().ContainEquivalentOf(blogPost, options => options.Excluding(p => p.Categories).Excluding(p => p.Tags)).Subject; - queriedPost.Tags.Should().BeEquivalentTo(blogPost.Tags, options => options.Excluding(t => t.BlogPosts)); - queriedPost.Categories.Should().HaveCount(blogPost.Categories.Count); - queriedPost.Categories.Should().BeEquivalentTo(blogPost.Categories, options => options.Excluding(c => c.BlogPosts).Excluding(c => c.Parent).Excluding(c => c.Children)); + blogPosts.Should() + .ContainEquivalentOf(blogPost, + options => options.Excluding(member => member.Path.EndsWith(".BlogPosts")) + .Excluding(member => member.Path.EndsWith(".Parent")) + .Excluding(member => 
member.Path.EndsWith(".Children")) + .WithEntityEquivalencyOptions()); } } @@ -140,17 +132,22 @@ public async Task GetPage_should_return_a_page_of_entities_with_includes_using_q query => query.Name == "Blog post 5" || query.Name == "Blog post 6" || query.Name == "Blog post 7" || query.Name == "Blog post 8" || query.Name == "Blog post 9" || query.Name == "Blog post 10", #pragma warning restore SA1117 - query => query.Include(e => e.Tags).Include(e => e.Categories)); + + // Explicit OrderBy so page contents are deterministic — without it, the + // DB may return filtered rows in any order and the index-based assertion below would be flaky. + query => query.OrderBy(e => e.Id).Include(e => e.Tags).Include(e => e.Categories)); blogPosts.Should().HaveCount(3); for (var i = 7; i <= 9; i++) { var blogPost = posts[i]; - var queriedPost = blogPosts.Should().ContainEquivalentOf(blogPost, options => options.Excluding(p => p.Categories).Excluding(p => p.Tags)).Subject; - queriedPost.Tags.Should().BeEquivalentTo(blogPost.Tags, options => options.Excluding(t => t.BlogPosts)); - queriedPost.Categories.Should().HaveCount(blogPost.Categories.Count); - queriedPost.Categories.Should().BeEquivalentTo(blogPost.Categories, options => options.Excluding(c => c.BlogPosts).Excluding(c => c.Parent).Excluding(c => c.Children)); + blogPosts.Should() + .ContainEquivalentOf(blogPost, + options => options.Excluding(member => member.Path.EndsWith(".BlogPosts")) + .Excluding(member => member.Path.EndsWith(".Parent")) + .Excluding(member => member.Path.EndsWith(".Children")) + .WithEntityEquivalencyOptions()); } } @@ -164,7 +161,10 @@ public async Task GetPage_should_return_a_page_of_entities_without_includes() await unitOfWork.CommitAsync(); var repository = CreateReadRepository(); - var blogPosts = repository.GetPage(2, 5); + + // Explicit OrderBy so the page contents are deterministic and posts[i + 5] below + // reliably match the returned slice. 
+ var blogPosts = repository.GetPage(2, 5, onDbSet: q => q.OrderBy(e => e.Id)); blogPosts.Should().HaveCount(5); @@ -205,13 +205,7 @@ public async Task Find_should_query_repository_for_first_entity_and_return_it() var blogPost = repository.FindFirst(post => post.Name.Contains("Blog post 1")); blogPost.Should().NotBeNull(); - blogPost.Should() - .BeEquivalentTo(testBlogEntities.blogPost1, - options => options.Excluding(p => p.Categories) - .Excluding(p => p.Tags) - .IgnoringCyclicReferences() - .Using(ctx => ctx.Subject.Should().BeCloseTo(ctx.Expectation, TimeSpan.FromMilliseconds(100))) - .WhenTypeIs()); + blogPost.Should().BeEquivalentTo(testBlogEntities.blogPost1, options => options.Excluding(p => p.Categories).Excluding(p => p.Tags).WithEntityEquivalencyOptions()); } [Fact] diff --git a/tests/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTests/ReadWriteRepositoryAsyncAdditionalTests.cs b/tests/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTests/ReadWriteRepositoryAsyncAdditionalTests.cs new file mode 100644 index 0000000..14e05e5 --- /dev/null +++ b/tests/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTests/ReadWriteRepositoryAsyncAdditionalTests.cs @@ -0,0 +1,288 @@ +using Microsoft.EntityFrameworkCore; +using Ploch.Data.GenericRepository.EFCore.IntegrationTesting; +using Ploch.Data.GenericRepository.EFCore.IntegrationTests.Model; + +namespace Ploch.Data.GenericRepository.EFCore.IntegrationTests; + +public class ReadWriteRepositoryAsyncAdditionalTests : GenericRepositoryDataIntegrationTest +{ + [Fact] + public async Task DeleteAsync_by_id_should_remove_entity() + { + using var unitOfWork = CreateUnitOfWork(); + var repository = unitOfWork.Repository(); + await repository.AddAsync(new() { Id = 1, Name = "ToDelete" }); + await unitOfWork.CommitAsync(); + + await repository.DeleteAsync(1); + await unitOfWork.CommitAsync(); + + // Verify via a fresh DbContext rather than the repository under test, + // so the 
assertion cannot be served from the repository's change tracker. + await using var rootDbContext = CreateRootDbContext(); + var result = await rootDbContext.Set().FindAsync(1); + result.Should().BeNull(); + } + + [Fact] + public async Task DeleteAsync_by_id_should_throw_EntityNotFoundException_when_entity_does_not_exist() + { + using var unitOfWork = CreateUnitOfWork(); + var repository = unitOfWork.Repository(); + + var act = async () => await repository.DeleteAsync(999); + + await act.Should().ThrowAsync(); + } + + [Fact] + public async Task DeleteAsync_by_entity_should_remove_entity() + { + using var unitOfWork = CreateUnitOfWork(); + var repository = unitOfWork.Repository(); + var entity = new TestEntity { Id = 1, Name = "ToDelete" }; + await repository.AddAsync(entity); + await unitOfWork.CommitAsync(); + + await repository.DeleteAsync(entity); + await unitOfWork.CommitAsync(); + + await using var rootDbContext = CreateRootDbContext(); + var result = await rootDbContext.Set().FindAsync(entity.Id); + result.Should().BeNull(); + } + + [Fact] + public async Task UpdateAsync_should_throw_EntityNotFoundException_when_entity_does_not_exist() + { + using var unitOfWork = CreateUnitOfWork(); + var repository = unitOfWork.Repository(); + var entity = new TestEntity { Id = 999, Name = "NonExistent" }; + + var act = async () => await repository.UpdateAsync(entity); + + await act.Should().ThrowAsync().Where(e => e.Message.Contains("not found")); + } + + [Fact] + public async Task UpdateAsync_should_update_entity() + { + using var unitOfWork = CreateUnitOfWork(); + var repository = unitOfWork.Repository(); + await repository.AddAsync(new() { Id = 1, Name = "Original" }); + await unitOfWork.CommitAsync(); + + var updatedEntity = new TestEntity { Id = 1, Name = "Updated" }; + await repository.UpdateAsync(updatedEntity); + await unitOfWork.CommitAsync(); + + await using var rootDbContext = CreateRootDbContext(); + var result = await rootDbContext.Set().FindAsync(1); + 
result.Should().NotBeNull(); + result!.Name.Should().Be("Updated"); + } + + [Fact] + public async Task AddRangeAsync_should_add_multiple_entities() + { + using var unitOfWork = CreateUnitOfWork(); + var repository = unitOfWork.Repository(); + var entities = new List { new() { Id = 1, Name = "First" }, new() { Id = 2, Name = "Second" }, new() { Id = 3, Name = "Third" } }; + + var result = await repository.AddRangeAsync(entities); + await unitOfWork.CommitAsync(); + + result.Should().HaveCount(3); + var all = await repository.GetAllAsync(); + all.Should().HaveCount(3); + } + + [Fact] + public async Task GetByIdAsync_with_onDbSet_should_return_entity() + { + using var unitOfWork = CreateUnitOfWork(); + var repository = unitOfWork.Repository(); + await repository.AddAsync(new() { Id = 1, Name = "WithOnDbSet" }); + await unitOfWork.CommitAsync(); + + var result = await repository.GetByIdAsync(1, q => q.Where(e => e.Name.Contains("WithOnDbSet"))); + + result.Should().NotBeNull(); + result!.Name.Should().Be("WithOnDbSet"); + } + + [Fact] + public async Task GetByIdAsync_with_onDbSet_should_return_null_when_filter_excludes_entity() + { + using var unitOfWork = CreateUnitOfWork(); + var repository = unitOfWork.Repository(); + await repository.AddAsync(new() { Id = 1, Name = "Excluded" }); + await unitOfWork.CommitAsync(); + + var result = await repository.GetByIdAsync(1, q => q.Where(e => e.Name == "NonExistent")); + + result.Should().BeNull(); + } + + [Fact] + public async Task GetByIdAsync_with_keyValues_should_return_entity() + { + using var unitOfWork = CreateUnitOfWork(); + var repository = unitOfWork.Repository(); + await repository.AddAsync(new() { Id = 1, Name = "KeyValueFind" }); + await unitOfWork.CommitAsync(); + + var readRepo = CreateReadRepositoryAsync(); + var result = await readRepo.GetByIdAsync([ 1 ]); + + result.Should().NotBeNull(); + result!.Name.Should().Be("KeyValueFind"); + } + + [Fact] + public async Task 
GetAllAsync_with_query_filter_should_return_filtered_entities() + { + using var unitOfWork = CreateUnitOfWork(); + var repository = unitOfWork.Repository(); + await repository.AddAsync(new() { Id = 1, Name = "Alpha" }); + await repository.AddAsync(new() { Id = 2, Name = "Beta" }); + await repository.AddAsync(new() { Id = 3, Name = "AlphaTwo" }); + await unitOfWork.CommitAsync(); + + var result = await repository.GetAllAsync(e => e.Name.Contains("Alpha")); + + result.Should().HaveCount(2); + } + + [Fact] + public async Task GetAllAsync_with_onDbSet_should_apply_custom_query() + { + using var unitOfWork = CreateUnitOfWork(); + var repository = unitOfWork.Repository(); + await repository.AddAsync(new() { Id = 1, Name = "First" }); + await repository.AddAsync(new() { Id = 2, Name = "Second" }); + await unitOfWork.CommitAsync(); + + var result = await repository.GetAllAsync(onDbSet: q => q.OrderByDescending(e => e.Name)); + + result.Should().HaveCount(2); + result[0].Name.Should().Be("Second"); + } + + [Fact] + public async Task FindFirstAsync_should_return_first_matching_entity() + { + using var unitOfWork = CreateUnitOfWork(); + var repository = unitOfWork.Repository(); + await repository.AddAsync(new() { Id = 1, Name = "Alpha" }); + await repository.AddAsync(new() { Id = 2, Name = "Beta" }); + await unitOfWork.CommitAsync(); + + var result = await repository.FindFirstAsync(e => e.Name == "Beta"); + + result.Should().NotBeNull(); + result!.Name.Should().Be("Beta"); + } + + [Fact] + public async Task FindFirstAsync_with_onDbSet_should_apply_custom_query() + { + using var unitOfWork = CreateUnitOfWork(); + var repository = unitOfWork.Repository(); + await repository.AddAsync(new() { Id = 1, Name = "Alpha" }); + await repository.AddAsync(new() { Id = 2, Name = "Beta" }); + await unitOfWork.CommitAsync(); + + // Predicate matches multiple rows so the onDbSet ordering is observable: + // both "Alpha" and "Beta" contain 'a', OrderByDescending(Name) should yield "Beta" 
first. + var result = await repository.FindFirstAsync(e => e.Name.Contains('a'), q => q.OrderByDescending(e => e.Name)); + + result.Should().NotBeNull(); + result!.Name.Should().Be("Beta"); + } + + [Fact] + public async Task FindFirstAsync_should_return_null_when_no_match() + { + using var unitOfWork = CreateUnitOfWork(); + var repository = unitOfWork.Repository(); + await repository.AddAsync(new() { Id = 1, Name = "Alpha" }); + await unitOfWork.CommitAsync(); + + var result = await repository.FindFirstAsync(e => e.Name == "NonExistent"); + + result.Should().BeNull(); + } + + [Fact] + public async Task CountAsync_with_filter_should_return_filtered_count() + { + using var unitOfWork = CreateUnitOfWork(); + var repository = unitOfWork.Repository(); + await repository.AddAsync(new() { Id = 1, Name = "Alpha" }); + await repository.AddAsync(new() { Id = 2, Name = "Beta" }); + await repository.AddAsync(new() { Id = 3, Name = "AlphaTwo" }); + await unitOfWork.CommitAsync(); + + var readRepo = CreateReadRepositoryAsync(); + var count = await readRepo.CountAsync(e => e.Name.Contains("Alpha")); + + count.Should().Be(2); + } + + [Fact] + public async Task CountAsync_without_filter_should_return_total_count() + { + using var unitOfWork = CreateUnitOfWork(); + var repository = unitOfWork.Repository(); + await repository.AddAsync(new() { Id = 1, Name = "Alpha" }); + await repository.AddAsync(new() { Id = 2, Name = "Beta" }); + await unitOfWork.CommitAsync(); + + var readRepo = CreateReadRepositoryAsync(); + var count = await readRepo.CountAsync(); + + count.Should().Be(2); + } + + [Fact] + public async Task GetPageAsync_should_return_paged_results() + { + using var unitOfWork = CreateUnitOfWork(); + var repository = unitOfWork.Repository(); + for (var i = 1; i <= 10; i++) + { + await repository.AddAsync(new() { Id = i, Name = $"Entity{i}" }); + } + + await unitOfWork.CommitAsync(); + + // Explicit sort on Id so the page contents are deterministic — without a sort, + // a 
regression that always returns the first three rows would still pass. + var readRepo = CreateReadRepositoryAsync(); + var page = await readRepo.GetPageAsync(2, 3, e => e.Id); + + page.Should().HaveCount(3); + page.Select(e => e.Id).Should().Equal(4, 5, 6); + } + + [Fact] + public async Task GetPageAsync_with_sort_and_query_should_return_filtered_sorted_results() + { + using var unitOfWork = CreateUnitOfWork(); + var repository = unitOfWork.Repository(); + for (var i = 1; i <= 10; i++) + { + await repository.AddAsync(new() { Id = i, Name = $"Entity{i}" }); + } + + await unitOfWork.CommitAsync(); + + var readRepo = CreateReadRepositoryAsync(); + var page = await readRepo.GetPageAsync(1, 10, e => e.Name, e => e.Id > 7); + + page.Should().HaveCount(3); + page.Should().OnlyContain(e => e.Id > 7); + page.Select(e => e.Name).Should().BeInAscendingOrder(); + } +} diff --git a/tests/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTests/ReadWriteRepositoryAsyncTests.cs b/tests/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTests/ReadWriteRepositoryAsyncTests.cs index ae42386..fcacc22 100644 --- a/tests/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTests/ReadWriteRepositoryAsyncTests.cs +++ b/tests/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTests/ReadWriteRepositoryAsyncTests.cs @@ -57,20 +57,13 @@ public async Task GetAllAsync_should_return_entities_with_includes() blogPosts.Should().HaveCount(2); blogPosts.Should() .ContainEquivalentOf(blogPost1, - options => options.Excluding(p => p.Categories) - .IgnoringCyclicReferences() - .Using(ctx => ctx.Subject.Should().BeCloseTo(ctx.Expectation, TimeSpan.FromMilliseconds(100))) - .WhenTypeIs()); + options => options.Excluding(p => p.Categories).WithEntityEquivalencyOptions()); blogPosts.Should() .ContainEquivalentOf(blogPost2, - options => options.Excluding(p => p.Categories) - .IgnoringCyclicReferences() - .Using(ctx => 
ctx.Subject.Should().BeCloseTo(ctx.Expectation, TimeSpan.FromMilliseconds(100))) - .WhenTypeIs()); + options => options.Excluding(p => p.Categories).WithEntityEquivalencyOptions()); foreach (var blogPost in blogPosts) { blogPost.Tags.Should().NotBeEmpty(); - blogPost.Tags.Should().NotBeEmpty(); } } @@ -107,17 +100,21 @@ public async Task GetPageAsync_should_return_a_page_of_entities_with_includes() await unitOfWork.CommitAsync(); var repository = CreateReadRepositoryAsync(); - var blogPosts = await repository.GetPageAsync(2, 5, onDbSet: query => query.Include(e => e.Tags).Include(e => e.Categories)); + + // Explicit OrderBy so the page contents are deterministic — without it, the DB may return rows in any order. + var blogPosts = await repository.GetPageAsync(2, 5, onDbSet: query => query.OrderBy(e => e.Id).Include(e => e.Tags).Include(e => e.Categories)); blogPosts.Should().HaveCount(5); for (var i = 0; i < 5; i++) { var blogPost = posts[i + 5]; - var queriedPost = blogPosts.Should().ContainEquivalentOf(blogPost, options => options.Excluding(p => p.Categories).Excluding(p => p.Tags)).Subject; - queriedPost.Tags.Should().BeEquivalentTo(blogPost.Tags, options => options.Excluding(t => t.BlogPosts)); - queriedPost.Categories.Should().HaveCount(blogPost.Categories.Count); - queriedPost.Categories.Should().BeEquivalentTo(blogPost.Categories, options => options.Excluding(c => c.BlogPosts).Excluding(c => c.Parent).Excluding(c => c.Children)); + blogPosts.Should() + .ContainEquivalentOf(blogPost, + options => options.Excluding(member => member.Path.EndsWith(".BlogPosts")) + .Excluding(member => member.Path.EndsWith(".Parent")) + .Excluding(member => member.Path.EndsWith(".Children")) + .WithEntityEquivalencyOptions()); } } @@ -138,17 +135,21 @@ public async Task GetPageAsync_should_return_a_page_of_entities_with_includes_us query: query => query.Name == "Blog post 5" || query.Name == "Blog post 6" || query.Name == "Blog post 7" || query.Name == "Blog post 8" || query.Name 
== "Blog post 9" || query.Name == "Blog post 10", #pragma warning restore SA1117 - onDbSet: query => query.Include(e => e.Tags).Include(e => e.Categories)); + + // Explicit OrderBy so the filtered page is deterministic. + onDbSet: query => query.OrderBy(e => e.Id).Include(e => e.Tags).Include(e => e.Categories)); blogPosts.Should().HaveCount(3); for (var i = 7; i <= 9; i++) { var blogPost = posts[i]; - var queriedPost = blogPosts.Should().ContainEquivalentOf(blogPost, options => options.Excluding(p => p.Categories).Excluding(p => p.Tags)).Subject; - queriedPost.Tags.Should().BeEquivalentTo(blogPost.Tags, options => options.Excluding(t => t.BlogPosts)); - queriedPost.Categories.Should().HaveCount(blogPost.Categories.Count); - queriedPost.Categories.Should().BeEquivalentTo(blogPost.Categories, options => options.Excluding(c => c.BlogPosts).Excluding(c => c.Parent).Excluding(c => c.Children)); + blogPosts.Should() + .ContainEquivalentOf(blogPost, + options => options.Excluding(member => member.Path.EndsWith(".BlogPosts")) + .Excluding(member => member.Path.EndsWith(".Parent")) + .Excluding(member => member.Path.EndsWith(".Children")) + .WithEntityEquivalencyOptions()); } } @@ -162,7 +163,9 @@ public async Task GetPageAsync_should_return_a_page_of_entities_without_includes await unitOfWork.CommitAsync(); var repository = CreateReadRepositoryAsync(); - var blogPosts = await repository.GetPageAsync(2, 5); + + // Explicit OrderBy so posts[i + 5] reliably matches the returned slice. 
+ var blogPosts = await repository.GetPageAsync(2, 5, onDbSet: q => q.OrderBy(e => e.Id)); blogPosts.Should().HaveCount(5); @@ -187,11 +190,7 @@ public async Task GetByIdAsync_should_return_entity_with_includes() var repository = CreateReadRepositoryAsync(); var blogPost = await repository.GetByIdAsync(blogPost2.Id, query => query.Include(e => e.Tags)); - blogPost.Should() - .BeEquivalentTo(blogPost2, - options => options.IgnoringCyclicReferences() - .Using(ctx => ctx.Subject.Should().BeCloseTo(ctx.Expectation, TimeSpan.FromMilliseconds(100))) - .WhenTypeIs()); + blogPost.Should().BeEquivalentTo(blogPost2, options => options.WithEntityEquivalencyOptions()); blogPost!.Tags.Should().NotBeEmpty(); } @@ -210,9 +209,7 @@ public async Task GetByIdAsync_with_object_key_should_return_entity_with_include .BeEquivalentTo(blogPost2, options => options.Excluding(p => p.Categories) .Excluding(p => p.Tags) - .IgnoringCyclicReferences() - .Using(ctx => ctx.Subject.Should().BeCloseTo(ctx.Expectation, TimeSpan.FromMilliseconds(100))) - .WhenTypeIs()); + .WithEntityEquivalencyOptions()); } [Fact] @@ -292,8 +289,6 @@ public async Task FindFirstAsync_should_execute_query_and_return_the_first_hit() .BeEquivalentTo(testBlogEntities.blogPost1, options => options.Excluding(p => p.Categories) .Excluding(p => p.Tags) - .IgnoringCyclicReferences() - .Using(ctx => ctx.Subject.Should().BeCloseTo(ctx.Expectation, TimeSpan.FromMilliseconds(100))) - .WhenTypeIs()); + .WithEntityEquivalencyOptions()); } } diff --git a/tests/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTests/ReadWriteRepositoryDeleteByIdTests.cs b/tests/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTests/ReadWriteRepositoryDeleteByIdTests.cs new file mode 100644 index 0000000..ea58a71 --- /dev/null +++ b/tests/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTests/ReadWriteRepositoryDeleteByIdTests.cs @@ -0,0 +1,98 @@ +using System.Globalization; +using 
Microsoft.EntityFrameworkCore; +using Microsoft.Extensions.DependencyInjection; +using Ploch.Data.GenericRepository.EFCore.IntegrationTesting; +using Ploch.Data.GenericRepository.EFCore.IntegrationTests.Model; + +namespace Ploch.Data.GenericRepository.EFCore.IntegrationTests; + +public class ReadWriteRepositoryDeleteByIdTests : GenericRepositoryDataIntegrationTest +{ + [Fact] + public async Task Delete_by_id_should_remove_entity() + { + const int idToDelete = 10; + + using var unitOfWork = CreateUnitOfWork(); + var asyncRepo = unitOfWork.Repository(); + await asyncRepo.AddAsync(new() { Id = idToDelete, Name = "ToDelete" }); + await unitOfWork.CommitAsync(); + + var repository = unitOfWork.Repository(); + await repository.DeleteAsync(idToDelete); + + // After committing, it should be gone from the database. + await unitOfWork.CommitAsync(); + + var anotherDbContext = RootServiceProvider.GetRequiredService(); + + var result = await anotherDbContext.TestEntities.FindAsync(idToDelete); + result.Should().BeNull(); + } + + [Fact] + public void Delete_by_id_should_throw_EntityNotFoundException_when_entity_does_not_exist() + { + const int nonExistingId = 999; + var repository = CreateReadWriteRepository(); + + var act = () => repository.Delete(nonExistingId); + + act.Should().Throw().Where(e => e.Message.Contains(nonExistingId.ToString(CultureInfo.InvariantCulture))); + } + + [Fact] + public void GetById_should_return_null_when_entity_does_not_exist() + { + var repository = CreateReadRepository(); + + var result = repository.GetById(999); + + result.Should().BeNull(); + } + + [Fact] + public async Task GetById_with_onDbSet_should_return_entity() + { + using var unitOfWork = CreateUnitOfWork(); + var blogRepository1 = unitOfWork.Repository(); + var (blog, blogPost1, _) = await RepositoryHelper.AddTestBlogEntities(blogRepository1); + + await unitOfWork.CommitAsync(); + + var repository = CreateReadRepository(); + var result = repository.GetById(blog.Id, q => q.Include(q => 
q.BlogPosts).ThenInclude(bp => bp.Tags).Include(q => q.BlogPosts).ThenInclude(bp => bp.Categories)); + + var rootDbContext = CreateRootDbContext(); + + var resultFromDb = await rootDbContext.Blogs.Include(q => q.BlogPosts) + .ThenInclude(bp => bp.Tags) + .Include(q => q.BlogPosts) + .ThenInclude(bp => bp.Categories) + .FirstAsync(b => b.Id == blog.Id); + resultFromDb.Should().BeEquivalentTo(result, options => options.WithEntityEquivalencyOptions()); + result.Should().NotBeNull(); + result!.Id.Should().Be(blog.Id); + result.Name.Should().Be(blog.Name); + result.BlogPosts.Should().HaveCount(blog.BlogPosts.Count); + + // Verify eager-loading: Tags and Categories were included in the onDbSet query + var loadedPost1 = result.BlogPosts.Single(p => p.Name == blogPost1.Name); + loadedPost1.Tags.Should().HaveCount(blogPost1.Tags.Count); + loadedPost1.Categories.Should().HaveCount(blogPost1.Categories.Count); + } + + [Fact] + public async Task GetById_with_onDbSet_should_return_null_when_filter_excludes_entity() + { + using var unitOfWork = CreateUnitOfWork(); + var asyncRepo = unitOfWork.Repository(); + await asyncRepo.AddAsync(new() { Id = 1, Name = "Excluded" }); + await unitOfWork.CommitAsync(); + + var repository = CreateReadRepository(); + var result = repository.GetById(1, q => q.Where(e => e.Name == "NonExistent")); + + result.Should().BeNull(); + } +} diff --git a/tests/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTests/RepositoryHelper.cs b/tests/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTests/RepositoryHelper.cs index e68c16e..dd624f8 100644 --- a/tests/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTests/RepositoryHelper.cs +++ b/tests/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTests/RepositoryHelper.cs @@ -2,8 +2,26 @@ namespace Ploch.Data.GenericRepository.EFCore.IntegrationTests; +/// +/// Helpers for seeding repository-backed integration-test fixtures with a known set of blog, 
blog-post, +/// tag, category, and user-idea entities. The helpers wrap so tests can +/// populate the database in a single call and get typed references back to the seeded entities for +/// later assertion. +/// public static class RepositoryHelper { + /// + /// Seeds a blog with two blog posts via the synchronous repository and returns the seeded entities. + /// + /// The synchronous to add the blog to. + /// + /// A tuple of the seeded and the two instances attached to it. + /// + /// + /// + /// var (blog, post1, post2) = RepositoryHelper.AddTestBlogEntities(blogRepository); + /// + /// public static (Blog, BlogPost, BlogPost) AddTestBlogEntities(IReadWriteRepository blogRepository) { var (blog, blogPost1, blogPost2) = EntitiesBuilder.BuildBlogEntity(); @@ -13,6 +31,28 @@ public static (Blog, BlogPost, BlogPost) AddTestBlogEntities(IReadWriteRepositor return (blog, blogPost1, blogPost2); } + /// + /// Seeds a blog with two blog posts via the asynchronous repository and returns the seeded entities. + /// + /// The asynchronous to add the blog to. + /// + /// A task that resolves to a tuple of the seeded and the two + /// instances attached to it. 
+ /// + /// + /// + /// var (blog, post1, post2) = await RepositoryHelper.AddTestBlogEntities(blogRepository); + /// + /// + public static async Task<(Blog, BlogPost, BlogPost)> AddTestBlogEntities(IReadWriteRepositoryAsync blogRepository) + { + var (blog, blogPost1, blogPost2) = EntitiesBuilder.BuildBlogEntity(); + + await blogRepository.AddAsync(blog); + + return (blog, blogPost1, blogPost2); + } + public static IEnumerable AddTestUserIdeasEntities(IReadWriteRepository userIdeasRepository) { var (userIdea1, userIdea2) = EntitiesBuilder.BuildUserIdeaEntities(); @@ -62,4 +102,4 @@ public static async Task AddBlogPostTagsAsync(IReadWriteRepositor return tags; } -} \ No newline at end of file +} diff --git a/tests/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTests/ServiceCollectionRegistrationsTests.cs b/tests/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTests/ServiceCollectionRegistrationsTests.cs index 5491e12..e143e8e 100644 --- a/tests/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTests/ServiceCollectionRegistrationsTests.cs +++ b/tests/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTests/ServiceCollectionRegistrationsTests.cs @@ -61,7 +61,7 @@ public void AddRepositories_with_configuration_should_register_repositories() var configuration = new ConfigurationBuilder().AddInMemoryCollection(new Dictionary()).Build(); serviceCollection.AddRepositories(configuration); - var (serviceProvider, _, _) = DbContextServicesRegistrationHelper.BuildDbContextAndServiceProvider(serviceCollection); + var (_, serviceProvider, _) = DbContextServicesRegistrationHelper.BuildDbContextAndServiceProvider(serviceCollection); serviceProvider.GetRequiredService>().Should().BeOfType>(); } @@ -72,7 +72,7 @@ public void AddRepositories_should_register_repository_types_mapping_them_to_con var serviceCollection = new ServiceCollection(); serviceCollection.AddRepositories(); - var (serviceProvider, _, _) = 
DbContextServicesRegistrationHelper.BuildDbContextAndServiceProvider(serviceCollection); + var (_, serviceProvider, _) = DbContextServicesRegistrationHelper.BuildDbContextAndServiceProvider(serviceCollection); serviceProvider.GetRequiredService>().Should().BeOfType>(); serviceProvider.GetRequiredService>().Should().BeOfType>(); @@ -93,7 +93,7 @@ public void AddCustomAsyncRepository_should_register_custom_repository() serviceCollection.AddScoped, CustomBlogRepository>(); serviceCollection.AddRepositories(); - var (serviceProvider, _, _) = DbContextServicesRegistrationHelper.BuildDbContextAndServiceProvider(serviceCollection); + var (_, serviceProvider, _) = DbContextServicesRegistrationHelper.BuildDbContextAndServiceProvider(serviceCollection); // Resolving the custom repository interface serviceProvider.GetRequiredService().Should().BeOfType(); @@ -124,7 +124,7 @@ public void AddCustomReadWriteAsyncRepository_with_registration_function_should_ serviceCollection.AddScoped, CustomBlogRepository>(); serviceCollection.AddRepositories(); - var (serviceProvider, _, _) = DbContextServicesRegistrationHelper.BuildDbContextAndServiceProvider(serviceCollection); + var (_, serviceProvider, _) = DbContextServicesRegistrationHelper.BuildDbContextAndServiceProvider(serviceCollection); // Resolving the custom repository interface serviceProvider.GetRequiredService().Should().BeOfType(); @@ -141,7 +141,7 @@ public void AddCustomReadWriteRepository_with_registration_function_should_regis serviceCollection.AddScoped, CustomBlogRepository>(); serviceCollection.AddRepositories(); - var (serviceProvider, _, _) = DbContextServicesRegistrationHelper.BuildDbContextAndServiceProvider(serviceCollection); + var (_, serviceProvider, _) = DbContextServicesRegistrationHelper.BuildDbContextAndServiceProvider(serviceCollection); // Resolving the custom repository interface serviceProvider.GetRequiredService().Should().BeOfType(); @@ -158,7 +158,7 @@ public void 
AddCustomRepository_should_register_custom_repository() serviceCollection.AddScoped, CustomBlogRepository>(); serviceCollection.AddRepositories(); - var (serviceProvider, _, _) = DbContextServicesRegistrationHelper.BuildDbContextAndServiceProvider(serviceCollection); + var (_, serviceProvider, _) = DbContextServicesRegistrationHelper.BuildDbContextAndServiceProvider(serviceCollection); // Resolving the custom repository interface serviceProvider.GetRequiredService().Should().BeOfType(); diff --git a/tests/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTests/UnitOfWorkAdditionalTests.cs b/tests/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTests/UnitOfWorkAdditionalTests.cs new file mode 100644 index 0000000..2359a69 --- /dev/null +++ b/tests/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTests/UnitOfWorkAdditionalTests.cs @@ -0,0 +1,84 @@ +using Ploch.Data.GenericRepository.EFCore.IntegrationTesting; +using Ploch.Data.GenericRepository.EFCore.IntegrationTests.Model; + +namespace Ploch.Data.GenericRepository.EFCore.IntegrationTests; + +public class UnitOfWorkAdditionalTests : GenericRepositoryDataIntegrationTest +{ + [Fact] + public async Task RollbackAsync_should_revert_uncommitted_changes() + { + using var unitOfWork = CreateUnitOfWork(); + + var repository = unitOfWork.Repository(); + await repository.AddAsync(new TestEntity { Id = 1, Name = "ToBeRolledBack" }); + await unitOfWork.CommitAsync(); + + // Modify the entity + var entity = await repository.GetByIdAsync(1); + entity.Should().NotBeNull(); + entity!.Name = "Modified"; + + // Rollback before committing + await unitOfWork.RollbackAsync(); + + var reloaded = await repository.GetByIdAsync(1); + reloaded.Should().NotBeNull(); + reloaded!.Name.Should().Be("ToBeRolledBack"); + } + + [Fact] + public void Repository_should_cache_and_return_same_instance() + { + using var unitOfWork = CreateUnitOfWork(); + + var repo1 = unitOfWork.Repository(); + var repo2 = 
unitOfWork.Repository(); + + repo1.Should().BeSameAs(repo2); + } + + [Fact] + public void Dispose_should_not_throw() + { + var unitOfWork = CreateUnitOfWork(); + + unitOfWork.Invoking(u => u.Dispose()).Should().NotThrow(); + } + + [Fact] + public async Task DisposeAsync_should_not_throw() + { + var unitOfWork = CreateUnitOfWork(); + + if (unitOfWork is IAsyncDisposable asyncDisposable) + { + var act = async () => await asyncDisposable.DisposeAsync(); + await act.Should().NotThrowAsync(); + } + } + + [Fact] + public async Task CommitAsync_with_no_changes_should_return_zero() + { + using var unitOfWork = CreateUnitOfWork(); + + var result = await unitOfWork.CommitAsync(); + + result.Should().Be(0); + } + + [Fact] + public async Task CommitAsync_with_changes_should_return_number_of_affected_entities() + { + using var unitOfWork = CreateUnitOfWork(); + + var repository = unitOfWork.Repository(); + await repository.AddAsync(new TestEntity { Id = 1, Name = "Entity1" }); + await repository.AddAsync(new TestEntity { Id = 2, Name = "Entity2" }); + + var result = await unitOfWork.CommitAsync(); + + result.Should().Be(2); + } +} diff --git a/tests/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTests/UnitOfWorkRepositoryAsyncSQLiteInMemoryTests.cs b/tests/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTests/UnitOfWorkRepositoryAsyncSQLiteInMemoryTests.cs index 486fe29..da417c1 100644 --- a/tests/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTests/UnitOfWorkRepositoryAsyncSQLiteInMemoryTests.cs +++ b/tests/Data.GenericRepository/Data.GenericRepository.EFCore.IntegrationTests/UnitOfWorkRepositoryAsyncSQLiteInMemoryTests.cs @@ -35,17 +35,14 @@ public async Task RepositoryAsync_and_UnitOfWorkAsync_add_and_query_by_id_should await unitOfWork.CommitAsync(); - var unitOfWork2 = CreateUnitOfWork(); + using var unitOfWork2 = CreateUnitOfWork(); var blogRepository = CreateReadRepositoryAsync(); var actualBlog = await 
blogRepository.GetByIdAsync(blog.Id); actualBlog.Should() .BeEquivalentTo(blog, - options => options.Excluding(p => p.BlogPosts) - .IgnoringCyclicReferences() - .Using(ctx => ctx.Subject.Should().BeCloseTo(ctx.Expectation, TimeSpan.FromMilliseconds(100))) - .WhenTypeIs()); + options => options.Excluding(p => p.BlogPosts).WithEntityEquivalencyOptions()); actualBlog!.Name.Should().Be(blog.Name); var actualBlogPost1 = await unitOfWork2.Repository().GetByIdAsync(blogPost1.Id); @@ -53,20 +50,16 @@ public async Task RepositoryAsync_and_UnitOfWorkAsync_add_and_query_by_id_should .BeEquivalentTo(blogPost1, options => options.Excluding(p => p.Categories) .Excluding(p => p.Tags) - .IgnoringCyclicReferences() - .Using(ctx => ctx.Subject.Should().BeCloseTo(ctx.Expectation, TimeSpan.FromMilliseconds(100))) - .WhenTypeIs()); + .WithEntityEquivalencyOptions()); var actualBlogPost2 = await unitOfWork2.Repository().GetByIdAsync(blogPost2.Id); actualBlogPost2.Should() .BeEquivalentTo(blogPost2, options => options.Excluding(p => p.Categories) .Excluding(p => p.Tags) - .IgnoringCyclicReferences() - .Using(ctx => ctx.Subject.Should().BeCloseTo(ctx.Expectation, TimeSpan.FromMilliseconds(100))) - .WhenTypeIs()); + .WithEntityEquivalencyOptions()); - var testUnitOfWork = CreateUnitOfWork(); + using var testUnitOfWork = CreateUnitOfWork(); var actualIdeas = await testUnitOfWork.Repository().GetAllAsync(); @@ -95,10 +88,7 @@ public async Task UpdateAsync_entity() blog.Name = "Updated Blog"; actualBlog.Should() .BeEquivalentTo(blog, - options => options.Excluding(p => p.BlogPosts) - .IgnoringCyclicReferences() - .Using(ctx => ctx.Subject.Should().BeCloseTo(ctx.Expectation, TimeSpan.FromMilliseconds(100))) - .WhenTypeIs()); + options => options.Excluding(p => p.BlogPosts).WithEntityEquivalencyOptions()); } [Fact] @@ -116,10 +106,7 @@ public async Task AddAsync_entity() var actualBlog = await blogRepository.GetByIdAsync(blog.Id); actualBlog.Should() .BeEquivalentTo(blog, - options => 
options.Excluding(p => p.BlogPosts) - .IgnoringCyclicReferences() - .Using(ctx => ctx.Subject.Should().BeCloseTo(ctx.Expectation, TimeSpan.FromMilliseconds(100))) - .WhenTypeIs()); + options => options.Excluding(p => p.BlogPosts).WithEntityEquivalencyOptions()); } [Fact] diff --git a/tests/Data.GenericRepository/Data.GenericRepository.Tests/Ploch.Data.GenericRepository.Tests.csproj b/tests/Data.GenericRepository/Data.GenericRepository.Tests/Ploch.Data.GenericRepository.Tests.csproj index e44f7bf..7f515aa 100644 --- a/tests/Data.GenericRepository/Data.GenericRepository.Tests/Ploch.Data.GenericRepository.Tests.csproj +++ b/tests/Data.GenericRepository/Data.GenericRepository.Tests/Ploch.Data.GenericRepository.Tests.csproj @@ -2,12 +2,6 @@ $(TargetFrameworkVersion) - enable - enable - - false - true - true Exe $(NoWarn);VSTHRD200 diff --git a/tests/Data.Model.Tests/CommonTypes/CategoryTests.cs b/tests/Data.Model.Tests/CommonTypes/CategoryTests.cs new file mode 100644 index 0000000..e723183 --- /dev/null +++ b/tests/Data.Model.Tests/CommonTypes/CategoryTests.cs @@ -0,0 +1,102 @@ +using FluentAssertions; +using Ploch.Data.Model; +using Ploch.Data.Model.CommonTypes; +using Xunit; + +namespace Ploch.Data.Model.Tests.CommonTypes; + +public class CategoryTests +{ + [Fact] + public void Id_should_be_settable_and_gettable() + { + var category = new TestCategory { Id = 42 }; + + category.Id.Should().Be(42); + } + + [Fact] + public void Name_should_be_settable_and_gettable() + { + var category = new TestCategory { Name = "Electronics" }; + + category.Name.Should().Be("Electronics"); + } + + [Fact] + public void Parent_should_be_settable_and_gettable() + { + var parent = new TestCategory { Id = 1, Name = "Root" }; + var child = new TestCategory { Id = 2, Name = "Child", Parent = parent }; + + child.Parent.Should().BeSameAs(parent); + } + + [Fact] + public void Children_should_be_settable_and_gettable() + { + var parent = new TestCategory { Id = 1, Name = "Root" }; + var child1 = 
new TestCategory { Id = 2, Name = "Child1" }; + var child2 = new TestCategory { Id = 3, Name = "Child2" }; + parent.Children = [child1, child2]; + + parent.Children.Should().HaveCount(2); + parent.Children.Should().Contain(child1); + parent.Children.Should().Contain(child2); + } + + [Fact] + public void Category_should_implement_IHasId() + { + var category = new TestCategory(); + + category.Should().BeAssignableTo>(); + } + + [Fact] + public void Category_should_implement_INamed() + { + var category = new TestCategory(); + + category.Should().BeAssignableTo(); + } + + [Fact] + public void Category_should_implement_IHierarchicalParentChildrenComposite() + { + var category = new TestCategory(); + + category.Should().BeAssignableTo>(); + } + + [Fact] + public void CategoryWithCustomId_should_support_guid_id() + { + var id = Guid.NewGuid(); + var category = new GuidCategory { Id = id, Name = "Test" }; + + category.Id.Should().Be(id); + } + + [Fact] + public void Children_should_be_null_by_default() + { + var category = new TestCategory(); + + category.Children.Should().BeNull(); + } + + [Fact] + public void Parent_should_be_null_by_default() + { + var category = new TestCategory(); + + category.Parent.Should().BeNull(); + } + + private sealed class TestCategory : Category + { } + + private sealed class GuidCategory : Category + { } +} diff --git a/tests/Data.Model.Tests/CommonTypes/ImageTests.cs b/tests/Data.Model.Tests/CommonTypes/ImageTests.cs new file mode 100644 index 0000000..d496b4f --- /dev/null +++ b/tests/Data.Model.Tests/CommonTypes/ImageTests.cs @@ -0,0 +1,82 @@ +using FluentAssertions; +using Ploch.Data.Model; +using Ploch.Data.Model.CommonTypes; +using Xunit; + +namespace Ploch.Data.Model.Tests.CommonTypes; + +public class ImageTests +{ + [Fact] + public void Id_should_be_settable_and_gettable() + { + var image = new Image { Id = 1 }; + + image.Id.Should().Be(1); + } + + [Fact] + public void Name_should_be_settable_and_gettable() + { + var image = new 
Image { Name = "logo.png" }; + + image.Name.Should().Be("logo.png"); + } + + [Fact] + public void Description_should_be_settable_and_gettable() + { + var image = new Image { Description = "Company logo" }; + + image.Description.Should().Be("Company logo"); + } + + [Fact] + public void Description_should_be_nullable() + { + var image = new Image { Name = "test.png" }; + + image.Description.Should().BeNull(); + } + + [Fact] + public void Contents_should_be_settable_and_gettable() + { + var bytes = new byte[] { 0x89, 0x50, 0x4E, 0x47 }; + var image = new Image { Contents = bytes }; + + image.Contents.Should().BeEquivalentTo(bytes); + } + + [Fact] + public void Contents_should_be_nullable() + { + var image = new Image { Name = "empty.png" }; + + image.Contents.Should().BeNull(); + } + + [Fact] + public void Image_should_implement_IHasId() + { + var image = new Image(); + + image.Should().BeAssignableTo>(); + } + + [Fact] + public void Image_should_implement_INamed() + { + var image = new Image(); + + image.Should().BeAssignableTo(); + } + + [Fact] + public void Image_should_implement_IHasDescription() + { + var image = new Image(); + + image.Should().BeAssignableTo(); + } +} diff --git a/tests/Data.Model.Tests/CommonTypes/PropertyTests.cs b/tests/Data.Model.Tests/CommonTypes/PropertyTests.cs new file mode 100644 index 0000000..7b7509e --- /dev/null +++ b/tests/Data.Model.Tests/CommonTypes/PropertyTests.cs @@ -0,0 +1,109 @@ +using FluentAssertions; +using Ploch.Data.Model; +using Ploch.Data.Model.CommonTypes; +using Xunit; + +namespace Ploch.Data.Model.Tests.CommonTypes; + +public class PropertyTests +{ + [Fact] + public void Id_should_be_settable_and_gettable() + { + var property = new Property { Id = 1 }; + + property.Id.Should().Be(1); + } + + [Fact] + public void Name_should_be_settable_and_gettable() + { + var property = new Property { Name = "colour" }; + + property.Name.Should().Be("colour"); + } + + [Fact] + public void Value_should_be_settable_and_gettable() + 
{ + var property = new Property { Value = "blue" }; + + property.Value.Should().Be("blue"); + } + + [Fact] + public void Property_should_implement_IHasId() + { + var property = new Property(); + + property.Should().BeAssignableTo>(); + } + + [Fact] + public void Property_should_implement_INamed() + { + var property = new Property(); + + property.Should().BeAssignableTo(); + } + + [Fact] + public void Property_should_implement_IHasValue() + { + var property = new Property(); + + property.Should().BeAssignableTo>(); + } + + [Fact] + public void PropertyWithDefaultId_should_use_int_id() + { + var property = new Property { Id = 42, Name = "test", Value = "hello" }; + + property.Id.Should().Be(42); + property.Should().BeAssignableTo>(); + } +} + +public class IntPropertyTests +{ + [Fact] + public void IntProperty_should_have_int_value() + { + var property = new IntProperty { Id = 1, Name = "count", Value = 42 }; + + property.Value.Should().Be(42); + property.Should().BeAssignableTo>(); + } + + [Fact] + public void IntPropertyWithCustomId_should_support_custom_id_type() + { + var id = Guid.NewGuid(); + var property = new IntProperty { Id = id, Name = "count", Value = 10 }; + + property.Id.Should().Be(id); + property.Value.Should().Be(10); + } +} + +public class StringPropertyTests +{ + [Fact] + public void StringProperty_should_have_string_value() + { + var property = new StringProperty { Id = 1, Name = "label", Value = "hello" }; + + property.Value.Should().Be("hello"); + property.Should().BeAssignableTo>(); + } + + [Fact] + public void StringPropertyWithCustomId_should_support_custom_id_type() + { + var property = new StringProperty { Id = 999L, Name = "key", Value = "val" }; + + property.Id.Should().Be(999L); + property.Value.Should().Be("val"); + } +} diff --git a/tests/Data.Model.Tests/CommonTypes/TagTests.cs b/tests/Data.Model.Tests/CommonTypes/TagTests.cs new file mode 100644 index 0000000..b9a681b --- /dev/null +++ b/tests/Data.Model.Tests/CommonTypes/TagTests.cs 
@@ -0,0 +1,75 @@ +using FluentAssertions; +using Ploch.Data.Model; +using Ploch.Data.Model.CommonTypes; +using Xunit; + +namespace Ploch.Data.Model.Tests.CommonTypes; + +public class TagTests +{ + [Fact] + public void Id_should_be_settable_and_gettable() + { + var tag = new Tag { Id = 5 }; + + tag.Id.Should().Be(5); + } + + [Fact] + public void Name_should_be_settable_and_gettable() + { + var tag = new Tag { Name = "important" }; + + tag.Name.Should().Be("important"); + } + + [Fact] + public void Description_should_be_settable_and_gettable() + { + var tag = new Tag { Description = "This is important" }; + + tag.Description.Should().Be("This is important"); + } + + [Fact] + public void Description_should_be_nullable() + { + var tag = new Tag { Name = "test" }; + + tag.Description.Should().BeNull(); + } + + [Fact] + public void Tag_should_implement_IHasId() + { + var tag = new Tag(); + + tag.Should().BeAssignableTo>(); + } + + [Fact] + public void Tag_should_implement_INamed() + { + var tag = new Tag(); + + tag.Should().BeAssignableTo(); + } + + [Fact] + public void Tag_should_implement_IHasDescription() + { + var tag = new Tag(); + + tag.Should().BeAssignableTo(); + } + + [Fact] + public void TagWithCustomId_should_support_guid_id() + { + var id = Guid.NewGuid(); + var tag = new Tag { Id = id, Name = "guid-tag" }; + + tag.Id.Should().Be(id); + tag.Name.Should().Be("guid-tag"); + } +} diff --git a/tests/Data.Model.Tests/Ploch.Data.Model.Tests.csproj b/tests/Data.Model.Tests/Ploch.Data.Model.Tests.csproj new file mode 100644 index 0000000..39ae001 --- /dev/null +++ b/tests/Data.Model.Tests/Ploch.Data.Model.Tests.csproj @@ -0,0 +1,13 @@ + + + + $(TargetFrameworkVersion) + Exe + + + + + + + + diff --git a/tests/Data.StandardDataSets.Tests/Ploch.Data.StandardDataSets.Tests.csproj b/tests/Data.StandardDataSets.Tests/Ploch.Data.StandardDataSets.Tests.csproj index 9e73a0b..34fead9 100644 --- a/tests/Data.StandardDataSets.Tests/Ploch.Data.StandardDataSets.Tests.csproj +++ 
b/tests/Data.StandardDataSets.Tests/Ploch.Data.StandardDataSets.Tests.csproj @@ -2,18 +2,7 @@ $(TargetFrameworkVersion) - enable - enable - - false - true - true Exe - AnyCPU;x64 - - - - false diff --git a/tests/Data.Utilities.Tests/DataColumnExtensionsTests.cs b/tests/Data.Utilities.Tests/DataColumnExtensionsTests.cs new file mode 100644 index 0000000..018aeb2 --- /dev/null +++ b/tests/Data.Utilities.Tests/DataColumnExtensionsTests.cs @@ -0,0 +1,118 @@ +using System; +using System.Data; +using FluentAssertions; +using Ploch.Data.Utilities; +using Xunit; + +namespace Ploch.Data.Utilities.Tests; + +public class DataColumnExtensionsTests +{ + [Fact] + public void CopyProperties_should_copy_AllowDBNull() + { + using var source = new DataColumn("Source", typeof(string)) { AllowDBNull = false }; + using var target = new DataColumn("Target", typeof(string)); + + source.CopyProperties(target); + + target.AllowDBNull.Should().Be(source.AllowDBNull); + } + + [Fact] + public void CopyProperties_should_copy_AutoIncrement() + { + using var source = new DataColumn("Source", typeof(int)) { AutoIncrement = true }; + using var target = new DataColumn("Target", typeof(int)); + + source.CopyProperties(target); + + target.AutoIncrement.Should().Be(source.AutoIncrement); + } + + [Fact] + public void CopyProperties_should_copy_Caption() + { + using var source = new DataColumn("Source", typeof(string)) { Caption = "Test Caption" }; + using var target = new DataColumn("Target", typeof(string)); + + source.CopyProperties(target); + + target.Caption.Should().Be("Test Caption"); + } + + [Fact] + public void CopyProperties_should_copy_AutoIncrementSeed() + { + using var source = new DataColumn("Source", typeof(int)) { AutoIncrementSeed = 100 }; + using var target = new DataColumn("Target", typeof(int)); + + source.CopyProperties(target); + + target.AutoIncrementSeed.Should().Be(100); + } + + [Fact] + public void CopyProperties_should_copy_AutoIncrementStep() + { + using var source = new 
DataColumn("Source", typeof(int)) { AutoIncrementStep = 5 }; + using var target = new DataColumn("Target", typeof(int)); + + source.CopyProperties(target); + + target.AutoIncrementStep.Should().Be(5); + } + + [Fact] + public void CopyProperties_should_copy_all_properties_at_once() + { + using var source = new DataColumn("Source", typeof(int)) + { + AllowDBNull = false, + AutoIncrement = true, + Caption = "My Column", + AutoIncrementSeed = 10, + AutoIncrementStep = 2, + }; + using var target = new DataColumn("Target", typeof(int)); + + source.CopyProperties(target); + + target.AllowDBNull.Should().Be(false); + target.AutoIncrement.Should().Be(true); + target.Caption.Should().Be("My Column"); + target.AutoIncrementSeed.Should().Be(10); + target.AutoIncrementStep.Should().Be(2); + } + + [Fact] + public void CopyProperties_should_not_copy_column_name() + { + using var source = new DataColumn("Source", typeof(string)); + using var target = new DataColumn("Target", typeof(string)); + + source.CopyProperties(target); + + target.ColumnName.Should().Be("Target"); + } + + [Fact] + public void CopyProperties_should_throw_ArgumentNullException_when_source_is_null() + { + using var target = new DataColumn("Target", typeof(string)); + + var act = () => ((DataColumn)null!).CopyProperties(target); + + act.Should().Throw().WithParameterName("sourceColumn"); + } + + [Fact] + public void CopyProperties_should_throw_ArgumentNullException_when_target_is_null() + { + using var source = new DataColumn("Source", typeof(string)); + + var act = () => source.CopyProperties(null!); + + act.Should().Throw().WithParameterName("targetColumn"); + } +} diff --git a/tests/Data.Utilities.Tests/Ploch.Data.Utilities.Tests.csproj b/tests/Data.Utilities.Tests/Ploch.Data.Utilities.Tests.csproj new file mode 100644 index 0000000..da4a75e --- /dev/null +++ b/tests/Data.Utilities.Tests/Ploch.Data.Utilities.Tests.csproj @@ -0,0 +1,13 @@ + + + + $(TargetFrameworkVersion) + Exe + + + + + + + +