From 7aa73663de40bce7f05d6dab02214e9c7cc2793f Mon Sep 17 00:00:00 2001 From: Trey Date: Mon, 4 May 2026 12:47:36 -0700 Subject: [PATCH 1/7] Add persistent DCRCredentialStore types and memory backend MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Phase 3 sub-issue 1 of #5183. Define the persisted DCRCredentials value type and the storage-level DCRCredentialStore interface in pkg/authserver/storage/, and ship the in-process memory implementation that single-replica deployments and unit tests use. The Redis backend (sub-issue 2) and the wiring change (sub-issue 3) build on this. DCRKey consolidation: chose option (a) from the issue — DCRKey and its ScopesHash constructor move to pkg/authserver/storage/ so any future backend hashes keys identically. The runner package keeps a package-local type alias (type DCRKey = storage.DCRKey) and a var binding for scopesHash so existing call sites compile unchanged; the canonical form has a single source of truth. DCRCredentials carries ClientSecretExpiresAt so the Redis backend can drive a SetEX TTL without re-touching the value type or regenerating mocks. The interface contract documents this as SHOULD honor when backend-supported; MemoryStorage retains entries verbatim for the process lifetime. StoreDCRCredentials rejects nil creds and zero-valued Key.Issuer or Key.RedirectURI with fosite.ErrInvalidRequest, matching the StoreUpstreamTokens validation pattern. Stats reports dcrCredentials count for parity with the other in-memory maps. The runner-side DCRCredentialStore (Get/Put *DCRResolution) is left in place as the thin adapter sub-issue 3 will swap. This sub-issue lands the new storage-level interface, MemoryStorage implementation, and regenerated mock without touching the wire-up. 
DCR credentials are intentionally excluded from cleanupExpired: RFC 7591 client registrations are long-lived and the authoritative expiry signal is client_secret_expires_at, which the Redis backend will honor as a SetEX TTL. --- pkg/authserver/runner/dcr.go | 38 +-- pkg/authserver/runner/dcr_store.go | 40 +--- pkg/authserver/runner/dcr_store_test.go | 74 +----- pkg/authserver/storage/memory.go | 80 +++++++ pkg/authserver/storage/memory_test.go | 233 +++++++++++++++++++ pkg/authserver/storage/mocks/mock_storage.go | 55 ++++- pkg/authserver/storage/types.go | 167 ++++++++++++- 7 files changed, 555 insertions(+), 132 deletions(-) diff --git a/pkg/authserver/runner/dcr.go b/pkg/authserver/runner/dcr.go index 092af89911..b097bb7d5a 100644 --- a/pkg/authserver/runner/dcr.go +++ b/pkg/authserver/runner/dcr.go @@ -5,8 +5,6 @@ package runner import ( "context" - "crypto/sha256" - "encoding/hex" "errors" "fmt" "log/slog" @@ -15,13 +13,13 @@ import ( "regexp" "runtime/debug" "slices" - "sort" "strings" "time" "golang.org/x/sync/singleflight" "github.com/stacklok/toolhive/pkg/authserver" + "github.com/stacklok/toolhive/pkg/authserver/storage" "github.com/stacklok/toolhive/pkg/authserver/upstream" "github.com/stacklok/toolhive/pkg/networking" "github.com/stacklok/toolhive/pkg/oauthproto" @@ -229,34 +227,12 @@ func applyResolutionToOAuth2Config(cfg *upstream.OAuth2Config, res *DCRResolutio cfg.ClientSecret = res.ClientSecret } -// scopesHash returns the SHA-256 hex digest of the canonical scope set. -// -// Canonicalisation: -// 1. Sort ascending so the digest is order-insensitive — e.g. -// []string{"openid", "profile"} and []string{"profile", "openid"} hash to -// the same value. -// 2. Deduplicate so that []string{"openid"} and []string{"openid", "openid"} -// hash to the same value. 
An OAuth scope set is a set, not a multiset -// (RFC 6749 §3.3), and without deduplication a caller that accidentally -// duplicated a scope would miss cache entries and trigger redundant -// RFC 7591 registrations. -// 3. Join with newlines (a character not valid in OAuth scope tokens per -// RFC 6749 §3.3) to avoid collision between e.g. ["ab", "c"] and -// ["a", "bc"]. -func scopesHash(scopes []string) string { - sorted := slices.Clone(scopes) - sort.Strings(sorted) - sorted = slices.Compact(sorted) - - h := sha256.New() - for i, s := range sorted { - if i > 0 { - _, _ = h.Write([]byte("\n")) - } - _, _ = h.Write([]byte(s)) - } - return hex.EncodeToString(h.Sum(nil)) -} +// scopesHash is a runner-package shorthand for storage.ScopesHash, kept so the +// resolver and its tests can reference the canonical hash function without an +// explicit storage. qualifier on every call site. The canonical implementation +// lives in the storage package next to DCRKey so any future backend hashes +// keys identically. +var scopesHash = storage.ScopesHash // Step identifiers for structured error logs emitted by the caller of // resolveDCRCredentials. These values flow through the "step" attribute so diff --git a/pkg/authserver/runner/dcr_store.go b/pkg/authserver/runner/dcr_store.go index 74b6009082..3fa1707057 100644 --- a/pkg/authserver/runner/dcr_store.go +++ b/pkg/authserver/runner/dcr_store.go @@ -8,43 +8,23 @@ import ( "fmt" "sync" "time" + + "github.com/stacklok/toolhive/pkg/authserver/storage" ) // dcrStaleAgeThreshold is the age beyond which a cached DCR resolution is // considered stale and logged as such by higher-level wiring. The store itself // does not expire or evict entries — RFC 7591 client registrations are -// long-lived and are only purged by explicit RFC 7592 deregistration. 
This -// threshold is consumed by Step 2g observability logs introduced in the next -// PR in the DCR stack (sub-issue C, #5039); 5042 only defines the constant -// so the consumer can land without a cross-PR cycle. -// -//nolint:unused // consumed by lookupCachedResolution in #5039 +// long-lived and are only purged by explicit RFC 7592 deregistration. const dcrStaleAgeThreshold = 90 * 24 * time.Hour -// DCRKey is the canonical lookup key for a DCR resolution. The tuple is -// designed so a future Redis-backed store can serialise it into a single key -// segment (Phase 3) without redefining the canonical form. ScopesHash rather -// than the raw scope slice is used so the key is comparable and order- -// insensitive. -type DCRKey struct { - // Issuer is *this* auth server's issuer identifier — the local issuer - // of the embedded authorization server that performed the registration, - // NOT the upstream's. The cache is keyed by this value because two - // different local issuers registering against the same upstream are - // distinct OAuth clients and must not share credentials. The upstream's - // issuer is used only for RFC 8414 §3.3 metadata verification inside - // the resolver and is not part of the cache key. - Issuer string - - // RedirectURI is the redirect URI registered with the upstream - // authorization server. Lives on the local issuer's origin since it is - // where the upstream sends the user back to us after authentication. - RedirectURI string - - // ScopesHash is the SHA-256 hex digest of the sorted scope list. - // See scopesHash in dcr.go for the canonical form. - ScopesHash string -} +// DCRKey is a re-export of storage.DCRKey, kept as a package-local alias so +// existing runner-side callers continue to compile against runner.DCRKey +// while the canonical definition lives in pkg/authserver/storage. 
The +// canonical form (and its ScopesHash constructor) MUST live in a single place +// so any future Redis backend hashes keys identically to the in-memory +// backend; see storage.DCRKey for the field documentation. +type DCRKey = storage.DCRKey // DCRCredentialStore caches RFC 7591 Dynamic Client Registration resolutions // keyed by the (Issuer, RedirectURI, ScopesHash) tuple. Implementations must diff --git a/pkg/authserver/runner/dcr_store_test.go b/pkg/authserver/runner/dcr_store_test.go index ca40d51f8c..b3209e8834 100644 --- a/pkg/authserver/runner/dcr_store_test.go +++ b/pkg/authserver/runner/dcr_store_test.go @@ -168,75 +168,11 @@ func TestInMemoryDCRCredentialStore_GetReturnsDefensiveCopy(t *testing.T) { assert.Equal(t, "orig", refetched.ClientID) } -func TestScopesHash_StableAcrossPermutation(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - a, b []string - }{ - { - name: "two-element permutation", - a: []string{"openid", "profile"}, - b: []string{"profile", "openid"}, - }, - { - name: "three-element permutation", - a: []string{"openid", "profile", "email"}, - b: []string{"email", "openid", "profile"}, - }, - { - // OAuth scope sets are sets, not multisets (RFC 6749 §3.3). - // scopesHash deduplicates before hashing so a caller who - // accidentally repeats a scope still hits the cache entry - // keyed under the canonical set. 
- name: "single element equals double element duplicate", - a: []string{"openid"}, - b: []string{"openid", "openid"}, - }, - { - name: "three-element with duplicate equals two-element unique", - a: []string{"openid", "profile", "openid"}, - b: []string{"openid", "profile"}, - }, - } - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - t.Parallel() - assert.Equal(t, scopesHash(tc.a), scopesHash(tc.b)) - }) - } -} - -func TestScopesHash_DistinctForDistinctScopes(t *testing.T) { - t.Parallel() - - a := scopesHash([]string{"openid"}) - b := scopesHash([]string{"openid", "profile"}) - c := scopesHash([]string{"profile"}) - d := scopesHash(nil) - e := scopesHash([]string{}) - - // Non-empty distinct sets produce distinct hashes. - assert.NotEqual(t, a, b) - assert.NotEqual(t, a, c) - assert.NotEqual(t, b, c) - assert.NotEqual(t, a, d) - // nil and empty slice canonicalise to the same hash (both sort-then-join - // to the empty canonical form). - assert.Equal(t, d, e) -} - -func TestScopesHash_NoCollisionFromBoundaryJoin(t *testing.T) { - t.Parallel() - - // Without a delimiter that cannot appear inside a scope value, - // ["ab", "c"] and ["a", "bc"] would collide. This test exists to - // prevent a regression if the canonical form is ever simplified. - h1 := scopesHash([]string{"ab", "c"}) - h2 := scopesHash([]string{"a", "bc"}) - assert.NotEqual(t, h1, h2) -} +// Tests for the canonical scopes-hash form live next to the canonical +// implementation in pkg/authserver/storage/memory_test.go (TestScopesHash_*). +// The runner-package binding `scopesHash = storage.ScopesHash` would only +// re-exercise the same code, so duplicating the suite here would be redundant +// per .claude/rules/testing.md. 
// TestInMemoryDCRCredentialStore_ConcurrentAccess fans out N goroutines // performing alternating Put / Get against overlapping and disjoint keys, diff --git a/pkg/authserver/storage/memory.go b/pkg/authserver/storage/memory.go index 3d5a222e59..2f69de391e 100644 --- a/pkg/authserver/storage/memory.go +++ b/pkg/authserver/storage/memory.go @@ -94,6 +94,15 @@ type MemoryStorage struct { // This enables O(1) lookup during authentication callbacks. providerIdentities map[string]*ProviderIdentity + // dcrCredentials maps DCRKey -> DCRCredentials for RFC 7591 Dynamic Client + // Registration credentials. Entries are intentionally excluded from the + // periodic cleanupExpired loop: DCR registrations are long-lived and the + // authoritative expiry signal is RFC 7591 client_secret_expires_at, which + // is honored at read time by callers (and by the future Redis backend's + // SetEX TTL). The map is bounded by the operator-configured upstream + // count, so unbounded growth is not a concern. + dcrCredentials map[DCRKey]*DCRCredentials + // cleanupInterval is how often the background cleanup runs cleanupInterval time.Duration @@ -129,6 +138,7 @@ func NewMemoryStorage(opts ...MemoryStorageOption) *MemoryStorage { clientAssertionJWTs: make(map[string]time.Time), users: make(map[string]*User), providerIdentities: make(map[string]*ProviderIdentity), + dcrCredentials: make(map[DCRKey]*DCRCredentials), cleanupInterval: DefaultCleanupInterval, stopCleanup: make(chan struct{}), cleanupDone: make(chan struct{}), @@ -1187,6 +1197,73 @@ func (s *MemoryStorage) GetUserProviderIdentities(_ context.Context, userID stri return identities, nil } +// ----------------------- +// DCR Credentials Storage +// ----------------------- + +// cloneDCRCredentials returns a field-by-field copy of c, or nil if c is nil. 
+// All fields are values (no slices, maps, or pointers), so a shallow copy is +// sufficient — adding a new reference-typed field requires updating this +// helper to deep-copy that field. +func cloneDCRCredentials(c *DCRCredentials) *DCRCredentials { + if c == nil { + return nil + } + cp := *c + return &cp +} + +// StoreDCRCredentials persists DCR credentials for the given key. +// The credentials are stored under their own Key field; callers must populate +// it before calling. A defensive copy is made so subsequent caller mutations +// do not affect persisted state. +// +// Overwrites any existing entry for the same Key. The in-memory backend +// applies no native TTL — DCR registrations are long-lived and bounded by +// the operator-configured upstream count, and ClientSecretExpiresAt is +// retained verbatim for callers to re-check on read (see the interface +// docstring's "TTL handling" section). +// +// Rejects nil creds and an unpopulated Key (empty Issuer or empty +// RedirectURI). An empty ScopesHash is permitted because ScopesHash(nil) +// produces a non-empty canonical digest, and the empty-scope case +// (no scopes configured or discovered) is a real, valid registration. +func (s *MemoryStorage) StoreDCRCredentials(_ context.Context, creds *DCRCredentials) error { + if creds == nil { + return fosite.ErrInvalidRequest.WithHint("dcr credentials cannot be nil") + } + if creds.Key.Issuer == "" { + return fosite.ErrInvalidRequest.WithHint("dcr credentials key issuer cannot be empty") + } + if creds.Key.RedirectURI == "" { + return fosite.ErrInvalidRequest.WithHint("dcr credentials key redirect_uri cannot be empty") + } + + s.mu.Lock() + defer s.mu.Unlock() + + s.dcrCredentials[creds.Key] = cloneDCRCredentials(creds) + return nil +} + +// GetDCRCredentials retrieves DCR credentials by key. +// Returns a defensive copy; returns ErrNotFound (wrapped) on miss.
+func (s *MemoryStorage) GetDCRCredentials(_ context.Context, key DCRKey) (*DCRCredentials, error) { + s.mu.RLock() + defer s.mu.RUnlock() + + entry, ok := s.dcrCredentials[key] + if !ok { + slog.Debug("dcr credentials not found", + "issuer", key.Issuer, + "redirect_uri", key.RedirectURI, + ) + return nil, fmt.Errorf("%w: %w", ErrNotFound, fosite.ErrNotFound.WithHint("DCR credentials not found")) + } + + return cloneDCRCredentials(entry), nil +} + // ----------------------- // Metrics/Stats (for testing and monitoring) // ----------------------- @@ -1204,6 +1281,7 @@ type Stats struct { ClientAssertionJWTs int Users int ProviderIdentities int + DCRCredentials int } // Stats returns current statistics about storage contents. @@ -1224,6 +1302,7 @@ func (s *MemoryStorage) Stats() Stats { ClientAssertionJWTs: len(s.clientAssertionJWTs), Users: len(s.users), ProviderIdentities: len(s.providerIdentities), + DCRCredentials: len(s.dcrCredentials), } } @@ -1234,4 +1313,5 @@ var ( _ ClientRegistry = (*MemoryStorage)(nil) _ UpstreamTokenStorage = (*MemoryStorage)(nil) _ UserStorage = (*MemoryStorage)(nil) + _ DCRCredentialStore = (*MemoryStorage)(nil) ) diff --git a/pkg/authserver/storage/memory_test.go b/pkg/authserver/storage/memory_test.go index c0791acdc8..47ff0a17f3 100644 --- a/pkg/authserver/storage/memory_test.go +++ b/pkg/authserver/storage/memory_test.go @@ -1717,3 +1717,236 @@ func TestMemoryStorage_ConcurrentAccess(t *testing.T) { }) }) } + +func TestMemoryStorage_DCRCredentials_RoundTrip(t *testing.T) { + withStorage(t, func(ctx context.Context, s *MemoryStorage) { + key := DCRKey{ + Issuer: "https://thv.example.com", + RedirectURI: "https://thv.example.com/oauth/callback", + ScopesHash: ScopesHash([]string{"openid", "profile"}), + } + creds := &DCRCredentials{ + Key: key, + ProviderName: "atlassian", + ClientID: "client-abc", + ClientSecret: "secret-xyz", + TokenEndpointAuthMethod: "client_secret_basic", + RegistrationAccessToken: "rat-123", + 
RegistrationClientURI: "https://idp.example.com/register/client-abc", + AuthorizationEndpoint: "https://idp.example.com/authorize", + TokenEndpoint: "https://idp.example.com/token", + CreatedAt: time.Date(2025, 5, 1, 12, 0, 0, 0, time.UTC), + ClientSecretExpiresAt: time.Date(2026, 5, 1, 12, 0, 0, 0, time.UTC), + } + + require.NoError(t, s.StoreDCRCredentials(ctx, creds)) + + got, err := s.GetDCRCredentials(ctx, key) + require.NoError(t, err) + require.NotNil(t, got) + + // Every field round-trips, including the embedded Key. + assert.Equal(t, *creds, *got) + }) +} + +func TestMemoryStorage_DCRCredentials_DistinctKeysDoNotCollide(t *testing.T) { + withStorage(t, func(ctx context.Context, s *MemoryStorage) { + mkKey := func(issuer, redirect string, scopes []string) DCRKey { + return DCRKey{ + Issuer: issuer, + RedirectURI: redirect, + ScopesHash: ScopesHash(scopes), + } + } + entries := []*DCRCredentials{ + {Key: mkKey("https://idp-a.example.com", "https://x/cb", []string{"openid"}), ClientID: "a"}, + {Key: mkKey("https://idp-b.example.com", "https://x/cb", []string{"openid"}), ClientID: "b"}, + {Key: mkKey("https://idp-a.example.com", "https://y/cb", []string{"openid"}), ClientID: "c"}, + {Key: mkKey("https://idp-a.example.com", "https://x/cb", []string{"openid", "email"}), ClientID: "d"}, + } + for _, e := range entries { + require.NoError(t, s.StoreDCRCredentials(ctx, e)) + } + + for _, want := range entries { + got, err := s.GetDCRCredentials(ctx, want.Key) + require.NoError(t, err) + require.NotNil(t, got) + assert.Equal(t, want.ClientID, got.ClientID) + } + }) +} + +func TestMemoryStorage_DCRCredentials_OverwriteSemantics(t *testing.T) { + withStorage(t, func(ctx context.Context, s *MemoryStorage) { + key := DCRKey{Issuer: "https://idp.example.com", RedirectURI: "https://x/cb"} + + require.NoError(t, s.StoreDCRCredentials(ctx, &DCRCredentials{Key: key, ClientID: "first"})) + require.NoError(t, s.StoreDCRCredentials(ctx, &DCRCredentials{Key: key, ClientID: 
"second"})) + + got, err := s.GetDCRCredentials(ctx, key) + require.NoError(t, err) + assert.Equal(t, "second", got.ClientID) + }) +} + +func TestMemoryStorage_DCRCredentials_NotFound(t *testing.T) { + withStorage(t, func(ctx context.Context, s *MemoryStorage) { + _, err := s.GetDCRCredentials(ctx, DCRKey{Issuer: "https://unknown.example.com"}) + requireNotFoundError(t, err) + }) +} + +// TestMemoryStorage_DCRCredentials_StoreInvalidInputRejected pins the +// fail-loud-on-invalid-input contract: nil creds, an empty Key.Issuer, or +// an empty Key.RedirectURI must be rejected with fosite.ErrInvalidRequest +// rather than producing a working-looking write under a partially-populated +// key. +func TestMemoryStorage_DCRCredentials_StoreInvalidInputRejected(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + creds *DCRCredentials + }{ + { + name: "nil creds", + creds: nil, + }, + { + name: "empty issuer", + creds: &DCRCredentials{ + Key: DCRKey{Issuer: "", RedirectURI: "https://x/cb"}, + }, + }, + { + name: "empty redirect_uri", + creds: &DCRCredentials{ + Key: DCRKey{Issuer: "https://idp.example.com", RedirectURI: ""}, + }, + }, + } + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + withStorage(t, func(ctx context.Context, s *MemoryStorage) { + err := s.StoreDCRCredentials(ctx, tc.creds) + require.Error(t, err) + assert.ErrorIs(t, err, fosite.ErrInvalidRequest) + // Confirm the rejection did not partially populate the store. + assert.Equal(t, 0, s.Stats().DCRCredentials, + "rejected Store must not leave any entry behind") + }) + }) + } +} + +// TestMemoryStorage_DCRCredentials_GetReturnsDefensiveCopy pins the +// defensive-copy contract: a caller mutating the returned record must not +// affect persisted state. 
+func TestMemoryStorage_DCRCredentials_GetReturnsDefensiveCopy(t *testing.T) { + withStorage(t, func(ctx context.Context, s *MemoryStorage) { + key := DCRKey{Issuer: "https://idp.example.com", RedirectURI: "https://x/cb"} + require.NoError(t, s.StoreDCRCredentials(ctx, &DCRCredentials{Key: key, ClientID: "orig"})) + + got, err := s.GetDCRCredentials(ctx, key) + require.NoError(t, err) + got.ClientID = "tampered-by-caller" + + refetched, err := s.GetDCRCredentials(ctx, key) + require.NoError(t, err) + assert.Equal(t, "orig", refetched.ClientID) + }) +} + +// TestMemoryStorage_DCRCredentials_StoreCopyIsolatesCaller pins the +// store-side defensive-copy contract: a caller mutating the input *after* +// Store must not affect persisted state. +func TestMemoryStorage_DCRCredentials_StoreCopyIsolatesCaller(t *testing.T) { + withStorage(t, func(ctx context.Context, s *MemoryStorage) { + key := DCRKey{Issuer: "https://idp.example.com", RedirectURI: "https://x/cb"} + input := &DCRCredentials{Key: key, ClientID: "orig"} + require.NoError(t, s.StoreDCRCredentials(ctx, input)) + + input.ClientID = "tampered-after-store" + + got, err := s.GetDCRCredentials(ctx, key) + require.NoError(t, err) + assert.Equal(t, "orig", got.ClientID) + }) +} + +// TestMemoryStorage_DCRCredentials_ExcludedFromCleanupExpired pins the +// invariant that DCR entries are NOT swept by cleanupExpired. The Redis +// backend applies TTL via SetEX; the in-memory backend keeps entries for the +// process lifetime. 
+func TestMemoryStorage_DCRCredentials_ExcludedFromCleanupExpired(t *testing.T) { + withStorage(t, func(ctx context.Context, s *MemoryStorage) { + key := DCRKey{Issuer: "https://idp.example.com", RedirectURI: "https://x/cb"} + require.NoError(t, s.StoreDCRCredentials(ctx, &DCRCredentials{ + Key: key, + ClientID: "abc", + CreatedAt: time.Now().Add(-365 * 24 * time.Hour), + })) + + s.cleanupExpired() + + got, err := s.GetDCRCredentials(ctx, key) + require.NoError(t, err) + assert.Equal(t, "abc", got.ClientID) + }) +} + +// TestScopesHash_StableAcrossPermutationAndDuplicates pins the canonical-form +// invariants of ScopesHash. +func TestScopesHash_StableAcrossPermutationAndDuplicates(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + a, b []string + }{ + {"two-element permutation", []string{"openid", "profile"}, []string{"profile", "openid"}}, + {"three-element permutation", []string{"openid", "profile", "email"}, []string{"email", "openid", "profile"}}, + // OAuth scope sets are sets, not multisets (RFC 6749 §3.3); duplicates + // must canonicalise to the same hash. + {"single equals double duplicate", []string{"openid"}, []string{"openid", "openid"}}, + {"three with duplicate equals two unique", []string{"openid", "profile", "openid"}, []string{"openid", "profile"}}, + } + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + assert.Equal(t, ScopesHash(tc.a), ScopesHash(tc.b)) + }) + } +} + +// TestScopesHash_NoCollisionFromBoundaryJoin guards against a regression in +// the canonical form: ["ab", "c"] and ["a", "bc"] must produce different +// hashes. The newline delimiter is what prevents the collision; this test +// exists to fail loudly if the canonical form is ever simplified. 
+func TestScopesHash_NoCollisionFromBoundaryJoin(t *testing.T) { + t.Parallel() + assert.NotEqual(t, ScopesHash([]string{"ab", "c"}), ScopesHash([]string{"a", "bc"})) +} + +// TestScopesHash_DistinctForDistinctScopes pins that distinct non-empty +// scope sets hash to distinct values, while nil and empty slice canonicalise +// to the same value (both reduce to the empty canonical form). +func TestScopesHash_DistinctForDistinctScopes(t *testing.T) { + t.Parallel() + + a := ScopesHash([]string{"openid"}) + b := ScopesHash([]string{"openid", "profile"}) + c := ScopesHash([]string{"profile"}) + d := ScopesHash(nil) + e := ScopesHash([]string{}) + + assert.NotEqual(t, a, b) + assert.NotEqual(t, a, c) + assert.NotEqual(t, b, c) + assert.NotEqual(t, a, d) + assert.Equal(t, d, e) +} diff --git a/pkg/authserver/storage/mocks/mock_storage.go b/pkg/authserver/storage/mocks/mock_storage.go index 4bb0f83dab..3654b7b784 100644 --- a/pkg/authserver/storage/mocks/mock_storage.go +++ b/pkg/authserver/storage/mocks/mock_storage.go @@ -3,7 +3,7 @@ // // Generated by this command: // -// mockgen -destination=mocks/mock_storage.go -package=mocks -source=types.go Storage,PendingAuthorizationStorage,ClientRegistry,UpstreamTokenStorage,UpstreamTokenRefresher,UserStorage +// mockgen -destination=mocks/mock_storage.go -package=mocks -source=types.go Storage,PendingAuthorizationStorage,ClientRegistry,UpstreamTokenStorage,UpstreamTokenRefresher,UserStorage,DCRCredentialStore // // Package mocks is a generated GoMock package. @@ -19,6 +19,59 @@ import ( gomock "go.uber.org/mock/gomock" ) +// MockDCRCredentialStore is a mock of DCRCredentialStore interface. +type MockDCRCredentialStore struct { + ctrl *gomock.Controller + recorder *MockDCRCredentialStoreMockRecorder + isgomock struct{} +} + +// MockDCRCredentialStoreMockRecorder is the mock recorder for MockDCRCredentialStore. 
+type MockDCRCredentialStoreMockRecorder struct { + mock *MockDCRCredentialStore +} + +// NewMockDCRCredentialStore creates a new mock instance. +func NewMockDCRCredentialStore(ctrl *gomock.Controller) *MockDCRCredentialStore { + mock := &MockDCRCredentialStore{ctrl: ctrl} + mock.recorder = &MockDCRCredentialStoreMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDCRCredentialStore) EXPECT() *MockDCRCredentialStoreMockRecorder { + return m.recorder +} + +// GetDCRCredentials mocks base method. +func (m *MockDCRCredentialStore) GetDCRCredentials(ctx context.Context, key storage.DCRKey) (*storage.DCRCredentials, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDCRCredentials", ctx, key) + ret0, _ := ret[0].(*storage.DCRCredentials) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDCRCredentials indicates an expected call of GetDCRCredentials. +func (mr *MockDCRCredentialStoreMockRecorder) GetDCRCredentials(ctx, key any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDCRCredentials", reflect.TypeOf((*MockDCRCredentialStore)(nil).GetDCRCredentials), ctx, key) +} + +// StoreDCRCredentials mocks base method. +func (m *MockDCRCredentialStore) StoreDCRCredentials(ctx context.Context, creds *storage.DCRCredentials) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StoreDCRCredentials", ctx, creds) + ret0, _ := ret[0].(error) + return ret0 +} + +// StoreDCRCredentials indicates an expected call of StoreDCRCredentials. +func (mr *MockDCRCredentialStoreMockRecorder) StoreDCRCredentials(ctx, creds any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StoreDCRCredentials", reflect.TypeOf((*MockDCRCredentialStore)(nil).StoreDCRCredentials), ctx, creds) +} + // MockPendingAuthorizationStorage is a mock of PendingAuthorizationStorage interface. 
type MockPendingAuthorizationStorage struct { ctrl *gomock.Controller diff --git a/pkg/authserver/storage/types.go b/pkg/authserver/storage/types.go index f57d752ef9..3c422c3085 100644 --- a/pkg/authserver/storage/types.go +++ b/pkg/authserver/storage/types.go @@ -16,11 +16,15 @@ // OAuth authorization server. package storage -//go:generate mockgen -destination=mocks/mock_storage.go -package=mocks -source=types.go Storage,PendingAuthorizationStorage,ClientRegistry,UpstreamTokenStorage,UpstreamTokenRefresher,UserStorage +//go:generate mockgen -destination=mocks/mock_storage.go -package=mocks -source=types.go Storage,PendingAuthorizationStorage,ClientRegistry,UpstreamTokenStorage,UpstreamTokenRefresher,UserStorage,DCRCredentialStore import ( "context" + "crypto/sha256" + "encoding/hex" "errors" + "slices" + "sort" "time" "github.com/ory/fosite" @@ -107,6 +111,167 @@ func (t *UpstreamTokens) IsExpired(now time.Time) bool { return !t.ExpiresAt.IsZero() && now.After(t.ExpiresAt) } +// DCRKey is the canonical lookup key for a DCR registration. The tuple is +// designed so that any backend (in-memory or Redis) serialises it identically +// without redefining the canonical form. ScopesHash is used rather than a raw +// scope slice so the key is comparable, fixed-size, and order-insensitive. +// +// The key lives in the storage package because both MemoryStorage and the +// future Redis backend must hash keys identically; keeping the canonical form +// next to the persistence implementations prevents drift. +type DCRKey struct { + // Issuer is *this* auth server's issuer identifier — the local issuer + // of the embedded authorization server that performed the registration, + // NOT the upstream's. The cache is keyed by this value because two + // different local issuers registering against the same upstream are + // distinct OAuth clients and must not share credentials. 
+ Issuer string + + // RedirectURI is the redirect URI registered with the upstream + // authorization server. Lives on the local issuer's origin since it is + // where the upstream sends the user back after authentication. + RedirectURI string + + // ScopesHash is the SHA-256 hex digest of the sorted, deduplicated scope + // list. Use ScopesHash() to compute this value — do NOT hash scopes by + // hand at call sites; the canonical form must be a single source of truth + // so the key matches across processes and backends. + ScopesHash string +} + +// ScopesHash returns the SHA-256 hex digest of the canonical OAuth scope set, +// suitable for use as DCRKey.ScopesHash. +// +// Canonicalisation: +// 1. Sort ascending so the digest is order-insensitive — e.g. +// []string{"openid", "profile"} and []string{"profile", "openid"} hash to +// the same value. +// 2. Deduplicate so that []string{"openid"} and []string{"openid", "openid"} +// hash to the same value. An OAuth scope set is a set, not a multiset +// (RFC 6749 §3.3), and without deduplication a caller that accidentally +// duplicated a scope would miss cache entries and trigger redundant +// RFC 7591 registrations. +// 3. Join with newlines (a character not valid in OAuth scope tokens per +// RFC 6749 §3.3) to avoid collision between e.g. ["ab", "c"] and +// ["a", "bc"]. +// +// nil and empty slice both canonicalise to the same hash. +func ScopesHash(scopes []string) string { + sorted := slices.Clone(scopes) + sort.Strings(sorted) + sorted = slices.Compact(sorted) + + h := sha256.New() + for i, s := range sorted { + if i > 0 { + _, _ = h.Write([]byte("\n")) + } + _, _ = h.Write([]byte(s)) + } + return hex.EncodeToString(h.Sum(nil)) +} + +// DCRCredentials is the persisted form of an RFC 7591 Dynamic Client +// Registration result. All fields are populated from the upstream's DCR +// response. 
The RFC 7592 management fields (RegistrationAccessToken, +// RegistrationClientURI) are preserved verbatim so future rotation / +// management flows can use them. +// +// # Defensive copy +// +// Callers receive a defensive copy from the store. Mutations on the returned +// value do not affect persisted state, and mutations on a value passed to +// StoreDCRCredentials are not observed by subsequent reads. This matches the +// UpstreamTokens contract. +// +// # Lifetime +// +// Entries are long-lived — RFC 7591 client registrations do not expire unless +// the upstream asserts client_secret_expires_at. The in-memory backend +// retains entries for the process lifetime and is intentionally excluded from +// the periodic cleanup loop. The Redis backend (future sub-issue) applies +// TTL via SetEX when ClientSecretExpiresAt is non-zero. +type DCRCredentials struct { + // Key is the canonical cache key: (Issuer, RedirectURI, ScopesHash). + Key DCRKey + + // ProviderName is the upstream's UpstreamRunConfig.Name. Debug / audit + // only — never used as a primary key. Two upstreams with different + // ProviderName but identical Key share one credential record. + ProviderName string + + ClientID string + ClientSecret string //nolint:gosec // G117: field legitimately holds sensitive data + TokenEndpointAuthMethod string + + // RegistrationAccessToken and RegistrationClientURI are RFC 7592 fields + // captured for future management operations (rotation, deletion). + RegistrationAccessToken string //nolint:gosec // G117: field legitimately holds sensitive data + RegistrationClientURI string + + AuthorizationEndpoint string + TokenEndpoint string + + // CreatedAt is the wall-clock time at which the registration completed. + // Used to compute staleness for observability — the cache itself does + // not expire entries based on age (see ClientSecretExpiresAt for the + // authoritative expiry signal). 
+ CreatedAt time.Time + + // ClientSecretExpiresAt is the RFC 7591 §3.2.1 client_secret_expires_at + // value converted from int64 epoch seconds to time.Time. The wire + // convention is that 0 means "the secret does not expire"; this struct + // represents that as the zero time.Time so callers can use IsZero() + // rather than special-casing 0. + // + // When non-zero, this is the authoritative signal a backend uses to TTL + // the persisted entry: the Redis backend (sub-issue 2) plumbs it through + // SetEX so the row evicts before the upstream rejects the secret at the + // token endpoint. The in-memory backend ignores this field — entries + // persist for the process lifetime and the resolver re-checks the + // expiry on read. + ClientSecretExpiresAt time.Time +} + +// DCRCredentialStore is a narrow, segregated interface for persisting +// dynamic-client-registration credentials. Both MemoryStorage and a future +// Redis-backed store implement it; an authserver backed by Redis shares DCR +// credentials across replicas and restarts. +// +// # Cross-replica limitation +// +// Sharing DCR credentials does NOT imply cross-replica session / token +// delivery. Callers that need that must still route through the proxy runner +// and (if applicable) pin sessions to a replica. +// +// # Defensive copy +// +// Implementations MUST defensively copy on both Store and Get so caller +// mutations cannot reach persisted state and vice versa, mirroring the +// UpstreamTokens contract. +// +// # TTL handling +// +// Implementations SHOULD honor a non-zero DCRCredentials.ClientSecretExpiresAt +// as a backend-level TTL when the underlying store supports one (e.g. Redis +// SetEX) so an entry evicts before the upstream rejects the secret at the +// token endpoint. Backends without a native TTL (e.g. the in-memory backend) +// retain the field verbatim and rely on the caller — typically the runner's +// resolver — to re-check expiry on read; see MemoryStorage.GetDCRCredentials. 
+// A zero ClientSecretExpiresAt means the upstream did not assert an expiry +// and no TTL is applied. +type DCRCredentialStore interface { + // GetDCRCredentials returns the credentials for the given key. + // Returns ErrNotFound (wrapped) if no entry exists for the key. + // The returned value is a defensive copy. + GetDCRCredentials(ctx context.Context, key DCRKey) (*DCRCredentials, error) + + // StoreDCRCredentials persists the credentials, overwriting any existing + // entry for the same Key. See the interface-level "TTL handling" section + // for the contract on ClientSecretExpiresAt. + StoreDCRCredentials(ctx context.Context, creds *DCRCredentials) error +} + // User represents a user account in the authorization server. // A user can have multiple linked provider identities. // The User.ID is used as the "sub" claim in JWTs issued by ToolHive, From 96eac7b43385722c4e50f3cdaa7a343d035eb827 Mon Sep 17 00:00:00 2001 From: Trey Date: Wed, 6 May 2026 09:33:11 -0700 Subject: [PATCH 2/7] Rename runner-side DCR cache to dcrResolutionCache MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Eliminates the post-merge name collision between runner.DCRCredentialStore and storage.DCRCredentialStore. The two interfaces serve different purposes (runtime resolution cache vs persistent credential store), and the same-name collision in sibling packages forces every reader and grep search to mentally disambiguate during the Phase 3 migration. Renaming the runner-side interface to lowercase dcrResolutionCache keeps the canonical DCRCredentialStore name reserved for the storage package, which is where the future Redis backend will plug in. The rename is internal to pkg/authserver/runner — no external callers exist. 
Renamed symbols: - DCRCredentialStore -> dcrResolutionCache - inMemoryDCRCredentialStore -> inMemoryDCRResolutionCache - NewInMemoryDCRCredentialStore -> newInMemoryDCRResolutionCache - TestInMemoryDCRCredentialStore_* -> TestInMemoryDCRResolutionCache_* Addresses stacklok/toolhive#5186 review F2 (HIGH, score 9): naming collision with runner.DCRCredentialStore. --- pkg/authserver/runner/dcr.go | 10 ++-- pkg/authserver/runner/dcr_store.go | 38 ++++++++------ pkg/authserver/runner/dcr_store_test.go | 34 ++++++------ pkg/authserver/runner/dcr_test.go | 52 +++++++++---------- pkg/authserver/runner/embeddedauthserver.go | 8 +-- .../runner/embeddedauthserver_test.go | 4 +- 6 files changed, 76 insertions(+), 70 deletions(-) diff --git a/pkg/authserver/runner/dcr.go b/pkg/authserver/runner/dcr.go index b097bb7d5a..42a9c60fba 100644 --- a/pkg/authserver/runner/dcr.go +++ b/pkg/authserver/runner/dcr.go @@ -71,7 +71,7 @@ var authMethodPreference = []string{ // successful Dynamic Client Registration, together with the endpoints the // upstream advertises so the caller need not re-discover them. // -// The struct is the unit of storage in DCRCredentialStore and the unit of +// The struct is the unit of storage in dcrResolutionCache and the unit of // application via consumeResolution. 
type DCRResolution struct { // ClientID is the RFC 7591 "client_id" returned by the authorization @@ -323,7 +323,7 @@ func resolveDCRCredentials( ctx context.Context, rc *authserver.OAuth2UpstreamRunConfig, localIssuer string, - cache DCRCredentialStore, + cache dcrResolutionCache, ) (*DCRResolution, error) { if err := validateResolveInputs(rc, localIssuer, cache); err != nil { return nil, newDCRStepError(dcrStepValidate, localIssuer, "", err) @@ -393,7 +393,7 @@ func registerAndCache( localIssuer, redirectURI string, scopes []string, key DCRKey, - cache DCRCredentialStore, + cache dcrResolutionCache, ) (*DCRResolution, error) { // Recheck cache: another flight that just finished may have populated // it between our initial lookup and our singleflight entry. @@ -590,7 +590,7 @@ var queryStrippingPattern = regexp.MustCompile(`(?i)https?://[^\s"']+`) func validateResolveInputs( rc *authserver.OAuth2UpstreamRunConfig, localIssuer string, - cache DCRCredentialStore, + cache dcrResolutionCache, ) error { if rc == nil { return fmt.Errorf("oauth2 upstream run-config is required") @@ -634,7 +634,7 @@ func validateResolveInputs( // trigger. func lookupCachedResolution( ctx context.Context, - cache DCRCredentialStore, + cache dcrResolutionCache, key DCRKey, localIssuer, redirectURI string, ) (*DCRResolution, bool, error) { diff --git a/pkg/authserver/runner/dcr_store.go b/pkg/authserver/runner/dcr_store.go index 3fa1707057..6b4537968c 100644 --- a/pkg/authserver/runner/dcr_store.go +++ b/pkg/authserver/runner/dcr_store.go @@ -26,15 +26,21 @@ const dcrStaleAgeThreshold = 90 * 24 * time.Hour // backend; see storage.DCRKey for the field documentation. type DCRKey = storage.DCRKey -// DCRCredentialStore caches RFC 7591 Dynamic Client Registration resolutions +// dcrResolutionCache caches RFC 7591 Dynamic Client Registration resolutions // keyed by the (Issuer, RedirectURI, ScopesHash) tuple. Implementations must // be safe for concurrent use. 
// -// The store is an in-memory cache of long-lived registrations — it is not a -// durable store, and entries are never expired or evicted by the store -// itself. Callers are responsible for invalidating entries when the -// underlying registration is revoked (e.g., via RFC 7592 deregistration). -type DCRCredentialStore interface { +// This is a runner-internal cache of *DCRResolution values; it is distinct +// from the persistent storage.DCRCredentialStore (which holds *DCRCredentials +// and is the durable contract sub-issue 3 wires the resolver to use). Naming +// them differently keeps the two interfaces unambiguous to readers and grep +// tooling while both exist during the Phase 3 migration. +// +// The cache is in-memory and holds long-lived registrations — entries are +// never expired or evicted by the cache itself. Callers are responsible for +// invalidating entries when the underlying registration is revoked (e.g., +// via RFC 7592 deregistration). +type dcrResolutionCache interface { // Get returns the cached resolution for key, or (nil, false, nil) if the // key is not present. An error is returned only on backend failure. Get(ctx context.Context, key DCRKey) (*DCRResolution, bool, error) @@ -46,8 +52,8 @@ type DCRCredentialStore interface { Put(ctx context.Context, key DCRKey, resolution *DCRResolution) error } -// NewInMemoryDCRCredentialStore returns a thread-safe in-memory -// DCRCredentialStore intended for tests and single-replica development +// newInMemoryDCRResolutionCache returns a thread-safe in-memory +// dcrResolutionCache intended for tests and single-replica development // deployments. Production deployments should use the Redis-backed store // introduced in Phase 3, which addresses the cross-replica sharing, // durability, and cross-process coordination gaps documented below. @@ -77,23 +83,23 @@ type DCRCredentialStore interface { // that side, the loser becomes orphaned. 
The // resolveDCRCredentials-level singleflight in dcr.go only deduplicates // within one process. -func NewInMemoryDCRCredentialStore() DCRCredentialStore { - return &inMemoryDCRCredentialStore{ +func newInMemoryDCRResolutionCache() dcrResolutionCache { + return &inMemoryDCRResolutionCache{ entries: make(map[DCRKey]*DCRResolution), } } -// inMemoryDCRCredentialStore is the default DCRCredentialStore backed by a +// inMemoryDCRResolutionCache is the default dcrResolutionCache backed by a // plain map guarded by sync.RWMutex. Modelled on // pkg/authserver/storage/memory.go but stripped of TTL bookkeeping — DCR // resolutions are long-lived. -type inMemoryDCRCredentialStore struct { +type inMemoryDCRResolutionCache struct { mu sync.RWMutex entries map[DCRKey]*DCRResolution } -// Get implements DCRCredentialStore. -func (s *inMemoryDCRCredentialStore) Get(_ context.Context, key DCRKey) (*DCRResolution, bool, error) { +// Get implements dcrResolutionCache. +func (s *inMemoryDCRResolutionCache) Get(_ context.Context, key DCRKey) (*DCRResolution, bool, error) { s.mu.RLock() defer s.mu.RUnlock() @@ -108,13 +114,13 @@ func (s *inMemoryDCRCredentialStore) Get(_ context.Context, key DCRKey) (*DCRRes return &cp, true, nil } -// Put implements DCRCredentialStore. +// Put implements dcrResolutionCache. // // A nil resolution is rejected rather than silently no-oped: a caller // passing nil would otherwise get a successful return, observe a miss on // the next Get, and have no error trail to debug from. Failing loudly at // the boundary makes such bugs visible at the first call. 
-func (s *inMemoryDCRCredentialStore) Put(_ context.Context, key DCRKey, resolution *DCRResolution) error { +func (s *inMemoryDCRResolutionCache) Put(_ context.Context, key DCRKey, resolution *DCRResolution) error { if resolution == nil { return fmt.Errorf("dcr: resolution must not be nil") } diff --git a/pkg/authserver/runner/dcr_store_test.go b/pkg/authserver/runner/dcr_store_test.go index b3209e8834..03184a811e 100644 --- a/pkg/authserver/runner/dcr_store_test.go +++ b/pkg/authserver/runner/dcr_store_test.go @@ -15,10 +15,10 @@ import ( "github.com/stretchr/testify/require" ) -func TestInMemoryDCRCredentialStore_PutGet_RoundTrip(t *testing.T) { +func TestInMemoryDCRResolutionCache_PutGet_RoundTrip(t *testing.T) { t.Parallel() - store := NewInMemoryDCRCredentialStore() + store := newInMemoryDCRResolutionCache() ctx := context.Background() key := DCRKey{ @@ -51,10 +51,10 @@ func TestInMemoryDCRCredentialStore_PutGet_RoundTrip(t *testing.T) { assert.Equal(t, resolution.TokenEndpointAuthMethod, got.TokenEndpointAuthMethod) } -func TestInMemoryDCRCredentialStore_Get_MissingKey(t *testing.T) { +func TestInMemoryDCRResolutionCache_Get_MissingKey(t *testing.T) { t.Parallel() - store := NewInMemoryDCRCredentialStore() + store := newInMemoryDCRResolutionCache() ctx := context.Background() got, ok, err := store.Get(ctx, DCRKey{Issuer: "https://unknown.example.com"}) @@ -63,10 +63,10 @@ func TestInMemoryDCRCredentialStore_Get_MissingKey(t *testing.T) { assert.Nil(t, got) } -func TestInMemoryDCRCredentialStore_DistinctKeysDoNotCollide(t *testing.T) { +func TestInMemoryDCRResolutionCache_DistinctKeysDoNotCollide(t *testing.T) { t.Parallel() - store := NewInMemoryDCRCredentialStore() + store := newInMemoryDCRResolutionCache() ctx := context.Background() keyA := DCRKey{ @@ -111,10 +111,10 @@ func TestInMemoryDCRCredentialStore_DistinctKeysDoNotCollide(t *testing.T) { } } -func TestInMemoryDCRCredentialStore_Put_OverwritesExisting(t *testing.T) { +func 
TestInMemoryDCRResolutionCache_Put_OverwritesExisting(t *testing.T) { t.Parallel() - store := NewInMemoryDCRCredentialStore() + store := newInMemoryDCRResolutionCache() ctx := context.Background() key := DCRKey{Issuer: "https://idp.example.com", RedirectURI: "https://x.example.com/cb"} @@ -127,14 +127,14 @@ func TestInMemoryDCRCredentialStore_Put_OverwritesExisting(t *testing.T) { assert.Equal(t, "second", got.ClientID) } -// TestInMemoryDCRCredentialStore_Put_RejectsNilResolution pins the +// TestInMemoryDCRResolutionCache_Put_RejectsNilResolution pins the // fail-loud-on-invalid-input contract: passing nil must error rather than // silently no-op. A silent no-op would leave the caller with a successful // Put followed by a Get miss and no debug trail to explain it. -func TestInMemoryDCRCredentialStore_Put_RejectsNilResolution(t *testing.T) { +func TestInMemoryDCRResolutionCache_Put_RejectsNilResolution(t *testing.T) { t.Parallel() - store := NewInMemoryDCRCredentialStore() + store := newInMemoryDCRResolutionCache() ctx := context.Background() key := DCRKey{Issuer: "https://idp.example.com", RedirectURI: "https://x.example.com/cb"} @@ -148,10 +148,10 @@ func TestInMemoryDCRCredentialStore_Put_RejectsNilResolution(t *testing.T) { assert.False(t, ok, "rejected Put must not leave any entry behind") } -func TestInMemoryDCRCredentialStore_GetReturnsDefensiveCopy(t *testing.T) { +func TestInMemoryDCRResolutionCache_GetReturnsDefensiveCopy(t *testing.T) { t.Parallel() - store := NewInMemoryDCRCredentialStore() + store := newInMemoryDCRResolutionCache() ctx := context.Background() key := DCRKey{Issuer: "https://idp.example.com"} @@ -174,19 +174,19 @@ func TestInMemoryDCRCredentialStore_GetReturnsDefensiveCopy(t *testing.T) { // re-exercise the same code, so duplicating the suite here would be redundant // per .claude/rules/testing.md. 
-// TestInMemoryDCRCredentialStore_ConcurrentAccess fans out N goroutines +// TestInMemoryDCRResolutionCache_ConcurrentAccess fans out N goroutines // performing alternating Put / Get against overlapping and disjoint keys, -// exercising the sync.RWMutex guard advertised in the DCRCredentialStore +// exercising the sync.RWMutex guard advertised in the dcrResolutionCache // interface doc. With go test -race this catches any future change that // drops the lock or introduces a data race in the map access. // // The test is bounded by a fail-fast deadline so a regression that // deadlocks fails loudly with a clear message rather than hanging until // the global Go test timeout. -func TestInMemoryDCRCredentialStore_ConcurrentAccess(t *testing.T) { +func TestInMemoryDCRResolutionCache_ConcurrentAccess(t *testing.T) { t.Parallel() - store := NewInMemoryDCRCredentialStore() + store := newInMemoryDCRResolutionCache() const ( workers = 16 diff --git a/pkg/authserver/runner/dcr_test.go b/pkg/authserver/runner/dcr_test.go index 7be979aa45..81c6611a8d 100644 --- a/pkg/authserver/runner/dcr_test.go +++ b/pkg/authserver/runner/dcr_test.go @@ -139,7 +139,7 @@ func TestResolveDCRCredentials_CacheHitShortCircuits(t *testing.T) { })) t.Cleanup(server.Close) - cache := NewInMemoryDCRCredentialStore() + cache := newInMemoryDCRResolutionCache() issuer := server.URL // Pre-populate the cache with a resolution matching the key we will @@ -187,7 +187,7 @@ func TestResolveDCRCredentials_RegistersOnCacheMiss(t *testing.T) { }, }) - cache := NewInMemoryDCRCredentialStore() + cache := newInMemoryDCRResolutionCache() issuer := server.URL rc := &authserver.OAuth2UpstreamRunConfig{ Scopes: []string{"openid", "profile"}, @@ -227,7 +227,7 @@ func TestResolveDCRCredentials_ExplicitEndpointsOverride(t *testing.T) { t.Parallel() server := newDCRTestServer(t, dcrTestHandlerConfig{}) - cache := NewInMemoryDCRCredentialStore() + cache := newInMemoryDCRResolutionCache() issuer := server.URL rc := 
&authserver.OAuth2UpstreamRunConfig{ @@ -262,7 +262,7 @@ func TestResolveDCRCredentials_InitialAccessTokenAsBearer(t *testing.T) { tokenPath := filepath.Join(t.TempDir(), "iat") require.NoError(t, os.WriteFile(tokenPath, []byte("iat-secret-value\n"), 0o600)) - cache := NewInMemoryDCRCredentialStore() + cache := newInMemoryDCRResolutionCache() issuer := server.URL rc := &authserver.OAuth2UpstreamRunConfig{ Scopes: []string{"openid"}, @@ -326,7 +326,7 @@ func TestResolveDCRCredentials_DoesNotForwardBearerOnRedirect(t *testing.T) { tokenPath := filepath.Join(t.TempDir(), "iat") require.NoError(t, os.WriteFile(tokenPath, []byte("iat-secret-value\n"), 0o600)) - cache := NewInMemoryDCRCredentialStore() + cache := newInMemoryDCRResolutionCache() issuer := upstream.URL rc := &authserver.OAuth2UpstreamRunConfig{ Scopes: []string{"openid"}, @@ -395,7 +395,7 @@ func TestResolveDCRCredentials_AuthMethodPreference(t *testing.T) { tokenEndpointAuthMethodsSupported: tc.supported, codeChallengeMethodsSupported: tc.codeChallenge, }) - cache := NewInMemoryDCRCredentialStore() + cache := newInMemoryDCRResolutionCache() issuer := server.URL rc := &authserver.OAuth2UpstreamRunConfig{ Scopes: []string{"openid"}, @@ -434,7 +434,7 @@ func TestResolveDCRCredentials_RefusesNoneWithoutS256(t *testing.T) { tokenEndpointAuthMethodsSupported: []string{"none"}, codeChallengeMethodsSupported: tc.codeChallenge, }) - cache := NewInMemoryDCRCredentialStore() + cache := newInMemoryDCRResolutionCache() issuer := server.URL rc := &authserver.OAuth2UpstreamRunConfig{ Scopes: []string{"openid"}, @@ -460,7 +460,7 @@ func TestResolveDCRCredentials_EmptyAuthMethodIntersectionErrors(t *testing.T) { server := newDCRTestServer(t, dcrTestHandlerConfig{ tokenEndpointAuthMethodsSupported: []string{"tls_client_auth"}, }) - cache := NewInMemoryDCRCredentialStore() + cache := newInMemoryDCRResolutionCache() issuer := server.URL rc := &authserver.OAuth2UpstreamRunConfig{ Scopes: []string{"openid"}, @@ -486,7 +486,7 
@@ func TestResolveDCRCredentials_SynthesisedRegistrationEndpoint(t *testing.T) { gotPath = r.URL.Path }, }) - cache := NewInMemoryDCRCredentialStore() + cache := newInMemoryDCRResolutionCache() issuer := server.URL rc := &authserver.OAuth2UpstreamRunConfig{ Scopes: []string{"openid"}, @@ -519,7 +519,7 @@ func TestResolveDCRCredentials_RegistrationEndpointDirectBypassesDiscovery(t *te server := httptest.NewServer(mux) t.Cleanup(server.Close) - cache := NewInMemoryDCRCredentialStore() + cache := newInMemoryDCRResolutionCache() issuer := server.URL rc := &authserver.OAuth2UpstreamRunConfig{ AuthorizationEndpoint: issuer + "/authorize", @@ -553,35 +553,35 @@ func TestResolveDCRCredentials_RejectsInvalidInputs(t *testing.T) { name string rc *authserver.OAuth2UpstreamRunConfig issuer string - cache DCRCredentialStore + cache dcrResolutionCache wantErrSub string }{ { name: "nil run-config", rc: nil, issuer: "https://example.com", - cache: NewInMemoryDCRCredentialStore(), + cache: newInMemoryDCRResolutionCache(), wantErrSub: "oauth2 upstream run-config is required", }, { name: "pre-provisioned client_id", rc: &authserver.OAuth2UpstreamRunConfig{ClientID: "preprovisioned", DCRConfig: validCfg}, issuer: "https://example.com", - cache: NewInMemoryDCRCredentialStore(), + cache: newInMemoryDCRResolutionCache(), wantErrSub: "pre-provisioned", }, { name: "missing dcr_config", rc: &authserver.OAuth2UpstreamRunConfig{}, issuer: "https://example.com", - cache: NewInMemoryDCRCredentialStore(), + cache: newInMemoryDCRResolutionCache(), wantErrSub: "no dcr_config", }, { name: "empty issuer", rc: &authserver.OAuth2UpstreamRunConfig{DCRConfig: validCfg}, issuer: "", - cache: NewInMemoryDCRCredentialStore(), + cache: newInMemoryDCRResolutionCache(), wantErrSub: "issuer is required", }, { @@ -781,7 +781,7 @@ func TestResolveDCRCredentials_DiscoveryURLHonoured(t *testing.T) { server = httptest.NewServer(mux) t.Cleanup(server.Close) - cache := NewInMemoryDCRCredentialStore() + cache := 
newInMemoryDCRResolutionCache() issuer := server.URL rc := &authserver.OAuth2UpstreamRunConfig{ Scopes: []string{"openid"}, @@ -822,7 +822,7 @@ func TestResolveDCRCredentials_DiscoveryURLIssuerMismatchRejected(t *testing.T) server := httptest.NewServer(mux) t.Cleanup(server.Close) - cache := NewInMemoryDCRCredentialStore() + cache := newInMemoryDCRResolutionCache() issuer := server.URL rc := &authserver.OAuth2UpstreamRunConfig{ Scopes: []string{"openid"}, @@ -849,7 +849,7 @@ func TestResolveDCRCredentials_DiscoveredScopesFallback(t *testing.T) { gotBody = body }, }) - cache := NewInMemoryDCRCredentialStore() + cache := newInMemoryDCRResolutionCache() issuer := server.URL rc := &authserver.OAuth2UpstreamRunConfig{ // Scopes intentionally left empty so the resolver falls back to @@ -881,7 +881,7 @@ func TestResolveDCRCredentials_EmptyScopesOmitted(t *testing.T) { gotBody = body }, }) - cache := NewInMemoryDCRCredentialStore() + cache := newInMemoryDCRResolutionCache() issuer := server.URL rc := &authserver.OAuth2UpstreamRunConfig{ DCRConfig: &authserver.DCRUpstreamConfig{ @@ -918,7 +918,7 @@ func TestResolveDCRCredentials_UpstreamIssuerDerivedFromDiscoveryURL(t *testing. server := newDCRTestServer(t, dcrTestHandlerConfig{ tokenEndpointAuthMethodsSupported: []string{"client_secret_basic"}, }) - cache := NewInMemoryDCRCredentialStore() + cache := newInMemoryDCRResolutionCache() // Caller-supplied issuer names this auth server, NOT the upstream. // Production wiring always passes its own issuer here (see @@ -1009,14 +1009,14 @@ func TestDeriveExpectedIssuerFromDiscoveryURL(t *testing.T) { } } -// countingStore is a DCRCredentialStore decorator that counts the number of +// countingStore is a dcrResolutionCache decorator that counts the number of // Get calls that returned a hit. 
The singleflight coalescing test uses it // to assert that no concurrent caller observed a cache hit during the run: // a hit during the test would mean a goroutine raced past the gate, took // the cache-lookup short-circuit instead of joining the singleflight, and // silently weakened the test's coverage. type countingStore struct { - inner DCRCredentialStore + inner dcrResolutionCache hits atomic.Int32 } @@ -1060,7 +1060,7 @@ func TestResolveDCRCredentials_SingleflightCoalescesConcurrentCallers(t *testing }, }) - cache := &countingStore{inner: NewInMemoryDCRCredentialStore()} + cache := &countingStore{inner: newInMemoryDCRResolutionCache()} issuer := server.URL rc := &authserver.OAuth2UpstreamRunConfig{ Scopes: []string{"openid", "profile"}, @@ -1471,7 +1471,7 @@ func TestResolveDCRCredentials_RefetchesOnExpiredCachedSecret(t *testing.T) { }, }) - cache := NewInMemoryDCRCredentialStore() + cache := newInMemoryDCRResolutionCache() issuer := server.URL rc := &authserver.OAuth2UpstreamRunConfig{ Scopes: []string{"openid"}, @@ -1526,7 +1526,7 @@ func TestResolveDCRCredentials_HonoursFutureExpiryAndZero(t *testing.T) { atomic.AddInt32(®istrationCalls, 1) }, }) - cache := NewInMemoryDCRCredentialStore() + cache := newInMemoryDCRResolutionCache() issuer := server.URL rc := &authserver.OAuth2UpstreamRunConfig{ Scopes: []string{"openid"}, @@ -1677,7 +1677,7 @@ func TestDcrStepError(t *testing.T) { // Precondition failure → dcrStepValidate. 
_, err := resolveDCRCredentials(context.Background(), nil, "https://as", - NewInMemoryDCRCredentialStore()) + newInMemoryDCRResolutionCache()) require.Error(t, err) var stepErr *dcrStepError require.True(t, errors.As(err, &stepErr)) diff --git a/pkg/authserver/runner/embeddedauthserver.go b/pkg/authserver/runner/embeddedauthserver.go index c01d55c34a..963e16e26a 100644 --- a/pkg/authserver/runner/embeddedauthserver.go +++ b/pkg/authserver/runner/embeddedauthserver.go @@ -53,11 +53,11 @@ type EmbeddedAuthServer struct { // the flight prevents N concurrent /register calls during a thundering // herd. The asymmetry is by design. // - // DCRCredentialStore has no Close method today because the in-memory + // dcrResolutionCache has no Close method today because the in-memory // implementation needs no release. A future Phase 3 backend (Redis, // sqlite) with handles will need Close added to the interface and // invoked from EmbeddedAuthServer.Close. - dcrStore DCRCredentialStore + dcrStore dcrResolutionCache closeOnce sync.Once closeErr error } @@ -94,7 +94,7 @@ func NewEmbeddedAuthServer(ctx context.Context, cfg *authserver.RunConfig) (*Emb // 4. Build upstream configurations (resolves DCR credentials for any // upstream configured with DCRConfig, caching resolutions in dcrStore). 
- dcrStore := NewInMemoryDCRCredentialStore() + dcrStore := newInMemoryDCRResolutionCache() upstreams, err := buildUpstreamConfigs(ctx, cfg.Upstreams, cfg.Issuer, dcrStore) if err != nil { return nil, fmt.Errorf("failed to build upstream configs: %w", err) @@ -312,7 +312,7 @@ func buildUpstreamConfigs( ctx context.Context, runConfigs []authserver.UpstreamRunConfig, issuer string, - dcrStore DCRCredentialStore, + dcrStore dcrResolutionCache, ) ([]authserver.UpstreamConfig, error) { configs := make([]authserver.UpstreamConfig, 0, len(runConfigs)) diff --git a/pkg/authserver/runner/embeddedauthserver_test.go b/pkg/authserver/runner/embeddedauthserver_test.go index 0ae2a20375..228191f0d5 100644 --- a/pkg/authserver/runner/embeddedauthserver_test.go +++ b/pkg/authserver/runner/embeddedauthserver_test.go @@ -1526,7 +1526,7 @@ func TestBuildUpstreamConfigs_DCR(t *testing.T) { AllowedAudiences: []string{"https://mcp.example.com"}, } - store := NewInMemoryDCRCredentialStore() + store := newInMemoryDCRResolutionCache() got, err := buildUpstreamConfigs(context.Background(), cfg.Upstreams, cfg.Issuer, store) require.NoError(t, err) require.Len(t, got, 1) @@ -1588,7 +1588,7 @@ func TestBuildUpstreamConfigs_DCR(t *testing.T) { AllowedAudiences: []string{"https://mcp.example.com"}, } - store := NewInMemoryDCRCredentialStore() + store := newInMemoryDCRResolutionCache() // First call: populates the store. _, err := buildUpstreamConfigs(context.Background(), cfg.Upstreams, cfg.Issuer, store) From 29d1c917abfd16ee4a317c4c9888b985c3cc0347 Mon Sep 17 00:00:00 2001 From: Trey Date: Wed, 6 May 2026 09:34:47 -0700 Subject: [PATCH 3/7] Inline storage.ScopesHash at runner call sites Replace `var scopesHash = storage.ScopesHash` with direct calls to storage.ScopesHash. The variable was a mutable runtime binding (any package code could reassign it, no compile-time guard) and an extra indirection layer for tooling and refactors. 
Qualifying call sites makes the storage dependency visible and matches the type-alias strategy already used for DCRKey in dcr_store.go. Addresses stacklok/toolhive#5186 review F3 (MEDIUM, score 8): mutable runtime binding, not a true alias. --- pkg/authserver/runner/dcr.go | 9 +-------- pkg/authserver/runner/dcr_store_test.go | 17 +++++++++-------- pkg/authserver/runner/dcr_test.go | 5 +++-- .../runner/embeddedauthserver_test.go | 4 ++-- 4 files changed, 15 insertions(+), 20 deletions(-) diff --git a/pkg/authserver/runner/dcr.go b/pkg/authserver/runner/dcr.go index 42a9c60fba..a850e5fa65 100644 --- a/pkg/authserver/runner/dcr.go +++ b/pkg/authserver/runner/dcr.go @@ -227,13 +227,6 @@ func applyResolutionToOAuth2Config(cfg *upstream.OAuth2Config, res *DCRResolutio cfg.ClientSecret = res.ClientSecret } -// scopesHash is a runner-package shorthand for storage.ScopesHash, kept so the -// resolver and its tests can reference the canonical hash function without an -// explicit storage. qualifier on every call site. The canonical implementation -// lives in the storage package next to DCRKey so any future backend hashes -// keys identically. -var scopesHash = storage.ScopesHash - // Step identifiers for structured error logs emitted by the caller of // resolveDCRCredentials. These values flow through the "step" attribute so // operators can narrow failures to a specific phase without parsing error @@ -339,7 +332,7 @@ func resolveDCRCredentials( key := DCRKey{ Issuer: localIssuer, RedirectURI: redirectURI, - ScopesHash: scopesHash(scopes), + ScopesHash: storage.ScopesHash(scopes), } // Cache lookup short-circuits before any network I/O. 
diff --git a/pkg/authserver/runner/dcr_store_test.go b/pkg/authserver/runner/dcr_store_test.go index 03184a811e..40668879ed 100644 --- a/pkg/authserver/runner/dcr_store_test.go +++ b/pkg/authserver/runner/dcr_store_test.go @@ -13,6 +13,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/stacklok/toolhive/pkg/authserver/storage" ) func TestInMemoryDCRResolutionCache_PutGet_RoundTrip(t *testing.T) { @@ -24,7 +26,7 @@ func TestInMemoryDCRResolutionCache_PutGet_RoundTrip(t *testing.T) { key := DCRKey{ Issuer: "https://idp.example.com", RedirectURI: "https://toolhive.example.com/oauth/callback", - ScopesHash: scopesHash([]string{"openid", "profile"}), + ScopesHash: storage.ScopesHash([]string{"openid", "profile"}), } resolution := &DCRResolution{ ClientID: "client-abc", @@ -72,22 +74,22 @@ func TestInMemoryDCRResolutionCache_DistinctKeysDoNotCollide(t *testing.T) { keyA := DCRKey{ Issuer: "https://idp-a.example.com", RedirectURI: "https://toolhive.example.com/oauth/callback", - ScopesHash: scopesHash([]string{"openid"}), + ScopesHash: storage.ScopesHash([]string{"openid"}), } keyB := DCRKey{ Issuer: "https://idp-b.example.com", RedirectURI: "https://toolhive.example.com/oauth/callback", - ScopesHash: scopesHash([]string{"openid"}), + ScopesHash: storage.ScopesHash([]string{"openid"}), } keyC := DCRKey{ Issuer: "https://idp-a.example.com", RedirectURI: "https://other.example.com/callback", - ScopesHash: scopesHash([]string{"openid"}), + ScopesHash: storage.ScopesHash([]string{"openid"}), } keyD := DCRKey{ Issuer: "https://idp-a.example.com", RedirectURI: "https://toolhive.example.com/oauth/callback", - ScopesHash: scopesHash([]string{"openid", "email"}), + ScopesHash: storage.ScopesHash([]string{"openid", "email"}), } require.NoError(t, store.Put(ctx, keyA, &DCRResolution{ClientID: "a"})) @@ -170,9 +172,8 @@ func TestInMemoryDCRResolutionCache_GetReturnsDefensiveCopy(t *testing.T) { // Tests for the canonical 
scopes-hash form live next to the canonical // implementation in pkg/authserver/storage/memory_test.go (TestScopesHash_*). -// The runner-package binding `scopesHash = storage.ScopesHash` would only -// re-exercise the same code, so duplicating the suite here would be redundant -// per .claude/rules/testing.md. +// Duplicating the suite here would re-exercise the same code, which is +// redundant per .claude/rules/testing.md. // TestInMemoryDCRResolutionCache_ConcurrentAccess fans out N goroutines // performing alternating Put / Get against overlapping and disjoint keys, diff --git a/pkg/authserver/runner/dcr_test.go b/pkg/authserver/runner/dcr_test.go index 81c6611a8d..5711a6ffb0 100644 --- a/pkg/authserver/runner/dcr_test.go +++ b/pkg/authserver/runner/dcr_test.go @@ -22,6 +22,7 @@ import ( "github.com/stretchr/testify/require" "github.com/stacklok/toolhive/pkg/authserver" + "github.com/stacklok/toolhive/pkg/authserver/storage" "github.com/stacklok/toolhive/pkg/oauthproto" ) @@ -148,7 +149,7 @@ func TestResolveDCRCredentials_CacheHitShortCircuits(t *testing.T) { key := DCRKey{ Issuer: issuer, RedirectURI: redirectURI, - ScopesHash: scopesHash([]string{"openid", "profile"}), + ScopesHash: storage.ScopesHash([]string{"openid", "profile"}), } preloaded := &DCRResolution{ ClientID: "preloaded-id", @@ -217,7 +218,7 @@ func TestResolveDCRCredentials_RegistersOnCacheMiss(t *testing.T) { // Cache was populated. 
cached, ok, err := cache.Get(context.Background(), - DCRKey{Issuer: issuer, RedirectURI: issuer + "/oauth/callback", ScopesHash: scopesHash([]string{"openid", "profile"})}) + DCRKey{Issuer: issuer, RedirectURI: issuer + "/oauth/callback", ScopesHash: storage.ScopesHash([]string{"openid", "profile"})}) require.NoError(t, err) require.True(t, ok) assert.Equal(t, "test-client-id", cached.ClientID) diff --git a/pkg/authserver/runner/embeddedauthserver_test.go b/pkg/authserver/runner/embeddedauthserver_test.go index 228191f0d5..4a051973e9 100644 --- a/pkg/authserver/runner/embeddedauthserver_test.go +++ b/pkg/authserver/runner/embeddedauthserver_test.go @@ -1541,7 +1541,7 @@ func TestBuildUpstreamConfigs_DCR(t *testing.T) { key := DCRKey{ Issuer: server.URL, RedirectURI: redirectURI, - ScopesHash: scopesHash([]string{"openid", "profile"}), + ScopesHash: storage.ScopesHash([]string{"openid", "profile"}), } cached, ok, err := store.Get(context.Background(), key) require.NoError(t, err) @@ -1676,7 +1676,7 @@ func TestNewEmbeddedAuthServer_DCRBoot(t *testing.T) { key := DCRKey{ Issuer: server.URL, RedirectURI: redirectURI, - ScopesHash: scopesHash([]string{"openid", "profile"}), + ScopesHash: storage.ScopesHash([]string{"openid", "profile"}), } cached, ok, err := embed.dcrStore.Get(context.Background(), key) require.NoError(t, err) From 012e2b308fb4b0428f7b871b89f4f7f8e3cc22a2 Mon Sep 17 00:00:00 2001 From: Trey Date: Wed, 6 May 2026 09:46:56 -0700 Subject: [PATCH 4/7] Tighten StoreDCRCredentials validation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reject empty Key.ScopesHash, ClientID, AuthorizationEndpoint, and TokenEndpoint with fosite.ErrInvalidRequest, matching the broader "required field" contract documented on the DCRCredentials struct. 
The previous validation only checked Key.Issuer and Key.RedirectURI, which silently accepted partial registrations: a caller forgetting to populate ScopesHash would Store under the empty-string slot while a sibling that did compute ScopesHash(nil) (a non-empty digest) would land on a different cache cell, splitting one logical record across two slots. Likewise, accepting an empty ClientID, AuthorizationEndpoint, or TokenEndpoint would Store a working-looking record that fails downstream at the upstream's token endpoint with no audit trail. ClientSecret is left permissive because RFC 7591 §2 public clients (token_endpoint_auth_method = "none") legitimately have no secret. Tests: replaced the three-case nil/issuer/redirect_uri table with a seven-case table that constructs a fully valid DCRCredentials and isolates one missing field per subtest; updated the existing RoundTrip / DistinctKeys / Overwrite / DefensiveCopy / StoreCopyIsolates / ExcludedFromCleanupExpired tests to populate the newly-required fields so they exercise the success path under the tightened contract. Addresses stacklok/toolhive#5186 review F4 (MEDIUM, score 9): validation narrower than the documented contract. --- pkg/authserver/storage/memory.go | 26 ++++- pkg/authserver/storage/memory_test.go | 148 +++++++++++++++++++++----- 2 files changed, 142 insertions(+), 32 deletions(-) diff --git a/pkg/authserver/storage/memory.go b/pkg/authserver/storage/memory.go index 2f69de391e..0b4796986b 100644 --- a/pkg/authserver/storage/memory.go +++ b/pkg/authserver/storage/memory.go @@ -1224,10 +1224,16 @@ func cloneDCRCredentials(c *DCRCredentials) *DCRCredentials { // retained verbatim for callers to re-check on read (see the interface // docstring's "TTL handling" section). // -// Rejects nil creds and an unpopulated Key (empty Issuer or empty -// RedirectURI). 
An empty ScopesHash is permitted because ScopesHash("") -// produces a non-empty canonical digest, and the empty-scope case -// (no scopes configured or discovered) is a real, valid registration. +// Validation rejects nil creds, an unpopulated Key (empty Issuer, +// RedirectURI, or ScopesHash), and missing RFC 7591 mandatory response +// fields (ClientID, AuthorizationEndpoint, TokenEndpoint). An empty +// ScopesHash is rejected because the canonical digest of any scope set — +// including the empty-scope set via ScopesHash(nil) — is non-empty, so an +// empty string can only be a caller bug; accepting it would silently +// route a forgotten-hash record to a different cache slot than a sibling +// caller that did compute ScopesHash. ClientSecret is left permissive +// because RFC 7591 §2 public clients (auth method "none") legitimately +// register without a secret. func (s *MemoryStorage) StoreDCRCredentials(_ context.Context, creds *DCRCredentials) error { if creds == nil { return fosite.ErrInvalidRequest.WithHint("dcr credentials cannot be nil") @@ -1238,6 +1244,18 @@ func (s *MemoryStorage) StoreDCRCredentials(_ context.Context, creds *DCRCredent if creds.Key.RedirectURI == "" { return fosite.ErrInvalidRequest.WithHint("dcr credentials key redirect_uri cannot be empty") } + if creds.Key.ScopesHash == "" { + return fosite.ErrInvalidRequest.WithHint("dcr credentials key scopes_hash cannot be empty") + } + if creds.ClientID == "" { + return fosite.ErrInvalidRequest.WithHint("dcr credentials client_id cannot be empty") + } + if creds.AuthorizationEndpoint == "" { + return fosite.ErrInvalidRequest.WithHint("dcr credentials authorization_endpoint cannot be empty") + } + if creds.TokenEndpoint == "" { + return fosite.ErrInvalidRequest.WithHint("dcr credentials token_endpoint cannot be empty") + } s.mu.Lock() defer s.mu.Unlock() diff --git a/pkg/authserver/storage/memory_test.go b/pkg/authserver/storage/memory_test.go index 47ff0a17f3..307fe3b100 100644 --- 
a/pkg/authserver/storage/memory_test.go +++ b/pkg/authserver/storage/memory_test.go @@ -1759,11 +1759,19 @@ func TestMemoryStorage_DCRCredentials_DistinctKeysDoNotCollide(t *testing.T) { ScopesHash: ScopesHash(scopes), } } + mkCreds := func(key DCRKey, clientID string) *DCRCredentials { + return &DCRCredentials{ + Key: key, + ClientID: clientID, + AuthorizationEndpoint: "https://idp.example.com/auth", + TokenEndpoint: "https://idp.example.com/token", + } + } entries := []*DCRCredentials{ - {Key: mkKey("https://idp-a.example.com", "https://x/cb", []string{"openid"}), ClientID: "a"}, - {Key: mkKey("https://idp-b.example.com", "https://x/cb", []string{"openid"}), ClientID: "b"}, - {Key: mkKey("https://idp-a.example.com", "https://y/cb", []string{"openid"}), ClientID: "c"}, - {Key: mkKey("https://idp-a.example.com", "https://x/cb", []string{"openid", "email"}), ClientID: "d"}, + mkCreds(mkKey("https://idp-a.example.com", "https://x/cb", []string{"openid"}), "a"), + mkCreds(mkKey("https://idp-b.example.com", "https://x/cb", []string{"openid"}), "b"), + mkCreds(mkKey("https://idp-a.example.com", "https://y/cb", []string{"openid"}), "c"), + mkCreds(mkKey("https://idp-a.example.com", "https://x/cb", []string{"openid", "email"}), "d"), } for _, e := range entries { require.NoError(t, s.StoreDCRCredentials(ctx, e)) @@ -1780,10 +1788,22 @@ func TestMemoryStorage_DCRCredentials_DistinctKeysDoNotCollide(t *testing.T) { func TestMemoryStorage_DCRCredentials_OverwriteSemantics(t *testing.T) { withStorage(t, func(ctx context.Context, s *MemoryStorage) { - key := DCRKey{Issuer: "https://idp.example.com", RedirectURI: "https://x/cb"} + key := DCRKey{ + Issuer: "https://idp.example.com", + RedirectURI: "https://x/cb", + ScopesHash: ScopesHash([]string{"openid"}), + } + mkCreds := func(clientID string) *DCRCredentials { + return &DCRCredentials{ + Key: key, + ClientID: clientID, + AuthorizationEndpoint: "https://idp.example.com/auth", + TokenEndpoint: "https://idp.example.com/token", 
+ } + } - require.NoError(t, s.StoreDCRCredentials(ctx, &DCRCredentials{Key: key, ClientID: "first"})) - require.NoError(t, s.StoreDCRCredentials(ctx, &DCRCredentials{Key: key, ClientID: "second"})) + require.NoError(t, s.StoreDCRCredentials(ctx, mkCreds("first"))) + require.NoError(t, s.StoreDCRCredentials(ctx, mkCreds("second"))) got, err := s.GetDCRCredentials(ctx, key) require.NoError(t, err) @@ -1799,31 +1819,79 @@ func TestMemoryStorage_DCRCredentials_NotFound(t *testing.T) { } // TestMemoryStorage_DCRCredentials_StoreInvalidInputRejected pins the -// fail-loud-on-invalid-input contract: nil creds, an empty Key.Issuer, or -// an empty Key.RedirectURI must be rejected with fosite.ErrInvalidRequest -// rather than producing a working-looking write under a partially-populated -// key. +// fail-loud-on-invalid-input contract: nil creds, an unpopulated Key +// (empty Issuer, RedirectURI, or ScopesHash), and missing RFC 7591 +// mandatory response fields (ClientID, AuthorizationEndpoint, +// TokenEndpoint) must be rejected with fosite.ErrInvalidRequest rather +// than producing a working-looking write that fails downstream at the +// upstream's token endpoint. func TestMemoryStorage_DCRCredentials_StoreInvalidInputRejected(t *testing.T) { t.Parallel() + // validCreds returns a fully-populated DCRCredentials that subtests + // mutate to isolate a single missing field. Keeping every other field + // valid ensures the assertion proves which field was rejected. 
+ validCreds := func() *DCRCredentials { + return &DCRCredentials{ + Key: DCRKey{ + Issuer: "https://idp.example.com", + RedirectURI: "https://x/cb", + ScopesHash: ScopesHash([]string{"openid"}), + }, + ClientID: "abc", + AuthorizationEndpoint: "https://idp.example.com/auth", + TokenEndpoint: "https://idp.example.com/token", + } + } + tests := []struct { - name string - creds *DCRCredentials + name string + mutator func(*DCRCredentials) *DCRCredentials }{ { - name: "nil creds", - creds: nil, + name: "nil creds", + mutator: func(*DCRCredentials) *DCRCredentials { return nil }, }, { name: "empty issuer", - creds: &DCRCredentials{ - Key: DCRKey{Issuer: "", RedirectURI: "https://x/cb"}, + mutator: func(c *DCRCredentials) *DCRCredentials { + c.Key.Issuer = "" + return c }, }, { name: "empty redirect_uri", - creds: &DCRCredentials{ - Key: DCRKey{Issuer: "https://idp.example.com", RedirectURI: ""}, + mutator: func(c *DCRCredentials) *DCRCredentials { + c.Key.RedirectURI = "" + return c + }, + }, + { + name: "empty scopes_hash", + mutator: func(c *DCRCredentials) *DCRCredentials { + c.Key.ScopesHash = "" + return c + }, + }, + { + name: "empty client_id", + mutator: func(c *DCRCredentials) *DCRCredentials { + c.ClientID = "" + return c + }, + }, + { + name: "empty authorization_endpoint", + mutator: func(c *DCRCredentials) *DCRCredentials { + c.AuthorizationEndpoint = "" + return c + }, + }, + { + name: "empty token_endpoint", + mutator: func(c *DCRCredentials) *DCRCredentials { + c.TokenEndpoint = "" + return c }, }, } @@ -1831,7 +1899,7 @@ func TestMemoryStorage_DCRCredentials_StoreInvalidInputRejected(t *testing.T) { tc := tc t.Run(tc.name, func(t *testing.T) { withStorage(t, func(ctx context.Context, s *MemoryStorage) { - err := s.StoreDCRCredentials(ctx, tc.creds) + err := s.StoreDCRCredentials(ctx, tc.mutator(validCreds())) require.Error(t, err) assert.ErrorIs(t, err, fosite.ErrInvalidRequest) // Confirm the rejection did not partially populate the store. 
@@ -1847,8 +1915,17 @@ func TestMemoryStorage_DCRCredentials_StoreInvalidInputRejected(t *testing.T) { // affect persisted state. func TestMemoryStorage_DCRCredentials_GetReturnsDefensiveCopy(t *testing.T) { withStorage(t, func(ctx context.Context, s *MemoryStorage) { - key := DCRKey{Issuer: "https://idp.example.com", RedirectURI: "https://x/cb"} - require.NoError(t, s.StoreDCRCredentials(ctx, &DCRCredentials{Key: key, ClientID: "orig"})) + key := DCRKey{ + Issuer: "https://idp.example.com", + RedirectURI: "https://x/cb", + ScopesHash: ScopesHash([]string{"openid"}), + } + require.NoError(t, s.StoreDCRCredentials(ctx, &DCRCredentials{ + Key: key, + ClientID: "orig", + AuthorizationEndpoint: "https://idp.example.com/auth", + TokenEndpoint: "https://idp.example.com/token", + })) got, err := s.GetDCRCredentials(ctx, key) require.NoError(t, err) @@ -1865,8 +1942,17 @@ func TestMemoryStorage_DCRCredentials_GetReturnsDefensiveCopy(t *testing.T) { // Store must not affect persisted state. func TestMemoryStorage_DCRCredentials_StoreCopyIsolatesCaller(t *testing.T) { withStorage(t, func(ctx context.Context, s *MemoryStorage) { - key := DCRKey{Issuer: "https://idp.example.com", RedirectURI: "https://x/cb"} - input := &DCRCredentials{Key: key, ClientID: "orig"} + key := DCRKey{ + Issuer: "https://idp.example.com", + RedirectURI: "https://x/cb", + ScopesHash: ScopesHash([]string{"openid"}), + } + input := &DCRCredentials{ + Key: key, + ClientID: "orig", + AuthorizationEndpoint: "https://idp.example.com/auth", + TokenEndpoint: "https://idp.example.com/token", + } require.NoError(t, s.StoreDCRCredentials(ctx, input)) input.ClientID = "tampered-after-store" @@ -1883,11 +1969,17 @@ func TestMemoryStorage_DCRCredentials_StoreCopyIsolatesCaller(t *testing.T) { // process lifetime. 
func TestMemoryStorage_DCRCredentials_ExcludedFromCleanupExpired(t *testing.T) { withStorage(t, func(ctx context.Context, s *MemoryStorage) { - key := DCRKey{Issuer: "https://idp.example.com", RedirectURI: "https://x/cb"} + key := DCRKey{ + Issuer: "https://idp.example.com", + RedirectURI: "https://x/cb", + ScopesHash: ScopesHash([]string{"openid"}), + } require.NoError(t, s.StoreDCRCredentials(ctx, &DCRCredentials{ - Key: key, - ClientID: "abc", - CreatedAt: time.Now().Add(-365 * 24 * time.Hour), + Key: key, + ClientID: "abc", + AuthorizationEndpoint: "https://idp.example.com/auth", + TokenEndpoint: "https://idp.example.com/token", + CreatedAt: time.Now().Add(-365 * 24 * time.Hour), })) s.cleanupExpired() From 22ab153e2d8e8a0c9ac0d839f723fe92232a99e8 Mon Sep 17 00:00:00 2001 From: Trey Date: Wed, 6 May 2026 09:47:47 -0700 Subject: [PATCH 5/7] Correct DCR-cache bounding-claim comments MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The previous comments asserted the cache is "bounded by the operator-configured upstream count". This holds only when (Issuer, RedirectURI, ScopesHash) is invariant per upstream over the process lifetime. ScopesHash changes whenever the operator widens or narrows the scope set on MCPServer.Spec.OAuthConfig.Scopes, or when an upstream's RFC 8414 scopes_supported rotates and the resolver re-derives the scope set. Each distinct ScopesHash produces a fresh entry; nothing evicts the prior one (cleanupExpired deliberately skips this map), so in a long-lived process with rotating scope sets growth is upstream count × distinct-scope-sets-ever-registered, not upstream count alone. Each retained entry holds a ClientSecret, so this is also a (slow) sensitive-data accumulation. The Redis backend's SetEX TTL mitigates this in production. Updated parallel comments at memory.go:97 and runner/dcr_store.go:61 to state the actual bound and the production mitigation. 
Addresses stacklok/toolhive#5186 review F5 (MEDIUM, score 8): "bounded by upstream count" claim incorrect when scopes change. --- pkg/authserver/runner/dcr_store.go | 12 ++++++++---- pkg/authserver/storage/memory.go | 9 +++++++-- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/pkg/authserver/runner/dcr_store.go b/pkg/authserver/runner/dcr_store.go index 6b4537968c..07ef0a2985 100644 --- a/pkg/authserver/runner/dcr_store.go +++ b/pkg/authserver/runner/dcr_store.go @@ -59,10 +59,14 @@ type dcrResolutionCache interface { // durability, and cross-process coordination gaps documented below. // // Entries are retained for the process lifetime; there is no TTL and no -// background cleanup goroutine. The usual concern about an unbounded -// cache leaking memory does not apply here because the key space is -// bounded by the operator-configured upstream count, and this -// implementation is not the production answer. +// background cleanup goroutine. Growth is bounded by upstream count × +// distinct scope sets ever registered for each upstream during the +// process lifetime — for a stable configuration this collapses to the +// upstream count, but rotating scope sets (operator-driven scope +// changes, or upstream scopes_supported rotations re-derived by the +// resolver) accumulate stale entries that survive until restart. This +// implementation is not the production answer; the Redis backend +// introduced in Phase 3 mitigates the rotation case via SetEX TTL. 
// // What this enables: serialises Get/Put against a single in-process map so // concurrent callers within one authserver process see a consistent view of diff --git a/pkg/authserver/storage/memory.go b/pkg/authserver/storage/memory.go index 0b4796986b..584c4d3a8a 100644 --- a/pkg/authserver/storage/memory.go +++ b/pkg/authserver/storage/memory.go @@ -99,8 +99,13 @@ type MemoryStorage struct { // periodic cleanupExpired loop: DCR registrations are long-lived and the // authoritative expiry signal is RFC 7591 client_secret_expires_at, which // is honored at read time by callers (and by the future Redis backend's - // SetEX TTL). The map is bounded by the operator-configured upstream - // count, so unbounded growth is not a concern. + // SetEX TTL). Growth is bounded by upstream count × distinct scope sets + // ever registered for each upstream during the process lifetime; for a + // stable configuration this collapses to the upstream count, but rotating + // scope sets (operator-driven scope changes, or upstream + // scopes_supported rotations re-derived by the resolver) accumulate + // stale entries that survive until process restart. The Redis backend's + // SetEX TTL mitigates this in production deployments. dcrCredentials map[DCRKey]*DCRCredentials // cleanupInterval is how often the background cleanup runs From 72337f47c150504c8d8933f495ce465d42001a4b Mon Sep 17 00:00:00 2001 From: Trey Date: Wed, 6 May 2026 09:48:21 -0700 Subject: [PATCH 6/7] Document why DCRKey is embedded in DCRCredentials MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The DCRCredentialStore interface takes (ctx, creds) on Store rather than the (ctx, key, value) shape used by sibling methods on Storage. 
The asymmetry is intentional — embedding DCRKey in DCRCredentials.Key makes the persisted blob self-describing, so a Redis SCAN, an admin dump, or a cross-replica reconciliation path can identify a record's logical cache slot from the value alone — but it diverges from the package's "key + value" convention enough that future readers will ask why. Add a "Why the key is embedded" section to the interface doc that states the rationale and points at the Memory backend's validation contract for the populate-Key requirement. Addresses stacklok/toolhive#5186 review F6 (MEDIUM, score 7): signature/naming inconsistent with sibling Storage methods. The reviewer offered two paths — rename to GetDCR/StoreDCR + drop Key, or document the rationale. Documenting was selected because the embedded-Key shape is load-bearing for the Phase 3 Redis backend. --- pkg/authserver/storage/types.go | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/pkg/authserver/storage/types.go b/pkg/authserver/storage/types.go index 3c422c3085..19231212f3 100644 --- a/pkg/authserver/storage/types.go +++ b/pkg/authserver/storage/types.go @@ -260,6 +260,19 @@ type DCRCredentials struct { // resolver — to re-check expiry on read; see MemoryStorage.GetDCRCredentials. // A zero ClientSecretExpiresAt means the upstream did not assert an expiry // and no TTL is applied. +// +// # Why the key is embedded in DCRCredentials +// +// StoreDCRCredentials takes a single (ctx, creds) argument rather than the +// (ctx, key, value) shape used by sibling Store* methods on Storage. The +// DCRKey is embedded as DCRCredentials.Key so the persisted blob is +// self-describing: a Redis SCAN, an admin-tool dump, or a cross-replica +// reconciliation path can identify a record's logical cache slot +// (Issuer, RedirectURI, ScopesHash) from the value alone, without +// reconstructing it from a separately-passed key. 
This is a deliberate +// asymmetry with the rest of the package — callers must populate creds.Key +// before Store, and implementations validate it (see MemoryStorage docs +// for the rejected-input list). type DCRCredentialStore interface { // GetDCRCredentials returns the credentials for the given key. // Returns ErrNotFound (wrapped) if no entry exists for the key. From 409a66e098d03a6b6ea8cd0d3dec0ba0ba5be20f Mon Sep 17 00:00:00 2001 From: Trey Date: Wed, 6 May 2026 09:50:07 -0700 Subject: [PATCH 7/7] Add concurrent-access test for DCR Store/Get methods MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The existing TestMemoryStorage_ConcurrentAccess fan-out covers authcodes, tokens, and clients but not the new DCRCredentialStore methods, and every other DCR test runs single-goroutine. A regression that dropped the RWMutex or returned an internal pointer instead of a defensive copy would not be caught by the suite under -race. Add a dedicated TestMemoryStorage_DCRCredentials_ConcurrentAccess that fans out 16 workers × 200 ops, alternating Store / Get against an overlapping key space (lock must serialise writes) and a disjoint key space (reads must always hit). Bounded by a 10s fail-fast deadline so a deadlock regression surfaces with a clear message rather than the global test timeout. Mirrors the runner-side TestInMemoryDCRResolutionCache_ConcurrentAccess pattern. Addresses stacklok/toolhive#5186 review F7 (LOW-MEDIUM, score 7): missing concurrent-access test for new DCR Store/Get methods. 
--- pkg/authserver/storage/memory_test.go | 85 +++++++++++++++++++++++++++ 1 file changed, 85 insertions(+) diff --git a/pkg/authserver/storage/memory_test.go b/pkg/authserver/storage/memory_test.go index 307fe3b100..b2cd204aa6 100644 --- a/pkg/authserver/storage/memory_test.go +++ b/pkg/authserver/storage/memory_test.go @@ -23,6 +23,7 @@ import ( "fmt" "net/url" "sync" + "sync/atomic" "testing" "time" @@ -1990,6 +1991,90 @@ func TestMemoryStorage_DCRCredentials_ExcludedFromCleanupExpired(t *testing.T) { }) } +// TestMemoryStorage_DCRCredentials_ConcurrentAccess fans out N goroutines +// performing alternating StoreDCRCredentials / GetDCRCredentials against +// overlapping and disjoint keys, exercising the sync.RWMutex guard +// advertised in the DCRCredentialStore contract. With go test -race this +// catches a future change that drops the lock or returns an internal +// pointer instead of a defensive copy. +// +// The test is bounded by a fail-fast deadline so a regression that +// deadlocks fails loudly rather than hanging until the global Go test +// timeout, per .claude/rules/testing.md. +func TestMemoryStorage_DCRCredentials_ConcurrentAccess(t *testing.T) { + withStorage(t, func(ctx context.Context, s *MemoryStorage) { + const ( + workers = 16 + opsPerWorker = 200 + ) + + // Two key spaces: overlapping (every worker writes the same keys, so + // the lock must serialise their writes) and disjoint (each worker has + // its own key space, so reads never see another worker's writes). 
+ overlappingKey := func(i int) DCRKey { + return DCRKey{ + Issuer: "https://idp.example.com", + RedirectURI: "https://thv.example.com/oauth/callback", + ScopesHash: fmt.Sprintf("overlap-%d", i%4), + } + } + disjointKey := func(worker, i int) DCRKey { + return DCRKey{ + Issuer: fmt.Sprintf("https://idp-%d.example.com", worker), + RedirectURI: "https://thv.example.com/oauth/callback", + ScopesHash: fmt.Sprintf("disjoint-%d", i), + } + } + mkCreds := func(key DCRKey, clientID string) *DCRCredentials { + return &DCRCredentials{ + Key: key, + ClientID: clientID, + AuthorizationEndpoint: "https://idp.example.com/auth", + TokenEndpoint: "https://idp.example.com/token", + } + } + + var errCount int32 + var wg sync.WaitGroup + wg.Add(workers) + for w := 0; w < workers; w++ { + go func(worker int) { + defer wg.Done() + for i := 0; i < opsPerWorker; i++ { + var key DCRKey + if i%2 == 0 { + key = overlappingKey(i) + } else { + key = disjointKey(worker, i) + } + if err := s.StoreDCRCredentials(ctx, mkCreds(key, fmt.Sprintf("worker-%d-op-%d", worker, i))); err != nil { + atomic.AddInt32(&errCount, 1) + } + // The disjoint Get must always hit (the goroutine that + // just wrote owns this key); the overlapping Get may + // miss if another worker happens to be mid-Store, but + // that's not an error. + if _, err := s.GetDCRCredentials(ctx, key); err != nil && i%2 != 0 { + atomic.AddInt32(&errCount, 1) + } + } + }(w) + } + + done := make(chan struct{}) + go func() { wg.Wait(); close(done) }() + + select { + case <-done: + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for concurrent DCR Store/Get to finish; possible deadlock") + } + + assert.Zero(t, atomic.LoadInt32(&errCount), + "no Store or disjoint-Get should have errored under concurrent access") + }) +} + // TestScopesHash_StableAcrossPermutationAndDuplicates pins the canonical-form // invariants of ScopesHash. func TestScopesHash_StableAcrossPermutationAndDuplicates(t *testing.T) {