-
Notifications
You must be signed in to change notification settings - Fork 695
fix: resolve 5 memory leak issues causing heap OOM (Issue #598) #603
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: master
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -213,6 +213,9 @@ export function computeEffectiveHalfLife( | |
| */ | ||
| export class AccessTracker { | ||
| private readonly pending: Map<string, number> = new Map(); | ||
| // Tracks retry count per ID so that delta is never amplified across failures. | ||
| private readonly _retryCount = new Map<string, number>(); | ||
| private readonly _maxRetries = 5; | ||
| private debounceTimer: ReturnType<typeof setTimeout> | null = null; | ||
| private flushPromise: Promise<void> | null = null; | ||
| private readonly debounceMs: number; | ||
|
|
@@ -291,10 +294,22 @@ export class AccessTracker { | |
| this.clearTimer(); | ||
| if (this.pending.size > 0) { | ||
| this.logger.warn( | ||
| `access-tracker: destroying with ${this.pending.size} pending writes`, | ||
| `access-tracker: destroying with ${this.pending.size} pending writes — attempting final flush (3s timeout)`, | ||
| ); | ||
| // Fire-and-forget final flush with a hard 3s timeout. Uses Promise.race | ||
| // to guarantee we always clear pending/_retryCount even if flush hangs. | ||
| const flushWithTimeout = Promise.race([ | ||
| this.doFlush(), | ||
| new Promise<void>((resolve) => setTimeout(resolve, 3_000)), | ||
| ]); | ||
| void flushWithTimeout.finally(() => { | ||
| this.pending.clear(); | ||
| this._retryCount.clear(); | ||
| }); | ||
| } else { | ||
| this.pending.clear(); | ||
| this._retryCount.clear(); | ||
| } | ||
| this.pending.clear(); | ||
| } | ||
|
|
||
| // -------------------------------------------------------------------------- | ||
|
|
@@ -308,18 +323,33 @@ export class AccessTracker { | |
| for (const [id, delta] of batch) { | ||
| try { | ||
| const current = await this.store.getById(id); | ||
| if (!current) continue; | ||
| if (!current) { | ||
| // ID not found — memory was deleted or outside current scope. | ||
| // Do NOT retry or warn; just drop silently and clear any retry counter. | ||
| this._retryCount.delete(id); | ||
| continue; | ||
| } | ||
|
|
||
| const updatedMeta = buildUpdatedMetadata(current.metadata, delta); | ||
| await this.store.update(id, { metadata: updatedMeta }); | ||
| this._retryCount.delete(id); // success — clear retry counter | ||
| } catch (err) { | ||
| // Requeue failed delta for retry on next flush | ||
| const existing = this.pending.get(id) ?? 0; | ||
| this.pending.set(id, existing + delta); | ||
| this.logger.warn( | ||
| `access-tracker: write-back failed for ${id.slice(0, 8)}:`, | ||
| err, | ||
| ); | ||
| const retryCount = (this._retryCount.get(id) ?? 0) + 1; | ||
| if (retryCount > this._maxRetries) { | ||
| // Exceeded max retries — drop and log error. | ||
| this._retryCount.delete(id); | ||
| this.logger.error( | ||
| `access-tracker: dropping ${id.slice(0, 8)} after ${retryCount} failed retries`, | ||
| ); | ||
| } else { | ||
| this._retryCount.set(id, retryCount); | ||
| // Requeue with the original delta only (NOT accumulated) for next flush. | ||
| this.pending.set(id, delta); | ||
|
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.
Useful? React with 👍 / 👎. |
||
| this.logger.warn( | ||
| `access-tracker: write-back failed for ${id.slice(0, 8)} (attempt ${retryCount}/${this._maxRetries}):`, | ||
| err, | ||
| ); | ||
| } | ||
| } | ||
| } | ||
| } | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,40 @@ | ||
| /** | ||
| * Smoke test for: skip before_prompt_build hooks for subagent sessions | ||
| * Bug: sub-agent sessions cause gateway blocking — hooks without subagent skip | ||
| * run LanceDB I/O sequentially, blocking all other user sessions. | ||
| * | ||
| * Run: node test/issue598_smoke.mjs | ||
| * Expected: all 3 hooks PASS | ||
| */ | ||
|
|
||
| import { readFileSync } from "fs"; | ||
|
|
||
| const FILE = "C:\\Users\\admin\\.openclaw\\extensions\\memory-lancedb-pro\\index.ts"; | ||
| const content = readFileSync(FILE, "utf-8"); | ||
| const lines = content.split("\n"); | ||
|
|
||
| // [hook_opens_line, guard_line, name] | ||
| const checks = [ | ||
| [2223, 2226, "auto-recall before_prompt_build"], | ||
| [3084, 3087, "reflection-injector inheritance"], | ||
| [3113, 3116, "reflection-injector derived"], | ||
| ]; | ||
|
|
||
| let pass = 0, fail = 0; | ||
| for (const [hookLine, guardLine, name] of checks) { | ||
| const hookContent = (lines[hookLine - 1] || "").trim(); | ||
| const guardContent = (lines[guardLine - 1] || "").trim(); | ||
| if (hookContent.includes("before_prompt_build") && guardContent.includes(":subagent:")) { | ||
| console.log(`PASS ${name.padEnd(40)} hook@${hookLine} guard@${guardLine}`); | ||
| pass++; | ||
| } else { | ||
| console.log(`FAIL ${name}`); | ||
| console.log(` hook@${hookLine}: ${hookContent}`); | ||
| console.log(` guard@${guardLine}: ${guardContent}`); | ||
| fail++; | ||
| } | ||
| } | ||
|
|
||
| console.log(`\n${pass}/${pass + fail} checks passed`); | ||
| if (fail > 0) process.exit(1); | ||
| else console.log("ALL PASSED — subagent sessions skipped before async work"); |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
The `AccessTrackerOptions` logger contract only requires `warn` (with optional `info`), but this new retry-drop branch calls `this.logger.error(...)` unconditionally. Any caller that provides the documented minimal logger will throw at runtime once retries exceed `_maxRetries`, converting a handled write-back failure into an unexpected flush failure. Useful? React with 👍 / 👎.