Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 9 additions & 1 deletion src/App.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -386,6 +386,14 @@ function App() {
* deferred until Framer Motion finishes the exit transition.
*/
const requestHideOverlay = useCallback(() => {
// Cancel any in-flight work: active Ollama streaming, image processing,
// or screen capture. This ensures hiding the overlay (via double-tap
// Control, Escape, Cmd+W, or the X button) behaves like pressing Stop.
cancel();
pendingSubmitRef.current = null;
setIsSubmitPending(false);
setPendingUserMessage(null);

windowAnchorRef.current = null;
isPreExpandedRef.current = false;
/* v8 ignore start -- DOM ref null guard: always set when overlay is visible */
Expand All @@ -407,7 +415,7 @@ function App() {
}
return 'hiding';
});
}, []);
}, [cancel]);

/** Ref attached to the chat-mode history dropdown for click-outside detection. */
const historyDropdownRef = useRef<HTMLDivElement>(null);
Expand Down
112 changes: 112 additions & 0 deletions src/hooks/__tests__/useOllama.test.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -573,6 +573,118 @@ describe('useOllama', () => {
});
});

// ─── Stale channel after reset ───────────────────────────────────────────────

describe('stale channel after reset', () => {
  /**
   * Renders the hook, submits one prompt, and hands back both the hook
   * handle and the streaming channel opened for that turn. Every test in
   * this suite starts from this exact state.
   */
  const beginTurn = async (onTurnComplete?: () => void) => {
    const { result } = renderHook(() => useOllama(onTurnComplete));

    await act(async () => {
      await result.current.ask('hello');
    });

    return { result, channel: getChannel() };
  };

  it('ignores Token chunks arriving after reset()', async () => {
    const { result, channel } = await beginTurn();

    // Stream a partial token through the live channel first.
    act(() => {
      channel!.simulateMessage({ type: 'Token', data: 'Partial' });
    });
    expect(result.current.streamingContent).toBe('Partial');

    // reset() wipes state and bumps the epoch, invalidating the channel.
    act(() => {
      result.current.reset();
    });
    expect(result.current.messages).toEqual([]);
    expect(result.current.streamingContent).toBe('');

    // A late token from the superseded channel must be dropped.
    act(() => {
      channel!.simulateMessage({ type: 'Token', data: ' stale token' });
    });

    expect(result.current.streamingContent).toBe('');
    expect(result.current.messages).toEqual([]);
  });

  it('ignores Cancelled chunk arriving after reset()', async () => {
    const { result, channel } = await beginTurn();

    act(() => {
      channel!.simulateMessage({ type: 'Token', data: 'Partial' });
    });

    act(() => {
      result.current.reset();
    });

    // A Cancelled event from the stale channel must not resurrect the
    // partial content as a finalized message.
    act(() => {
      channel!.simulateMessage({ type: 'Cancelled' });
    });

    expect(result.current.messages).toEqual([]);
    expect(result.current.isGenerating).toBe(false);
  });

  it('ignores Done chunk arriving after reset()', async () => {
    const onTurnComplete = vi.fn();
    const { result, channel } = await beginTurn(onTurnComplete);

    act(() => {
      channel!.simulateMessage({ type: 'Token', data: 'Full answer' });
    });

    act(() => {
      result.current.reset();
    });

    // A stale Done must neither commit a message nor fire the callback.
    act(() => {
      channel!.simulateMessage({ type: 'Done' });
    });

    expect(result.current.messages).toEqual([]);
    expect(onTurnComplete).not.toHaveBeenCalled();
  });

  it('ignores Error chunk arriving after reset()', async () => {
    const { result, channel } = await beginTurn();

    act(() => {
      result.current.reset();
    });

    // An error surfacing on the dead channel must not create a message.
    act(() => {
      channel!.simulateMessage({
        type: 'Error',
        data: { kind: 'Other', message: 'Something went wrong\nHTTP 500' },
      });
    });

    expect(result.current.messages).toEqual([]);
  });
});

// ─── onTurnComplete callback ─────────────────────────────────────────────────

describe('onTurnComplete callback', () => {
Expand Down
14 changes: 13 additions & 1 deletion src/hooks/useOllama.ts
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
import { useState, useCallback } from 'react';
import { useState, useCallback, useRef } from 'react';
import { invoke, Channel } from '@tauri-apps/api/core';

/** Mirrors the Rust OllamaErrorKind enum sent over IPC. */
Expand Down Expand Up @@ -46,6 +46,10 @@ export function useOllama(
const [streamingContent, setStreamingContent] = useState('');
const [isGenerating, setIsGenerating] = useState(false);

// Epoch counter: bumped on every reset so that stale channel callbacks from
// a previous generation can detect they are outdated and bail out.
const epochRef = useRef(0);

/**
* Submits a message to the Ollama backend and initiates the streaming response.
* The backend manages conversation history — only the new user message is sent.
Expand Down Expand Up @@ -82,12 +86,19 @@ export function useOllama(
setStreamingContent('');
setIsGenerating(true);

// Snapshot the epoch so this channel's callbacks can detect a reset.
const epochAtStart = epochRef.current;

const channel = new Channel<StreamChunk>();
// Use block-scoped variable to accumulate the stream and occasionally flush to React state,
// mitigating rendering lag from hundreds of fast chunk events.
let currentContent = '';

channel.onmessage = (chunk) => {
// A reset occurred since this generation started; discard all
// remaining chunks so stale content never re-populates the UI.
if (epochRef.current !== epochAtStart) return;

if (chunk.type === 'Token') {
currentContent += chunk.data;
setStreamingContent(currentContent);
Expand Down Expand Up @@ -165,6 +176,7 @@ export function useOllama(

/** Resets all conversation state to prepare for a fresh session. */
const reset = useCallback(() => {
epochRef.current += 1;
setMessages([]);
setStreamingContent('');
setIsGenerating(false);
Expand Down