= ({
}
return (
-
- {
- rangeRef.current = newRange
- }}
- data={flattenedItems}
- itemContent={(index) => renderRow(index)}
- style={{ height: "100%" }}
- />
-
+ <>
+
+ {
+ rangeRef.current = newRange
+ }}
+ data={flattenedItems}
+ itemContent={(index) => renderRow(index)}
+ style={{ height: "100%" }}
+ />
+
+ >
)
}
diff --git a/src/services/search.ts b/src/services/search.ts
new file mode 100644
index 000000000..e69de29bb
diff --git a/src/store/Query/actions.ts b/src/store/Query/actions.ts
index 158b7d13b..dcb6ce64a 100644
--- a/src/store/Query/actions.ts
+++ b/src/store/Query/actions.ts
@@ -126,6 +126,13 @@ const setQueriesToRun = (payload: QueriesToRun): QueryAction => ({
payload,
})
+const setAISuggestionRequest = (
+ payload: { query: string; startOffset: number } | null,
+): QueryAction => ({
+ type: QueryAT.SET_AI_SUGGESTION_REQUEST,
+ payload,
+})
+
export default {
addNotification,
cleanupNotifications,
@@ -139,4 +146,5 @@ export default {
setColumns,
setActiveNotification,
setQueriesToRun,
+ setAISuggestionRequest,
}
diff --git a/src/store/Query/reducers.ts b/src/store/Query/reducers.ts
index 4019cfea7..e59f368f1 100644
--- a/src/store/Query/reducers.ts
+++ b/src/store/Query/reducers.ts
@@ -33,6 +33,7 @@ export const initialState: QueryStateShape = {
queryNotifications: {},
activeNotification: null,
queriesToRun: [],
+ aiSuggestionRequest: null,
}
const query = (state = initialState, action: QueryAction): QueryStateShape => {
@@ -265,6 +266,14 @@ const query = (state = initialState, action: QueryAction): QueryStateShape => {
queriesToRun: action.payload,
}
}
+
+ case QueryAT.SET_AI_SUGGESTION_REQUEST: {
+ return {
+ ...state,
+ aiSuggestionRequest: action.payload,
+ }
+ }
+
default:
return state
}
diff --git a/src/store/Query/selectors.ts b/src/store/Query/selectors.ts
index 5152616c8..2acfb55df 100644
--- a/src/store/Query/selectors.ts
+++ b/src/store/Query/selectors.ts
@@ -28,6 +28,7 @@ import {
StoreShape,
QueryNotifications,
QueriesToRun,
+ AISuggestionRequest,
} from "types"
import type {
QueryRawResult,
@@ -68,6 +69,10 @@ const getColumns: (
store: StoreShape,
) => Record = (store) => store.query.columns
+const getAISuggestionRequest: (
+ store: StoreShape,
+) => AISuggestionRequest | null = (store) => store.query.aiSuggestionRequest
+
export default {
getNotifications,
getQueryNotifications,
@@ -78,4 +83,5 @@ export default {
getRunning,
getTables,
getColumns,
+ getAISuggestionRequest,
}
diff --git a/src/store/Query/types.ts b/src/store/Query/types.ts
index b12c82098..718c7135d 100644
--- a/src/store/Query/types.ts
+++ b/src/store/Query/types.ts
@@ -1,27 +1,3 @@
-/*******************************************************************************
- * ___ _ ____ ____
- * / _ \ _ _ ___ ___| |_| _ \| __ )
- * | | | | | | |/ _ \/ __| __| | | | _ \
- * | |_| | |_| | __/\__ \ |_| |_| | |_) |
- * \__\_\\__,_|\___||___/\__|____/|____/
- *
- * Copyright (c) 2014-2019 Appsicle
- * Copyright (c) 2019-2022 QuestDB
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- ******************************************************************************/
-
import type { ReactNode } from "react"
import type {
@@ -46,6 +22,7 @@ export enum RunningType {
EXPLAIN = "explain",
REFRESH = "refresh",
QUERY = "query",
+ AI_SUGGESTION = "ai_suggestion",
NONE = "none",
}
@@ -70,6 +47,11 @@ export type QueryNotifications = Readonly<{
explain?: NotificationShape
}>
+export type AISuggestionRequest = Readonly<{
+ query: string
+ startOffset: number
+}>
+
export type QueryStateShape = Readonly<{
notifications: NotificationShape[]
tables: Table[]
@@ -79,6 +61,7 @@ export type QueryStateShape = Readonly<{
queryNotifications: Record>
activeNotification: NotificationShape | null
queriesToRun: QueriesToRun
+ aiSuggestionRequest: AISuggestionRequest | null
}>
export enum QueryAT {
@@ -94,6 +77,7 @@ export enum QueryAT {
SET_COLUMNS = "QUERY/SET_COLUMNS",
SET_ACTIVE_NOTIFICATION = "QUERY/SET_ACTIVE_NOTIFICATION",
SET_QUERIES_TO_RUN = "QUERY/SET_QUERIES_TO_RUN",
+ SET_AI_SUGGESTION_REQUEST = "QUERY/SET_AI_SUGGESTION_REQUEST",
}
type AddNotificationAction = Readonly<{
@@ -165,6 +149,11 @@ type SetQueriesToRunAction = Readonly<{
payload: QueriesToRun
}>
+type SetAISuggestionRequestAction = Readonly<{
+ type: QueryAT.SET_AI_SUGGESTION_REQUEST
+ payload: AISuggestionRequest | null
+}>
+
export type QueryAction =
| AddNotificationAction
| CleanupNotificationsAction
@@ -178,3 +167,4 @@ export type QueryAction =
| SetColumnsActions
| SetActiveNotificationAction
| SetQueriesToRunAction
+ | SetAISuggestionRequestAction
diff --git a/src/store/buffers.ts b/src/store/buffers.ts
index 518844663..a1657ab81 100644
--- a/src/store/buffers.ts
+++ b/src/store/buffers.ts
@@ -64,6 +64,13 @@ export type Buffer = {
editorViewState?: editor.ICodeEditorViewState
metricsViewState?: MetricsViewState
isTemporary?: boolean
+ isDiffBuffer?: boolean
+ diffContent?: {
+ original: string
+ modified: string
+ queryStartOffset: number
+ conversationId?: string
+ }
}
const defaultEditorViewState: editor.ICodeEditorViewState = {
@@ -109,6 +116,8 @@ export const makeBuffer = ({
archived,
archivedAt,
isTemporary,
+ isDiffBuffer,
+ diffContent,
}: {
label: string
value?: string
@@ -118,6 +127,13 @@ export const makeBuffer = ({
archived?: boolean
archivedAt?: number
isTemporary?: boolean
+ isDiffBuffer?: boolean
+ diffContent?: {
+ original: string
+ modified: string
+ queryStartOffset: number
+ conversationId?: string
+ }
}): Omit => ({
label,
value: value ?? "",
@@ -127,6 +143,8 @@ export const makeBuffer = ({
archived,
archivedAt,
isTemporary,
+ isDiffBuffer,
+ diffContent,
})
export const makeFallbackBuffer = (bufferType: BufferType): Buffer => {
diff --git a/src/theme/index.ts b/src/theme/index.ts
index 618212065..1cd67466c 100644
--- a/src/theme/index.ts
+++ b/src/theme/index.ts
@@ -30,18 +30,22 @@ const color: ColorShape = {
black70: "rgba(25, 26, 33, 0.7)",
black40: "rgba(25, 26, 33, 0.4)",
black20: "rgba(25, 26, 33, 0.2)",
+ overlayBackground: "rgba(44, 46, 61, 0.48)",
gray1: "#585858",
gray2: "#bbbbbb",
backgroundDarker: "#21222c",
+ chatBackground: "#1D1E25",
backgroundLighter: "#282a36",
- background: "#21222c",
+ background: "#2d303e",
foreground: "#f8f8f2",
selection: "#44475a",
selectionDarker: "#333544",
+ midnight: "#141725",
comment: "#6272a4",
red: "#ff5555",
redDark: "#5a1d1d",
loginBackground: "#1D070E",
+ orangeDark: "#ff7f2a",
orange: "#ffb86c",
yellow: "#f1fa8c",
green: "#50fa7b",
@@ -85,4 +89,9 @@ export const theme: DefaultThemeShape = {
borderRadius: "0.8rem",
}
+export const pinkLinearGradientHorizontal =
+ "linear-gradient(90deg, #D14671 0%, #892C6C 100%)"
+export const pinkLinearGradientVertical =
+ "linear-gradient(180deg, #D14671 0%, #892C6C 100%)"
+
export type ThemeShape = typeof theme
diff --git a/src/types/styled.d.ts b/src/types/styled.d.ts
index 4c3837750..81b27c841 100644
--- a/src/types/styled.d.ts
+++ b/src/types/styled.d.ts
@@ -27,20 +27,24 @@ import "styled-components"
export type ColorShape = {
black: string
black70: string
+ overlayBackground: string
black40: string
black20: string
gray1: string
gray2: string
backgroundLighter: string
+ chatBackground: string
backgroundDarker: string
background: string
foreground: string
selection: string
selectionDarker: string
comment: string
+ midnight: string
red: string
redDark: string
loginBackground: string
+ orangeDark: string
orange: string
yellow: string
green: string
diff --git a/src/utils/aiAssistant.ts b/src/utils/aiAssistant.ts
new file mode 100644
index 000000000..7d7a1badd
--- /dev/null
+++ b/src/utils/aiAssistant.ts
@@ -0,0 +1,1703 @@
+import Anthropic from "@anthropic-ai/sdk"
+import OpenAI from "openai"
+import { Client } from "./questdb/client"
+import { Type } from "./questdb/types"
+import { getModelProps, MODEL_OPTIONS } from "./aiAssistantSettings"
+import type { ModelOption, Provider } from "./aiAssistantSettings"
+import { formatSql } from "./formatSql"
+import { AIOperationStatus, StatusArgs } from "../providers/AIStatusProvider"
+import {
+ getQuestDBTableOfContents,
+ getSpecificDocumentation,
+ parseDocItems,
+ DocCategory,
+} from "./questdbDocsRetrieval"
+import { MessageParam } from "@anthropic-ai/sdk/resources/messages"
+import type {
+ ResponseOutputItem,
+ ResponseTextConfig,
+} from "openai/resources/responses/responses"
+import type { Tool as AnthropicTool } from "@anthropic-ai/sdk/resources/messages"
+import type { ConversationId } from "../providers/AIConversationProvider/types"
+
+export type ActiveProviderSettings = {
+ model: string
+ provider: Provider
+ apiKey: string
+}
+
+export interface AiAssistantAPIError {
+ type: "rate_limit" | "invalid_key" | "network" | "unknown" | "aborted"
+ message: string
+ details?: string
+}
+
+export interface AiAssistantExplanation {
+ explanation: string
+ tokenUsage?: TokenUsage
+}
+
+export type AiAssistantValidateQueryResult =
+ | { valid: true }
+ | { valid: false; error: string; position: number }
+
+export interface TableSchemaExplanation {
+ explanation: string
+ columns: Array<{
+ name: string
+ description: string
+ data_type: string
+ }>
+ storage_details: string[]
+ tokenUsage?: TokenUsage
+}
+
+export const schemaExplanationToMarkdown = (
+ explanation: TableSchemaExplanation,
+): string => {
+ let md = ""
+
+ md += `${explanation.explanation}\n\n`
+
+ if (explanation.columns.length > 0) {
+ md += `## Columns\n\n`
+ md += `| Column | Type | Description |\n`
+ md += `|--------|------|-------------|\n`
+ for (const col of explanation.columns) {
+ md += `| ${col.name} | \`${col.data_type}\` | ${col.description} |\n`
+ }
+ md += `\n`
+ }
+
+ if (explanation.storage_details.length > 0) {
+ md += `## Storage Details\n\n`
+ for (const detail of explanation.storage_details) {
+ md += `- ${detail}\n`
+ }
+ }
+
+ return md
+}
+
+export interface TokenUsage {
+ inputTokens: number
+ outputTokens: number
+}
+
+export interface GeneratedSQL {
+ sql: string | null
+ explanation?: string
+ tokenUsage?: TokenUsage
+}
+
+export interface ModelToolsClient {
+ validateQuery: (query: string) => Promise
+ getTables?: () => Promise>
+ getTableSchema?: (tableName: string) => Promise
+}
+
+type StatusCallback = (
+ status: AIOperationStatus | null,
+ args?: StatusArgs,
+) => void
+
+type ProviderClients =
+ | {
+ provider: "anthropic"
+ anthropic: Anthropic
+ }
+ | {
+ provider: "openai"
+ openai: OpenAI
+ }
+
+const ExplainFormat: ResponseTextConfig = {
+ format: {
+ type: "json_schema" as const,
+ name: "explain_format",
+ schema: {
+ type: "object",
+ properties: {
+ explanation: { type: "string" },
+ },
+ required: ["explanation"],
+ additionalProperties: false,
+ },
+ strict: true,
+ },
+}
+
+const FixSQLFormat: ResponseTextConfig = {
+ format: {
+ type: "json_schema" as const,
+ name: "fix_sql_format",
+ schema: {
+ type: "object",
+ properties: {
+ sql: { type: ["string", "null"] },
+ explanation: { type: "string" },
+ },
+ required: ["explanation", "sql"],
+ additionalProperties: false,
+ },
+ strict: true,
+ },
+}
+
+const ExplainTableSchemaFormat: ResponseTextConfig = {
+ format: {
+ type: "json_schema" as const,
+ name: "explain_table_schema_format",
+ schema: {
+ type: "object",
+ properties: {
+ explanation: { type: "string" },
+ columns: {
+ type: "array",
+ items: {
+ type: "object",
+ properties: {
+ name: { type: "string" },
+ description: { type: "string" },
+ data_type: { type: "string" },
+ },
+ required: ["name", "description", "data_type"],
+ additionalProperties: false,
+ },
+ },
+ storage_details: {
+ type: "array",
+ items: { type: "string" },
+ },
+ },
+ required: ["explanation", "columns", "storage_details"],
+ additionalProperties: false,
+ },
+ strict: true,
+ },
+}
+
+const ConversationResponseFormat: ResponseTextConfig = {
+ format: {
+ type: "json_schema" as const,
+ name: "conversation_response_format",
+ schema: {
+ type: "object",
+ properties: {
+ sql: { type: ["string", "null"] },
+ explanation: { type: "string" },
+ },
+ required: ["explanation", "sql"],
+ additionalProperties: false,
+ },
+ strict: true,
+ },
+}
+
+const inferProviderFromModel = (model: string): Provider => {
+ const found: ModelOption | undefined = MODEL_OPTIONS.find(
+ (m) => m.value === model,
+ )
+ if (found) return found.provider
+ return model.startsWith("claude") ? "anthropic" : "openai"
+}
+
+const createProviderClients = (
+ settings: ActiveProviderSettings,
+): ProviderClients => {
+ if (!settings.apiKey) {
+ throw new Error(`No API key found for ${settings.provider}`)
+ }
+
+ if (settings.provider === "openai") {
+ return {
+ provider: settings.provider,
+ openai: new OpenAI({
+ apiKey: settings.apiKey,
+ dangerouslyAllowBrowser: true,
+ }),
+ }
+ }
+ return {
+ provider: settings.provider,
+ anthropic: new Anthropic({
+ apiKey: settings.apiKey,
+ dangerouslyAllowBrowser: true,
+ }),
+ }
+}
+
+const SCHEMA_TOOLS: Array = [
+ {
+ name: "get_tables",
+ description:
+ "Get a list of all tables and materialized views in the QuestDB database",
+ input_schema: {
+ type: "object" as const,
+ properties: {},
+ },
+ },
+ {
+ name: "get_table_schema",
+ description:
+ "Get the full schema definition (DDL) for a specific table or materialized view",
+ input_schema: {
+ type: "object" as const,
+ properties: {
+ table_name: {
+ type: "string" as const,
+ description:
+ "The name of the table or materialized view to get schema for",
+ },
+ },
+ required: ["table_name"],
+ },
+ },
+]
+
+const REFERENCE_TOOLS = [
+ {
+ name: "validate_query",
+ description:
+ "Validate the syntax correctness of a SQL query using QuestDB's SQL syntax validator. All generated SQL queries should be validated using this tool before responding to the user.",
+ input_schema: {
+ type: "object" as const,
+ properties: {
+ query: {
+ type: "string" as const,
+ description: "The SQL query to validate",
+ },
+ },
+ required: ["query"],
+ },
+ },
+ {
+ name: "get_questdb_toc",
+ description:
+ "Get a table of contents listing all available QuestDB functions, operators, and SQL keywords. Use this first to see what documentation is available before requesting specific items.",
+ input_schema: {
+ type: "object" as const,
+ properties: {},
+ },
+ },
+ {
+ name: "get_questdb_documentation",
+ description:
+ "Get documentation for specific QuestDB functions, operators, or SQL keywords. This is much more efficient than loading all documentation.",
+ input_schema: {
+ type: "object" as const,
+ properties: {
+ category: {
+ type: "string" as const,
+ enum: ["functions", "operators", "sql", "concepts", "schema"],
+ description: "The category of documentation to retrieve",
+ },
+ items: {
+ type: "array" as const,
+ items: {
+ type: "string" as const,
+ },
+ description:
+ "List of specific docs items in the category. IMPORTANT: Category of these items must match the category parameter. Name of these items should exactly match the entry in the table of contents you get with get_questdb_toc.",
+ },
+ },
+ required: ["category", "items"],
+ },
+ },
+]
+
+const ALL_TOOLS = [...SCHEMA_TOOLS, ...REFERENCE_TOOLS]
+
+const toOpenAIFunctions = (
+ tools: Array<{
+ name: string
+ description?: string
+ input_schema: AnthropicTool["input_schema"]
+ }>,
+) => {
+ return tools.map((t) => ({
+ type: "function" as const,
+ name: t.name,
+ description: t.description,
+ parameters: { ...t.input_schema, additionalProperties: false },
+ strict: true,
+ })) as OpenAI.Responses.Tool[]
+}
+
+export const normalizeSql = (sql: string, insertSemicolon: boolean = true) => {
+ if (!sql) return ""
+ let result = sql.trim()
+ if (result.endsWith(";")) {
+ result = result.slice(0, -1)
+ }
+ return formatSql(result) + (insertSemicolon ? ";" : "")
+}
+
+export function isAiAssistantError(
+ response:
+ | AiAssistantAPIError
+ | AiAssistantExplanation
+ | GeneratedSQL
+ | Partial,
+): response is AiAssistantAPIError {
+ if ("type" in response && "message" in response) {
+ return true
+ }
+ return false
+}
+
+export function createModelToolsClient(
+ questClient: Client,
+ tables?: Array<{ table_name: string; matView: boolean }>,
+): ModelToolsClient {
+ return {
+ async validateQuery(
+ query: string,
+ ): Promise {
+ try {
+ const response = await questClient.validateQuery(query)
+ if ("error" in response) {
+ const errorResponse = response as {
+ error: string
+ position: number
+ query: string
+ }
+ return {
+ valid: false,
+ error: String(errorResponse.error),
+ position: Number(errorResponse.position),
+ }
+ }
+ return {
+ valid: true,
+ }
+ } catch (err) {
+ const errorMessage =
+ err instanceof Error
+ ? err.message
+ : "Failed to validate query. Something went wrong with the server."
+ return {
+ valid: false,
+ error: errorMessage,
+ position: -1,
+ }
+ }
+ },
+ ...(tables
+ ? {
+ getTables(): Promise<
+ Array<{ name: string; type: "table" | "matview" }>
+ > {
+ return Promise.resolve(
+ tables.map((table) => ({
+ name: table.table_name,
+ type: table.matView ? "matview" : ("table" as const),
+ })),
+ )
+ },
+
+ async getTableSchema(tableName: string): Promise {
+ try {
+ const table = tables.find((t) => t.table_name === tableName)
+ if (!table) {
+ return null
+ }
+
+ const ddlResponse = table.matView
+ ? await questClient.showMatViewDDL(tableName)
+ : await questClient.showTableDDL(tableName)
+
+ if (
+ ddlResponse?.type === Type.DQL &&
+ ddlResponse.data?.[0]?.ddl
+ ) {
+ return ddlResponse.data[0].ddl
+ }
+
+ return null
+ } catch (error) {
+ console.error(
+ `Failed to fetch schema for table ${tableName}:`,
+ error,
+ )
+ return null
+ }
+ },
+ }
+ : {}),
+ }
+}
+
+const DOCS_INSTRUCTION_ANTHROPIC = `
+CRITICAL: Always follow this two-phase documentation approach:
+1. Use get_questdb_toc to see available functions/keywords/operators
+2. Use get_questdb_documentation to get details for specific items you'll use`
+
+const getUnifiedPrompt = (grantSchemaAccess?: boolean) => {
+ const base = `You are a SQL expert assistant specializing in QuestDB, a high-performance time-series database. You help users with:
+- Generating QuestDB SQL queries from natural language descriptions
+- Explaining what QuestDB SQL queries do
+- Fixing errors in QuestDB SQL queries
+- Refining and modifying existing queries based on user requests
+
+## When Explaining Queries
+- Focus on the business logic and what the query achieves, not the SQL syntax itself
+- Pay special attention to QuestDB-specific features:
+ - Time-series operations (SAMPLE BY, LATEST ON, designated timestamp columns)
+ - Time-based filtering and aggregations
+ - Real-time data ingestion patterns
+ - Performance optimizations specific to time-series data
+
+## When Generating SQL
+- Always validate the query using the validate_query tool before returning the generated SQL query
+- Generate only valid QuestDB SQL syntax referring to the documentation about functions, operators, and SQL keywords
+- Use appropriate time-series functions (SAMPLE BY, LATEST ON, etc.) and common table expressions when relevant
+- Use \`IN\` with \`today()\`, \`tomorrow()\`, \`yesterday()\` interval functions when relevant
+- Follow QuestDB best practices for performance referring to the documentation
+- Use proper timestamp handling for time-series data
+- Use correct data types and functions specific to QuestDB referring to the documentation. Do not use any word that is not in the documentation.
+
+## When Fixing Queries
+- Always validate the query using the validate_query tool before returning the fixed SQL query
+- Analyze the error message carefully to understand what went wrong
+- Generate only valid QuestDB SQL syntax by always referring to the documentation about functions, operators, and SQL keywords
+- Preserve the original intent of the query while fixing the error
+- Follow QuestDB best practices and syntax rules referring to the documentation
+- Consider common issues like:
+ - Missing or incorrect column names
+ - Invalid syntax for time-series operations
+ - Data type mismatches
+ - Incorrect function usage
+
+## Response Guidelines
+- Modify a query by returning "sql" field only if the user asks you to generate, fix, or make changes to the query. If the user does not ask for fixing/changing/generating a query, return null in the "sql" field. Every time you provide a SQL query, the current SQL is updated.
+- Always provide the "explanation" field, which should be a 2-4 sentence explanation in markdown format.
+
+## Tools
+
+`
+ const schemaAccess = grantSchemaAccess
+ ? `You have access to schema tools:
+- Use the get_tables tool to retrieve all tables and materialized views in the database instance
+- Use the get_table_schema tool to get detailed schema information for a specific table or a materialized view
+`
+ : ""
+ return base + schemaAccess + DOCS_INSTRUCTION_ANTHROPIC
+}
+
+export const getExplainSchemaPrompt = (
+ tableName: string,
+ schema: string,
+ isMatView: boolean,
+) => `You are a SQL expert assistant specializing in QuestDB, a high-performance time-series database.
+Briefly explain the following ${isMatView ? "materialized view" : "table"} schema in detail. Include:
+- The purpose of the ${isMatView ? "materialized view" : "table"}
+- What each column represents and its data type
+- Any important properties like WAL enablement, partitioning strategy, designated timestamps
+- Any performance or storage considerations
+
+${isMatView ? "Materialized View" : "Table"} Name: ${tableName}
+
+Schema:
+\`\`\`sql
+${schema}
+\`\`\`
+
+Provide a short explanation that helps developers understand how to use this ${isMatView ? "materialized view" : "table"}.
+
+Return a JSON string with the following structure:
+{ "explanation": "The purpose of the table/materialized view", "columns": [ { "name": "Column Name", "description": "Column Description", "data_type": "Data Type" } ], "storage_details": ["Storage detail 1", "Storage detail 2"] }`
+
+const MAX_RETRIES = 2
+const RETRY_DELAY = 1000
+
+let lastRequestTime = 0
+const MIN_REQUEST_INTERVAL = 2000
+
+const handleRateLimit = async () => {
+ const now = Date.now()
+ const timeSinceLastRequest = now - lastRequestTime
+ if (timeSinceLastRequest < MIN_REQUEST_INTERVAL) {
+ await new Promise((resolve) =>
+ setTimeout(resolve, MIN_REQUEST_INTERVAL - timeSinceLastRequest),
+ )
+ }
+ lastRequestTime = Date.now()
+}
+
+const isNonRetryableError = (error: unknown) => {
+ return (
+ error instanceof RefusalError ||
+ error instanceof MaxTokensError ||
+ error instanceof Anthropic.AuthenticationError ||
+ (typeof OpenAI !== "undefined" &&
+ error instanceof OpenAI.AuthenticationError) ||
+ // @ts-expect-error no proper rate limit error type
+ ("status" in error && error.status === 429)
+ )
+}
+
+const executeTool = async (
+ toolName: string,
+ input: unknown,
+ modelToolsClient: ModelToolsClient,
+ setStatus: StatusCallback,
+): Promise<{ content: string; is_error?: boolean }> => {
+ try {
+ switch (toolName) {
+ case "get_tables": {
+ setStatus(AIOperationStatus.RetrievingTables)
+ if (!modelToolsClient.getTables) {
+ return {
+ content:
+ "Error: Schema access is not granted. This tool is not available.",
+ is_error: true,
+ }
+ }
+ const result = await modelToolsClient.getTables()
+ return { content: JSON.stringify(result, null, 2) }
+ }
+ case "get_table_schema": {
+ const tableName = (input as { table_name: string })?.table_name
+ if (!modelToolsClient.getTableSchema) {
+ return {
+ content:
+ "Error: Schema access is not granted. This tool is not available.",
+ is_error: true,
+ }
+ }
+ if (!tableName) {
+ return {
+ content: "Error: table_name parameter is required",
+ is_error: true,
+ }
+ }
+ setStatus(AIOperationStatus.InvestigatingTableSchema, {
+ name: tableName,
+ })
+ const result = await modelToolsClient.getTableSchema(tableName)
+ return {
+ content:
+ result || `Table '${tableName}' not found or schema unavailable`,
+ }
+ }
+ case "validate_query": {
+ setStatus(AIOperationStatus.ValidatingQuery)
+ const query = (input as { query: string })?.query
+ if (!query) {
+ return {
+ content: "Error: query parameter is required",
+ is_error: true,
+ }
+ }
+ const result = await modelToolsClient.validateQuery(query)
+ const content = {
+ valid: result.valid,
+ error: result.valid ? undefined : result.error,
+ position: result.valid ? undefined : result.position,
+ }
+ return { content: JSON.stringify(content, null, 2) }
+ }
+ case "get_questdb_toc": {
+ setStatus(AIOperationStatus.RetrievingDocumentation)
+ const tocContent = await getQuestDBTableOfContents()
+ return { content: tocContent }
+ }
+ case "get_questdb_documentation": {
+ const { category, items } =
+ (input as { category: string; items: string[] }) || {}
+ if (!category || !items || !Array.isArray(items)) {
+ return {
+ content: "Error: category and items parameters are required",
+ is_error: true,
+ }
+ }
+ const parsedItems = parseDocItems(items)
+
+ if (parsedItems.length > 0) {
+ setStatus(AIOperationStatus.InvestigatingDocs, { items: parsedItems })
+ } else {
+ setStatus(AIOperationStatus.InvestigatingDocs)
+ }
+ const documentation = await getSpecificDocumentation(
+ category as DocCategory,
+ items,
+ )
+ return { content: documentation }
+ }
+ default:
+ return { content: `Unknown tool: ${toolName}`, is_error: true }
+ }
+ } catch (error) {
+ return {
+ content: `Tool execution error: ${error instanceof Error ? error.message : "Unknown error"}`,
+ is_error: true,
+ }
+ }
+}
+
+interface AnthropicToolCallResult {
+ message: Anthropic.Messages.Message
+ accumulatedTokens: TokenUsage
+}
+
+async function handleToolCalls(
+ message: Anthropic.Messages.Message,
+ anthropic: Anthropic,
+ modelToolsClient: ModelToolsClient,
+ conversationHistory: Array,
+ model: string,
+ setStatus: StatusCallback,
+ responseFormat: ResponseTextConfig,
+ abortSignal?: AbortSignal,
+ accumulatedTokens: TokenUsage = { inputTokens: 0, outputTokens: 0 },
+): Promise {
+ const toolUseBlocks = message.content.filter(
+ (block) => block.type === "tool_use",
+ )
+ const toolResults = []
+
+ if (abortSignal?.aborted) {
+ return {
+ type: "aborted",
+ message: "Operation was cancelled",
+ } as AiAssistantAPIError
+ }
+
+ for (const toolUse of toolUseBlocks) {
+ if ("name" in toolUse) {
+ const exec = await executeTool(
+ toolUse.name,
+ toolUse.input,
+ modelToolsClient,
+ setStatus,
+ )
+ toolResults.push({
+ type: "tool_result" as const,
+ tool_use_id: toolUse.id,
+ content: exec.content,
+ is_error: exec.is_error,
+ })
+ }
+ }
+
+ const updatedHistory = [
+ ...conversationHistory,
+ {
+ role: "assistant" as const,
+ content: message.content,
+ },
+ {
+ role: "user" as const,
+ content: toolResults,
+ },
+ ]
+
+ const followUpParams: Parameters[1] = {
+ model,
+ tools: modelToolsClient ? ALL_TOOLS : REFERENCE_TOOLS,
+ messages: updatedHistory,
+ temperature: 0.3,
+ }
+
+ const format = responseFormat.format as { type: string; schema?: object }
+ if (format.type === "json_schema" && format.schema) {
+ // @ts-expect-error - output_format is a new field not yet in the type definitions
+ followUpParams.output_format = {
+ type: "json_schema",
+ schema: format.schema,
+ }
+ }
+
+ const followUpMessage = await createAnthropicMessage(
+ anthropic,
+ followUpParams,
+ )
+
+ // Accumulate tokens from this response
+ const newAccumulatedTokens: TokenUsage = {
+ inputTokens:
+ accumulatedTokens.inputTokens +
+ (followUpMessage.usage?.input_tokens || 0),
+ outputTokens:
+ accumulatedTokens.outputTokens +
+ (followUpMessage.usage?.output_tokens || 0),
+ }
+
+ if (followUpMessage.stop_reason === "tool_use") {
+ return handleToolCalls(
+ followUpMessage,
+ anthropic,
+ modelToolsClient,
+ updatedHistory,
+ model,
+ setStatus,
+ responseFormat,
+ abortSignal,
+ newAccumulatedTokens,
+ )
+ }
+
+ return {
+ message: followUpMessage,
+ accumulatedTokens: newAccumulatedTokens,
+ }
+}
+
+const extractOpenAIToolCalls = (
+ response: OpenAI.Responses.Response,
+): { id?: string; name: string; arguments: unknown; call_id: string }[] => {
+ const calls = []
+ for (const item of response.output) {
+ if (item?.type === "function_call") {
+ const args =
+ typeof item.arguments === "string"
+ ? safeJsonParse(item.arguments)
+ : item.arguments || {}
+ calls.push({
+ id: item.id,
+ name: item.name,
+ arguments: args,
+ call_id: item.call_id,
+ })
+ }
+ }
+ return calls
+}
+
+const getOpenAIText = (
+ response: OpenAI.Responses.Response,
+): { type: "refusal" | "text"; message: string } => {
+ const out = response.output || []
+ if (
+ out.find(
+ (item: ResponseOutputItem) =>
+ item.type === "message" &&
+ item.content.some((c) => c.type === "refusal"),
+ )
+ ) {
+ return {
+ type: "refusal",
+ message: "The model refused to generate a response for this request.",
+ }
+ }
+ return { type: "text", message: response.output_text }
+}
+
+const safeJsonParse = (text: string): T | object => {
+ try {
+ return JSON.parse(text) as T
+ } catch {
+ return {}
+ }
+}
+
+const tryWithRetries = async (
+ fn: () => Promise,
+ setStatus: StatusCallback,
+ abortSignal?: AbortSignal,
+): Promise => {
+ let retries = 0
+ while (retries <= MAX_RETRIES) {
+ try {
+ if (abortSignal?.aborted) {
+ return {
+ type: "aborted",
+ message: "Operation was cancelled",
+ } as AiAssistantAPIError
+ }
+
+ return await fn()
+ } catch (error) {
+ retries++
+ if (retries > MAX_RETRIES || isNonRetryableError(error)) {
+ setStatus(null)
+ return handleAiAssistantError(error)
+ }
+
+ await new Promise((resolve) => setTimeout(resolve, RETRY_DELAY * retries))
+ }
+ }
+
+ setStatus(null)
+ return {
+ type: "unknown",
+ message: `Failed to get response after ${retries} retries`,
+ }
+}
+
+interface OpenAIFlowConfig {
+ systemInstructions: string
+ initialUserContent: string
+ conversationHistory?: Array<{ role: "user" | "assistant"; content: string }>
+ responseFormat: ResponseTextConfig
+ postProcess?: (formatted: T) => T
+}
+
+interface AnthropicFlowConfig {
+ systemInstructions: string
+ initialUserContent: string
+ conversationHistory?: Array<{ role: "user" | "assistant"; content: string }>
+ responseFormat: ResponseTextConfig
+ postProcess?: (formatted: T) => T
+}
+
+interface ExecuteAnthropicFlowParams {
+ anthropic: Anthropic
+ model: string
+ config: AnthropicFlowConfig
+ modelToolsClient: ModelToolsClient
+ setStatus: StatusCallback
+ abortSignal?: AbortSignal
+}
+
+interface ExecuteOpenAIFlowParams {
+ openai: OpenAI
+ model: string
+ config: OpenAIFlowConfig
+ modelToolsClient: ModelToolsClient
+ setStatus: StatusCallback
+ abortSignal?: AbortSignal
+}
+
+const executeOpenAIFlow = async ({
+ openai,
+ model,
+ config,
+ modelToolsClient,
+ setStatus,
+ abortSignal,
+}: ExecuteOpenAIFlowParams): Promise => {
+ let input: OpenAI.Responses.ResponseInput = []
+ if (config.conversationHistory && config.conversationHistory.length > 0) {
+ for (const msg of config.conversationHistory) {
+ input.push({
+ role: msg.role,
+ content: msg.content,
+ })
+ }
+ }
+
+ input.push({
+ role: "user",
+ content: config.initialUserContent,
+ })
+
+ const grantSchemaAccess = !!modelToolsClient.getTables
+ const openaiTools = toOpenAIFunctions(
+ grantSchemaAccess ? ALL_TOOLS : REFERENCE_TOOLS,
+ )
+
+ // Accumulate tokens across all iterations
+ let totalInputTokens = 0
+ let totalOutputTokens = 0
+
+ let lastResponse = await openai.responses.create({
+ ...getModelProps(model),
+ instructions: config.systemInstructions,
+ input,
+ tools: openaiTools,
+ text: config.responseFormat,
+ } as OpenAI.Responses.ResponseCreateParamsNonStreaming)
+ input = [...input, ...lastResponse.output]
+
+ // Add tokens from first response
+ totalInputTokens += lastResponse.usage?.input_tokens ?? 0
+ totalOutputTokens += lastResponse.usage?.output_tokens ?? 0
+
+ while (true) {
+ if (abortSignal?.aborted) {
+ return {
+ type: "aborted",
+ message: "Operation was cancelled",
+ } as AiAssistantAPIError
+ }
+
+ const toolCalls = extractOpenAIToolCalls(lastResponse)
+ if (!toolCalls.length) break
+ const tool_outputs: OpenAI.Responses.ResponseFunctionToolCallOutputItem[] =
+ []
+ for (const tc of toolCalls) {
+ const exec = await executeTool(
+ tc.name,
+ tc.arguments,
+ modelToolsClient,
+ setStatus,
+ )
+ tool_outputs.push({
+ type: "function_call_output",
+ call_id: tc.call_id,
+ output: exec.content,
+ } as OpenAI.Responses.ResponseFunctionToolCallOutputItem)
+ }
+ input = [...input, ...tool_outputs]
+ lastResponse = await openai.responses.create({
+ ...getModelProps(model),
+ instructions: config.systemInstructions,
+ input,
+ tools: openaiTools,
+ text: config.responseFormat,
+ })
+ input = [...input, ...lastResponse.output]
+
+ // Accumulate tokens from each iteration
+ totalInputTokens += lastResponse.usage?.input_tokens ?? 0
+ totalOutputTokens += lastResponse.usage?.output_tokens ?? 0
+ }
+
+ if (abortSignal?.aborted) {
+ return {
+ type: "aborted",
+ message: "Operation was cancelled",
+ } as AiAssistantAPIError
+ }
+
+ const text = getOpenAIText(lastResponse)
+ if (text.type === "refusal") {
+ return {
+ type: "unknown",
+ message: text.message,
+ } as AiAssistantAPIError
+ }
+
+ const rawOutput = text.message
+
+ try {
+ const json = JSON.parse(rawOutput) as T
+ setStatus(null)
+
+ const resultWithTokens = {
+ ...json,
+ tokenUsage: {
+ inputTokens: totalInputTokens,
+ outputTokens: totalOutputTokens,
+ },
+ } as T & { tokenUsage: TokenUsage }
+
+ if (config.postProcess) {
+ const processed = config.postProcess(json)
+ return {
+ ...processed,
+ tokenUsage: {
+ inputTokens: totalInputTokens,
+ outputTokens: totalOutputTokens,
+ },
+ } as T & { tokenUsage: TokenUsage }
+ }
+ return resultWithTokens
+ } catch (error) {
+ setStatus(null)
+ return {
+ type: "unknown",
+ message: "Failed to parse assistant response.",
+ } as AiAssistantAPIError
+ }
+}
+
+const executeAnthropicFlow = async ({
+ anthropic,
+ model,
+ config,
+ modelToolsClient,
+ setStatus,
+ abortSignal,
+}: ExecuteAnthropicFlowParams): Promise => {
+ const initialMessages: MessageParam[] = []
+ if (config.conversationHistory && config.conversationHistory.length > 0) {
+ for (const msg of config.conversationHistory) {
+ initialMessages.push({
+ role: msg.role,
+ content: msg.content,
+ })
+ }
+ }
+
+ initialMessages.push({
+ role: "user" as const,
+ content: config.initialUserContent,
+ })
+
+ const grantSchemaAccess = !!modelToolsClient.getTables
+
+ const messageParams: Parameters[1] = {
+ model,
+ system: config.systemInstructions,
+ tools: grantSchemaAccess ? ALL_TOOLS : REFERENCE_TOOLS,
+ messages: initialMessages,
+ temperature: 0.3,
+ }
+
+ if (config.responseFormat?.format) {
+ const format = config.responseFormat.format as {
+ type: string
+ schema?: object
+ }
+ if (format.type === "json_schema" && format.schema) {
+ // @ts-expect-error - output_format is a new field not yet in the type definitions
+ messageParams.output_format = {
+ type: "json_schema",
+ schema: format.schema,
+ }
+ }
+ }
+
+ const message = await createAnthropicMessage(anthropic, messageParams)
+
+ let totalInputTokens = message.usage?.input_tokens || 0
+ let totalOutputTokens = message.usage?.output_tokens || 0
+
+ let responseMessage: Anthropic.Messages.Message
+
+ if (message.stop_reason === "tool_use") {
+ const toolCallResult = await handleToolCalls(
+ message,
+ anthropic,
+ modelToolsClient,
+ initialMessages,
+ model,
+ setStatus,
+ config.responseFormat,
+ abortSignal,
+ { inputTokens: 0, outputTokens: 0 }, // Start fresh, we already counted initial message
+ )
+
+ if ("type" in toolCallResult && "message" in toolCallResult) {
+ return toolCallResult
+ }
+
+ const result = toolCallResult
+ responseMessage = result.message
+ totalInputTokens += result.accumulatedTokens.inputTokens
+ totalOutputTokens += result.accumulatedTokens.outputTokens
+ } else {
+ responseMessage = message
+ }
+
+ if (abortSignal?.aborted) {
+ return {
+ type: "aborted",
+ message: "Operation was cancelled",
+ } as AiAssistantAPIError
+ }
+
+ const textBlock = responseMessage.content.find(
+ (block) => block.type === "text",
+ )
+ if (!textBlock || !("text" in textBlock)) {
+ setStatus(null)
+ return {
+ type: "unknown",
+ message: "No text response received from assistant.",
+ } as AiAssistantAPIError
+ }
+
+ try {
+ const json = JSON.parse(textBlock.text) as T
+ setStatus(null)
+
+ const resultWithTokens = {
+ ...json,
+ tokenUsage: {
+ inputTokens: totalInputTokens,
+ outputTokens: totalOutputTokens,
+ },
+ } as T & { tokenUsage: TokenUsage }
+
+ if (config.postProcess) {
+ const processed = config.postProcess(json)
+ return {
+ ...processed,
+ tokenUsage: {
+ inputTokens: totalInputTokens,
+ outputTokens: totalOutputTokens,
+ },
+ } as T & { tokenUsage: TokenUsage }
+ }
+ return resultWithTokens
+ } catch (error) {
+ setStatus(null)
+ return {
+ type: "unknown",
+ message: "Failed to parse assistant response.",
+ } as AiAssistantAPIError
+ }
+}
+
+export const explainTableSchema = async ({
+ tableName,
+ schema,
+ isMatView,
+ settings,
+ setStatus,
+ conversationId,
+}: {
+ tableName: string
+ schema: string
+ isMatView: boolean
+ settings: ActiveProviderSettings
+ setStatus: StatusCallback
+ conversationId?: ConversationId
+}): Promise => {
+ if (!settings.apiKey || !settings.model) {
+ return {
+ type: "invalid_key",
+ message: "API key is missing",
+ }
+ }
+ if (!tableName || !schema) {
+ return {
+ type: "unknown",
+ message: "Cannot find schema for the table",
+ }
+ }
+
+ await handleRateLimit()
+ setStatus(AIOperationStatus.Processing, { type: "explain", conversationId })
+
+ return tryWithRetries(async () => {
+ const clients = createProviderClients(settings)
+
+ if (clients.provider === "openai") {
+ const prompt = getExplainSchemaPrompt(tableName, schema, isMatView)
+
+ const formattingOutput = await clients.openai.responses.parse({
+ ...getModelProps(settings.model),
+ instructions: getExplainSchemaPrompt(tableName, schema, isMatView),
+ input: [{ role: "user", content: prompt }],
+ text: ExplainTableSchemaFormat,
+ })
+
+ const formatted =
+ formattingOutput.output_parsed as TableSchemaExplanation | null
+ setStatus(null)
+ if (!formatted) {
+ return {
+ type: "unknown",
+ message: "Failed to parse assistant response.",
+ } as AiAssistantAPIError
+ }
+ const openAIUsage = formattingOutput.usage
+ return {
+ explanation: formatted.explanation || "",
+ columns: formatted.columns || [],
+ storage_details: formatted.storage_details || [],
+ tokenUsage: openAIUsage
+ ? {
+ inputTokens: openAIUsage.input_tokens,
+ outputTokens: openAIUsage.output_tokens,
+ }
+ : undefined,
+ }
+ }
+
+ const anthropic = clients.anthropic
+ const messageParams: Parameters[1] = {
+ model: getModelProps(settings.model).model,
+ messages: [
+ {
+ role: "user" as const,
+ content: getExplainSchemaPrompt(tableName, schema, isMatView),
+ },
+ ],
+ temperature: 0.3,
+ }
+ const schemaFormat = ExplainTableSchemaFormat.format as {
+ type: string
+ schema?: object
+ }
+ // @ts-expect-error - output_format is a new field not yet in the type definitions
+ messageParams.output_format = {
+ type: "json_schema",
+ schema: schemaFormat.schema,
+ }
+
+ const message = await createAnthropicMessage(anthropic, messageParams)
+
+ const textBlock = message.content.find((block) => block.type === "text")
+ if (!textBlock || !("text" in textBlock)) {
+ setStatus(null)
+ return {
+ type: "unknown",
+ message: "No text response received from assistant.",
+ } as AiAssistantAPIError
+ }
+
+ try {
+ const json = JSON.parse(textBlock.text) as TableSchemaExplanation
+ setStatus(null)
+ const anthropicUsage = message.usage
+ return {
+ explanation: json.explanation || "",
+ columns: json.columns || [],
+ storage_details: json.storage_details || [],
+ tokenUsage: anthropicUsage
+ ? {
+ inputTokens: anthropicUsage.input_tokens,
+ outputTokens: anthropicUsage.output_tokens,
+ }
+ : undefined,
+ }
+ } catch (error) {
+ setStatus(null)
+ return {
+ type: "unknown",
+ message: "Failed to parse assistant response.",
+ } as AiAssistantAPIError
+ }
+ }, setStatus)
+}
+
+class RefusalError extends Error {
+ constructor(message: string) {
+ super(message)
+ this.name = "RefusalError"
+ }
+}
+
+class MaxTokensError extends Error {
+ constructor(message: string) {
+ super(message)
+ this.name = "MaxTokensError"
+ }
+}
+
+async function createAnthropicMessage(
+ anthropic: Anthropic,
+ params: Omit & {
+ max_tokens?: number
+ },
+): Promise {
+ const message = await anthropic.messages.create(
+ {
+ ...params,
+ stream: false,
+ max_tokens: params.max_tokens ?? 8192,
+ },
+ {
+ headers: {
+ "anthropic-beta": "structured-outputs-2025-11-13",
+ },
+ },
+ )
+
+ if (message.stop_reason === "refusal") {
+ throw new RefusalError(
+ "The model refused to generate a response for this request.",
+ )
+ }
+ if (message.stop_reason === "max_tokens") {
+ throw new MaxTokensError(
+ "The response exceeded the maximum token limit. Please try generating shorter queries or increase token limits.",
+ )
+ }
+
+ return message
+}
+
+function handleAiAssistantError(error: unknown): AiAssistantAPIError {
+ if (error instanceof RefusalError) {
+ return {
+ type: "unknown",
+ message: "The model refused to generate a response for this request.",
+ details: error.message,
+ }
+ }
+
+ if (error instanceof MaxTokensError) {
+ return {
+ type: "unknown",
+ message:
+ "The response exceeded the maximum token limit. Please try generating shorter queries or increase token limits.",
+ details: error.message,
+ }
+ }
+
+ if (error instanceof Anthropic.AuthenticationError) {
+ return {
+ type: "invalid_key",
+ message: "Invalid API key. Please check your Anthropic API key.",
+ details: error.message,
+ }
+ }
+
+ if (error instanceof Anthropic.RateLimitError) {
+ return {
+ type: "rate_limit",
+ message: "Rate limit exceeded. Please try again later.",
+ details: error.message,
+ }
+ }
+
+ if (error instanceof Anthropic.APIConnectionError) {
+ return {
+ type: "network",
+ message: "Network error. Please check your internet connection.",
+ details: error.message,
+ }
+ }
+
+ if (error instanceof Anthropic.APIError) {
+ return {
+ type: "unknown",
+ message: `Anthropic API error: ${error.message}`,
+ }
+ }
+
+ if (error instanceof OpenAI.APIError) {
+ return {
+ type: "unknown",
+ message: `OpenAI API error: ${error.message}`,
+ }
+ }
+
+ return {
+ type: "unknown",
+ message: "An unexpected error occurred. Please try again.",
+ details: error as string,
+ }
+}
+
+export const testApiKey = async (
+ apiKey: string,
+ model: string,
+): Promise<{ valid: boolean; error?: string }> => {
+ try {
+ if (inferProviderFromModel(model) === "anthropic") {
+ const anthropic = new Anthropic({
+ apiKey,
+ dangerouslyAllowBrowser: true,
+ })
+
+ await createAnthropicMessage(anthropic, {
+ model,
+ messages: [
+ {
+ role: "user",
+ content: "ping",
+ },
+ ],
+ })
+ } else {
+ const openai = new OpenAI({ apiKey, dangerouslyAllowBrowser: true })
+ await openai.responses.create({
+ model: getModelProps(model).model,
+ input: [{ role: "user", content: "ping" }],
+ max_output_tokens: 16,
+ })
+ }
+
+ return { valid: true }
+ } catch (error: unknown) {
+ if (error instanceof Anthropic.AuthenticationError) {
+ return {
+ valid: false,
+ error: "Invalid API key",
+ }
+ }
+
+ if (error instanceof Anthropic.RateLimitError) {
+ return {
+ valid: true,
+ }
+ }
+
+ const status =
+ (error as { status?: number })?.status ||
+ (error as { error?: { status?: number } })?.error?.status
+ if (status === 401) {
+ return { valid: false, error: "Invalid API key" }
+ }
+ if (status === 429) {
+ return { valid: true }
+ }
+
+ return {
+ valid: false,
+ error:
+ error instanceof Error ? error.message : "Failed to validate API key",
+ }
+ }
+}
+
+const ChatTitleFormat: ResponseTextConfig = {
+ format: {
+ type: "json_schema" as const,
+ name: "chat_title_format",
+ schema: {
+ type: "object",
+ properties: {
+ title: { type: "string" },
+ },
+ required: ["title"],
+ additionalProperties: false,
+ },
+ strict: true,
+ },
+}
+
+export const generateChatTitle = async ({
+ firstUserMessage,
+ settings,
+}: {
+ firstUserMessage: string
+ settings: ActiveProviderSettings
+}): Promise => {
+ if (!settings.apiKey || !settings.model) {
+ return null
+ }
+
+ try {
+ const clients = createProviderClients(settings)
+
+ const prompt = `Generate a concise chat title (max 30 characters) for this conversation. The title should capture the main topic or intent.
+
+User's message:
+${firstUserMessage}
+
+Return a JSON object with the following structure: { "title": "Your title here" }`
+
+ if (clients.provider === "openai") {
+ const response = await clients.openai.responses.create({
+ ...getModelProps(settings.model),
+ input: [{ role: "user", content: prompt }],
+ text: ChatTitleFormat,
+ max_output_tokens: 100,
+ })
+ try {
+ const parsed = JSON.parse(response.output_text) as { title: string }
+ return parsed.title || null
+ } catch {
+ return null
+ }
+ }
+
+ const messageParams: Parameters[1] = {
+ model: settings.model,
+ messages: [{ role: "user", content: prompt }],
+ max_tokens: 100,
+ temperature: 0.3,
+ }
+ const titleFormat = ChatTitleFormat.format as {
+ type: string
+ schema?: object
+ }
+ // @ts-expect-error - output_format is a new field not yet in the type definitions
+ messageParams.output_format = {
+ type: "json_schema",
+ schema: titleFormat.schema,
+ }
+
+ const message = await createAnthropicMessage(
+ clients.anthropic,
+ messageParams,
+ )
+
+ const textBlock = message.content.find((block) => block.type === "text")
+ if (textBlock && "text" in textBlock) {
+ try {
+ const parsed = JSON.parse(textBlock.text) as { title: string }
+ return parsed.title?.slice(0, 40) || null
+ } catch {
+ return null
+ }
+ }
+ return null
+ } catch (error) {
+ // Silently fail - title generation is not critical
+ console.warn("Failed to generate chat title:", error)
+ return null
+ }
+}
+
+export type AIOperation = "explain" | "fix" | "followup"
+
+export const continueConversation = async ({
+ userMessage,
+ conversationHistory,
+ currentSQL,
+ settings,
+ modelToolsClient,
+ setStatus,
+ abortSignal,
+ operation = "followup",
+ conversationId,
+}: {
+ userMessage: string
+ conversationHistory: Array<{ role: "user" | "assistant"; content: string }>
+ currentSQL?: string
+ settings: ActiveProviderSettings
+ modelToolsClient: ModelToolsClient
+ setStatus: StatusCallback
+ abortSignal?: AbortSignal
+ operation?: AIOperation
+ conversationId?: ConversationId
+}): Promise => {
+ if (!settings.apiKey || !settings.model) {
+ return {
+ type: "invalid_key",
+ message: "API key or model is missing",
+ }
+ }
+
+ await handleRateLimit()
+ if (abortSignal?.aborted) {
+ return {
+ type: "aborted",
+ message: "Operation was cancelled",
+ }
+ }
+
+ const responseFormat = {
+ explain: ExplainFormat,
+ fix: FixSQLFormat,
+ followup: ConversationResponseFormat,
+ }[operation]
+
+ const statusType = {
+ explain: "explain" as const,
+ fix: "fix" as const,
+ followup: "followup" as const,
+ }[operation]
+
+ setStatus(AIOperationStatus.Processing, { type: statusType, conversationId })
+
+ return tryWithRetries(
+ async () => {
+ const clients = createProviderClients(settings)
+ const grantSchemaAccess = !!modelToolsClient.getTables
+
+ const hasAssistantMessages = conversationHistory.some(
+ (msg) => msg.role === "assistant",
+ )
+
+ let userMessageWithContext = userMessage
+ if (hasAssistantMessages) {
+ // This is a true follow-up message (has previous assistant responses)
+ // Check if userMessage already contains SQL context (from stored enriched message)
+ // If it does, use it as-is; otherwise add follow-up prefix
+ if (
+ userMessage.includes("Current SQL query:") ||
+ userMessage.includes("```sql")
+ ) {
+ // Already enriched, use as-is
+ userMessageWithContext = userMessage
+ } else {
+ // Plain follow-up, add prefix
+ userMessageWithContext = `Follow-up message on top of your latest changes: ${userMessage}`
+ }
+ }
+ // If userMessage already contains SQL context (from stored enriched message), use it as-is
+ // Otherwise, if it's the first message and we have currentSQL, add context
+ else if (
+ currentSQL &&
+ !userMessage.includes("Current SQL query:") &&
+ !userMessage.includes("```sql")
+ ) {
+ // First message with SQL context (like "Ask AI" flow)
+ userMessageWithContext = `Current SQL query:\n\`\`\`sql\n${currentSQL}\n\`\`\`\n\nUser request: ${userMessage}`
+ }
+
+ // Build the conversation history to pass to execute functions
+ // This should exclude the last message since it will be added as initialUserContent
+ const historyWithoutLastMessage =
+ conversationHistory && conversationHistory.length > 0
+ ? conversationHistory.slice(0, -1)
+ : []
+
+ const postProcess = (formatted: {
+ sql?: string | null
+ explanation: string
+ tokenUsage?: TokenUsage
+ }): GeneratedSQL => {
+ // If SQL is explicitly null, preserve that (no SQL change)
+ // If SQL is undefined or empty, fall back to currentSQL
+ // Otherwise normalize and use the provided SQL
+ const sql =
+ formatted?.sql === null
+ ? null
+ : formatted?.sql
+ ? normalizeSql(formatted.sql)
+ : currentSQL || ""
+ return {
+ sql,
+ explanation: formatted?.explanation || "",
+ tokenUsage: formatted.tokenUsage,
+ }
+ }
+
+ if (clients.provider === "openai") {
+ const result = await executeOpenAIFlow<{
+ sql?: string | null
+ explanation: string
+ tokenUsage?: TokenUsage
+ }>({
+ openai: clients.openai,
+ model: settings.model,
+ config: {
+ systemInstructions: getUnifiedPrompt(grantSchemaAccess),
+ initialUserContent: userMessageWithContext,
+ conversationHistory: historyWithoutLastMessage,
+ responseFormat,
+ postProcess: (formatted) => {
+ const sql =
+ formatted?.sql === null
+ ? null
+ : formatted?.sql
+ ? normalizeSql(formatted.sql)
+ : currentSQL || ""
+ return {
+ sql,
+ explanation: formatted?.explanation || "",
+ tokenUsage: formatted.tokenUsage,
+ }
+ },
+ },
+ modelToolsClient,
+ setStatus,
+ abortSignal,
+ })
+ if (isAiAssistantError(result)) {
+ return result
+ }
+ return postProcess(result)
+ }
+
+ const result = await executeAnthropicFlow<{
+ sql?: string | null
+ explanation: string
+ tokenUsage?: TokenUsage
+ }>({
+ anthropic: clients.anthropic,
+ model: settings.model,
+ config: {
+ systemInstructions: getUnifiedPrompt(grantSchemaAccess),
+ initialUserContent: userMessageWithContext,
+ conversationHistory: historyWithoutLastMessage,
+ responseFormat,
+ postProcess: (formatted) => {
+ const sql =
+ formatted?.sql === null
+ ? null
+ : formatted?.sql
+ ? normalizeSql(formatted.sql)
+ : currentSQL || ""
+ return {
+ sql,
+ explanation: formatted?.explanation || "",
+ tokenUsage: formatted.tokenUsage,
+ }
+ },
+ },
+ modelToolsClient,
+ setStatus,
+ abortSignal,
+ })
+ if (isAiAssistantError(result)) {
+ return result
+ }
+ return postProcess(result)
+ },
+ setStatus,
+ abortSignal,
+ )
+}
diff --git a/src/utils/aiAssistantSettings.ts b/src/utils/aiAssistantSettings.ts
new file mode 100644
index 000000000..7892e29b5
--- /dev/null
+++ b/src/utils/aiAssistantSettings.ts
@@ -0,0 +1,193 @@
+import { ReasoningEffort } from "openai/resources/shared"
+import type { AiAssistantSettings } from "../providers/LocalStorageProvider/types"
+
+export type Provider = "anthropic" | "openai"
+
+export type ModelOption = {
+ label: string
+ value: string
+ provider: Provider
+ isSlow?: boolean
+ isTestModel?: boolean
+ default?: boolean
+ defaultEnabled?: boolean
+}
+
+export const MODEL_OPTIONS: ModelOption[] = [
+ {
+ label: "Claude Sonnet 4.5",
+ value: "claude-sonnet-4-5",
+ provider: "anthropic",
+ default: true,
+ defaultEnabled: true,
+ },
+ {
+ label: "Claude Opus 4.5",
+ value: "claude-opus-4-5",
+ provider: "anthropic",
+ isSlow: true,
+ defaultEnabled: true,
+ },
+ {
+ label: "Claude Sonnet 4",
+ value: "claude-sonnet-4",
+ provider: "anthropic",
+ },
+ {
+ label: "Claude Haiku 4.5",
+ value: "claude-haiku-4-5",
+ provider: "anthropic",
+ isTestModel: true,
+ },
+ {
+ label: "GPT-5.1 (High Reasoning)",
+ value: "gpt-5.1@reasoning=high",
+ provider: "openai",
+ isSlow: true,
+ },
+ {
+ label: "GPT-5.1 (Medium Reasoning)",
+ value: "gpt-5.1@reasoning=medium",
+ provider: "openai",
+ isSlow: true,
+ defaultEnabled: true,
+ },
+ {
+ label: "GPT-5.1 (No Reasoning)",
+ value: "gpt-5.1",
+ provider: "openai",
+ defaultEnabled: true,
+ isTestModel: true,
+ },
+ {
+ label: "GPT-5",
+ value: "gpt-5",
+ provider: "openai",
+ defaultEnabled: true,
+ },
+ {
+ label: "GPT-5 mini",
+ value: "gpt-5-mini",
+ provider: "openai",
+ default: true,
+ defaultEnabled: true,
+ },
+]
+
+export const providerForModel = (model: ModelOption["value"]): Provider => {
+ return MODEL_OPTIONS.find((m) => m.value === model)!.provider
+}
+
+export const getModelProps = (
+ model: ModelOption["value"],
+): {
+ model: string
+ reasoning?: { effort: ReasoningEffort }
+} => {
+ const modelOption = MODEL_OPTIONS.find((m) => m.value === model)
+ if (!modelOption) {
+ return { model }
+ }
+ const parts = modelOption.value.split("@")
+ const modelName = parts[0]
+ const extraParams = parts[1]
+ if (extraParams) {
+ const params = extraParams.split("=")
+ const paramName = params[0]
+ const paramValue = params[1]
+ if (paramName === "reasoning" && paramValue) {
+ return {
+ model: modelName,
+ reasoning: { effort: paramValue as ReasoningEffort },
+ }
+ }
+ }
+ return { model: modelName }
+}
+
+export const getAllProviders = (): Provider[] => {
+ const providers = new Set()
+ MODEL_OPTIONS.forEach((model) => {
+ providers.add(model.provider)
+ })
+ return Array.from(providers)
+}
+
+export const getSelectedModel = (
+ settings: AiAssistantSettings,
+): string | null => {
+ const selectedModel = settings.selectedModel
+ if (
+ selectedModel &&
+ typeof selectedModel === "string" &&
+ MODEL_OPTIONS.find((m) => m.value === selectedModel)
+ ) {
+ return selectedModel
+ }
+
+ return MODEL_OPTIONS.find((m) => m.default)?.value ?? null
+}
+
+export const getNextModel = (
+ currentModel: string | undefined,
+ enabledModels: Record,
+): string | null => {
+ let nextModel: string | null | undefined = currentModel
+
+ const modelProvider = currentModel ? providerForModel(currentModel) : null
+ if (modelProvider && enabledModels[modelProvider].length > 0) {
+ // Current model is still enabled, so we can use it
+ if (currentModel && enabledModels[modelProvider].includes(currentModel)) {
+ return currentModel
+ }
+ // Take the default model of this provider, otherwise the first enabled model of this provider
+ nextModel =
+ enabledModels[modelProvider].find(
+ (m) => MODEL_OPTIONS.find((mo) => mo.value === m)?.default,
+ ) ?? enabledModels[modelProvider][0]
+ } else {
+    // No enabled models remain for this provider; fall back to another provider, if one exists
+ const otherProviderWithEnabledModel = getAllProviders().find(
+ (p) => enabledModels[p].length > 0,
+ )
+ if (otherProviderWithEnabledModel) {
+ nextModel =
+ enabledModels[otherProviderWithEnabledModel].find(
+ (m) => MODEL_OPTIONS.find((mo) => mo.value === m)?.default,
+ ) ?? enabledModels[otherProviderWithEnabledModel][0]
+ } else {
+ nextModel = null
+ }
+ }
+ return nextModel ?? null
+}
+
+export const isAiAssistantConfigured = (
+ settings: AiAssistantSettings,
+): boolean => {
+ return getAllProviders().some(
+ (provider) => !!settings.providers?.[provider]?.apiKey,
+ )
+}
+
+export const canUseAiAssistant = (settings: AiAssistantSettings): boolean => {
+ return isAiAssistantConfigured(settings) && !!settings.selectedModel
+}
+
+export const hasSchemaAccess = (settings: AiAssistantSettings): boolean => {
+ const selectedModel = getSelectedModel(settings)
+ if (!selectedModel) return false
+
+ const anthropicModels = settings.providers?.anthropic?.enabledModels || []
+ const openaiModels = settings.providers?.openai?.enabledModels || []
+
+ if (anthropicModels.includes(selectedModel)) {
+ return settings.providers?.anthropic?.grantSchemaAccess === true
+ }
+
+ if (openaiModels.includes(selectedModel)) {
+ return settings.providers?.openai?.grantSchemaAccess === true
+ }
+
+ return false
+}
diff --git a/src/utils/formatSql.ts b/src/utils/formatSql.ts
index 51cdfb58e..dbbcd0d6c 100644
--- a/src/utils/formatSql.ts
+++ b/src/utils/formatSql.ts
@@ -2,7 +2,7 @@ import { format, FormatOptions } from "sql-formatter"
export const formatSql = (statement: string, options?: FormatOptions) => {
return format(statement, {
- language: "postgresql",
+ language: "mysql",
...options,
})
}
diff --git a/src/utils/hashString.ts b/src/utils/hashString.ts
new file mode 100644
index 000000000..079edd82c
--- /dev/null
+++ b/src/utils/hashString.ts
@@ -0,0 +1,9 @@
+export const hashString = (str: string): string => {
+ let hash = 0
+ for (let i = 0; i < str.length; i++) {
+ const char = str.charCodeAt(i)
+ hash = (hash << 5) - hash + char
+ hash &= hash
+ }
+ return new Uint32Array([hash])[0].toString(36)
+}
diff --git a/src/utils/index.ts b/src/utils/index.ts
index 2b5a234a0..3fdf53168 100644
--- a/src/utils/index.ts
+++ b/src/utils/index.ts
@@ -36,3 +36,4 @@ export * from "./pick"
export * from "./fetchUserLocale"
export * from "./getLocaleFromLanguage"
export * from "./uniq"
+export * from "./hashString"
diff --git a/src/utils/localStorage/types.ts b/src/utils/localStorage/types.ts
index 308d50a6d..3777aa6a6 100644
--- a/src/utils/localStorage/types.ts
+++ b/src/utils/localStorage/types.ts
@@ -39,4 +39,6 @@ export enum StoreKey {
AUTO_REFRESH_TABLES = "auto.refresh.tables",
SSO_USERNAME = "sso.username",
LEFT_PANEL_STATE = "left.panel.state",
+ AI_ASSISTANT_SETTINGS = "ai.assistant.settings",
+ AI_CHAT_PANEL_WIDTH = "ai.chat.panel.width",
}
diff --git a/src/utils/monacoInit.ts b/src/utils/monacoInit.ts
new file mode 100644
index 000000000..865e3fdae
--- /dev/null
+++ b/src/utils/monacoInit.ts
@@ -0,0 +1,19 @@
+import { loader } from "@monaco-editor/react"
+import dracula from "../scenes/Editor/Monaco/dracula"
+import { registerLanguageAddons } from "../scenes/Editor/Monaco/editor-addons"
+
+loader.config({
+ paths: {
+ vs: "assets/vs",
+ },
+})
+
+// This runs once at app startup, before any editor mounts
+export const monacoPromise = loader.init().then((monaco) => {
+ registerLanguageAddons(monaco)
+
+ monaco.editor.defineTheme("dracula", dracula)
+ monaco.editor.setTheme("dracula")
+
+ return monaco
+})
diff --git a/src/utils/questdb/client.ts b/src/utils/questdb/client.ts
index e8b11d2ba..38d6eff21 100644
--- a/src/utils/questdb/client.ts
+++ b/src/utils/questdb/client.ts
@@ -24,6 +24,9 @@ import {
Preferences,
Permission,
SymbolColumnDetails,
+ ValidateQueryResult,
+ ValidateQuerySuccessResult,
+ ValidateQueryErrorResult,
} from "./types"
import { ssoAuthState } from "../../modules/OAuth2/ssoAuthState"
@@ -307,6 +310,27 @@ export class Client {
return Promise.reject(errorPayload)
}
+ async validateQuery(query: string): Promise {
+ const response = await fetch(
+ `api/v1/sql/validate?${Client.encodeParams({ query })}`,
+ {
+ headers: this.commonHeaders,
+ },
+ )
+ if (response.ok) {
+ return (await response.json()) as ValidateQuerySuccessResult
+ }
+
+ if (response.status === 400 || response.status === 403) {
+ return (await response.json()) as ValidateQueryErrorResult
+ }
+
+ return Promise.reject({
+ status: response.status,
+ statusText: response.statusText,
+ })
+ }
+
async showTables(): Promise> {
const response = await this.query("tables();")
diff --git a/src/utils/questdb/types.ts b/src/utils/questdb/types.ts
index b3126eedc..3e15a3146 100644
--- a/src/utils/questdb/types.ts
+++ b/src/utils/questdb/types.ts
@@ -128,6 +128,56 @@ export type QueryResult> =
| DdlResult
| NoticeResult
+type QueryType =
+ | "INSERT"
+ | "TRUNCATE"
+ | "ALTER TABLE"
+ | "SET"
+ | "DROP"
+ | "COPY"
+ | "CREATE TABLE"
+ | "INSERT AS SELECT"
+ | "COPY REMOTE"
+ | "RENAME TABLE"
+ | "REPAIR"
+ | "BACKUP TABLE"
+ | "UPDATE"
+ | "VACUUM"
+ | "BEGIN"
+ | "COMMIT"
+ | "ROLLBACK"
+ | "CREATE AS SELECT"
+ | "CHECKPOINT CREATE"
+ | "CHECKPOINT RELEASE"
+ | "DEALLOCATE"
+ | "EXPLAIN"
+ | "TABLE RESUME"
+
+export type ValidateQuerySuccessResult =
+ | {
+ query: string
+ columns: Array<{
+ name: string
+ type: string
+ dim?: number
+ elemType?: string
+ }>
+ timestamp: number
+ }
+ | {
+ queryType: QueryType
+ }
+
+export type ValidateQueryErrorResult = {
+ query: string
+ position: number
+ error: string
+}
+
+export type ValidateQueryResult =
+ | ValidateQuerySuccessResult
+ | ValidateQueryErrorResult
+
export type PartitionBy = "HOUR" | "DAY" | "WEEK" | "MONTH" | "YEAR" | "NONE"
export type Table = {
diff --git a/src/utils/questdbDocsRetrieval.ts b/src/utils/questdbDocsRetrieval.ts
new file mode 100644
index 000000000..b47038594
--- /dev/null
+++ b/src/utils/questdbDocsRetrieval.ts
@@ -0,0 +1,311 @@
+export type DocCategory =
+ | "functions"
+ | "operators"
+ | "sql"
+ | "concepts"
+ | "schema"
+
+export type ParsedDocItem = {
+ name: string
+ section?: string
+}
+
+/**
+ * Parse a documentation item string into name and optional section
+ * Handles formats like "Window Functions - avg()" or "Window Functions"
+ */
+export function parseDocItem(item: string): ParsedDocItem | null {
+ if (!item || !item.trim()) {
+ return null
+ }
+
+ const parts = item.split(/\s+-\s+/)
+ if (parts.length >= 2) {
+ return {
+ name: parts[0].trim(),
+ section: parts.slice(1).join(" - ").trim(),
+ }
+ }
+
+ return { name: item.trim() }
+}
+
+/**
+ * Parse multiple documentation item strings into an array of parsed items
+ */
+export function parseDocItems(
+ items: string[],
+): Array<{ name: string; section?: string }> {
+ return items
+ .map(parseDocItem)
+ .filter((item): item is ParsedDocItem => item !== null)
+}
+
+// Base URL for documentation
+const DOCS_BASE_URL = "https://questdb.com/docs"
+
+// Interface for metadata (no content, includes url)
+export interface DocFileMetadata {
+ path: string
+ title: string
+ headers: string[]
+ url: string
+}
+
+/**
+ * Fetch JSON from URL
+ */
+async function fetchJson<T>(url: string): Promise<T> {
+ const response = await fetch(url)
+ if (!response.ok) {
+ throw new Error(`Failed to fetch ${url}: ${response.statusText}`)
+ }
+ return response.json() as T
+}
+
+/**
+ * Fetch markdown content from URL
+ */
+async function fetchMarkdown(url: string): Promise<string> {
+ const response = await fetch(url)
+ if (!response.ok) {
+ throw new Error(`Failed to fetch ${url}: ${response.statusText}`)
+ }
+ return response.text()
+}
+
+/**
+ * Get the table of contents for all QuestDB documentation
+ */
+export async function getQuestDBTableOfContents(): Promise<string> {
+ const tocUrl = `${DOCS_BASE_URL}/web-console/toc-list.json`
+ const toc = await fetchJson<Record<DocCategory, string[]>>(tocUrl)
+
+ let result = "# QuestDB Documentation Table of Contents\n\n"
+
+ // Functions
+ result += "## Functions\n"
+ result += toc.functions.join(", ") + "\n\n"
+
+ // Operators
+ result += "## Operators\n"
+ result += toc.operators.join(", ") + "\n\n"
+
+ // SQL Keywords
+ result += "## SQL Syntax & Keywords\n"
+ result += toc.sql.join(", ") + "\n\n"
+
+ // Concepts
+ if (toc.concepts) {
+ result += "## Concepts\n"
+ result += toc.concepts.join(", ") + "\n\n"
+ }
+
+ // Schema
+ if (toc.schema) {
+ result += "## Schema\n"
+ result += toc.schema.join(", ") + "\n"
+ }
+
+ return result
+}
+
+/**
+ * Get documentation for specific items
+ */
+export async function getSpecificDocumentation(
+ category: DocCategory,
+ items: string[],
+): Promise<string> {
+ // Fetch metadata for this category
+ const metadataUrl = `${DOCS_BASE_URL}/web-console/${category}-docs.json`
+ const categoryDocs = await fetchJson<DocFileMetadata[]>(metadataUrl)
+
+ if (!categoryDocs) {
+ return `Unknown category: ${category}`
+ }
+
+ const chunks: string[] = []
+ const processedPaths = new Set()
+
+ for (const item of items) {
+ const normalizedItem = item.toLowerCase().replace(/[^a-z0-9_]/g, "_")
+ const parsed = parseDocItem(item)
+ if (!parsed) continue
+
+ const queryTitle = parsed.name
+ const querySection = parsed.section
+ const hasTitleAndSection = !!querySection
+
+ // Find files containing this item
+ for (const file of categoryDocs) {
+ // Handle explicit "Title - Section" lookups
+ if (hasTitleAndSection && queryTitle && querySection) {
+ if (file.title.toLowerCase() === queryTitle.toLowerCase()) {
+ const matchingHeaderFromTitleSection = file.headers.find(
+ (h) =>
+ h.toLowerCase() === querySection.toLowerCase() ||
+ h.toLowerCase().replace(/[^a-z0-9_]/g, "_") ===
+ querySection.toLowerCase().replace(/[^a-z0-9_]/g, "_"),
+ )
+ if (
+ matchingHeaderFromTitleSection &&
+ !processedPaths.has(
+ `${file.path}::${matchingHeaderFromTitleSection}`,
+ )
+ ) {
+ processedPaths.add(
+ `${file.path}::${matchingHeaderFromTitleSection}`,
+ )
+
+ // Fetch the markdown content
+ const content = await fetchMarkdown(file.url)
+ const sectionContent = extractSection(
+ content,
+ matchingHeaderFromTitleSection,
+ )
+ if (sectionContent) {
+ chunks.push(
+ `### ${file.path} - ${matchingHeaderFromTitleSection}\n\n${sectionContent}`,
+ )
+ continue
+ }
+ }
+ }
+ }
+
+ // Check if file name matches
+ const fileKey = file.path
+ .split("/")
+ .pop()
+ ?.replace(".md", "")
+ .replace(/-/g, "_")
+ const hasItemInPath = fileKey === normalizedItem
+
+ // Check if title matches
+ const normalizedTitle = file.title
+ .toLowerCase()
+ .replace(/[^a-z0-9_]/g, "_")
+ const hasItemInTitle =
+ normalizedTitle === normalizedItem ||
+ file.title.toLowerCase() === item.toLowerCase()
+
+ // Check if any header matches
+ const hasItemInHeaders = file.headers.some(
+ (h) =>
+ h.toLowerCase().replace(/[^a-z0-9_]/g, "_") === normalizedItem ||
+ h.toLowerCase() === item.toLowerCase(),
+ )
+
+ if (
+ (hasItemInPath || hasItemInTitle || hasItemInHeaders) &&
+ !processedPaths.has(file.path)
+ ) {
+ processedPaths.add(file.path)
+
+ // Fetch the markdown content
+ const content = await fetchMarkdown(file.url)
+
+ // If looking for a specific function/operator, try to extract just that section
+ const matchingHeader = file.headers.find(
+ (h) =>
+ h.toLowerCase() === item.toLowerCase() ||
+ h.toLowerCase().replace(/[^a-z0-9_]/g, "_") === normalizedItem,
+ )
+
+ if (matchingHeader) {
+ const sectionContent = extractSection(content, matchingHeader)
+ if (sectionContent) {
+ chunks.push(
+ `### ${file.path} - ${matchingHeader}\n\n${sectionContent}`,
+ )
+ continue
+ }
+ }
+
+ // Otherwise include the whole file
+ chunks.push(`### ${file.path}\n\n${content}`)
+ }
+ }
+ }
+
+ if (chunks.length === 0) {
+ return `No documentation found for: ${items.join(", ")}`
+ }
+
+ return chunks.join("\n\n---\n\n")
+}
+
+/**
+ * Extract a specific section from markdown content
+ */
+function extractSection(content: string, sectionHeader: string): string | null {
+ const lines = content.split("\n")
+ let inSection = false
+ const sectionContent: string[] = []
+
+ for (let i = 0; i < lines.length; i++) {
+ const line = lines[i]
+
+ // Check if we found the section header
+ if (line === `## ${sectionHeader}`) {
+ inSection = true
+ sectionContent.push(line)
+ } else if (inSection) {
+ // Check if we reached the next section
+ if (line.match(/^##?\s/)) {
+ break
+ }
+ sectionContent.push(line)
+ }
+ }
+
+ return sectionContent.length > 0 ? sectionContent.join("\n") : null
+}
+
+/**
+ * Search for documentation by keyword
+ */
+export async function searchDocumentation(query: string): Promise<string> {
+ const lowerQuery = query.toLowerCase()
+ const results: string[] = []
+
+ // Search in all categories
+ const categories: DocCategory[] = [
+ "functions",
+ "operators",
+ "sql",
+ "concepts",
+ "schema",
+ ]
+
+ for (const category of categories) {
+ const metadataUrl = `${DOCS_BASE_URL}/web-console/${category}-docs.json`
+ const docs = await fetchJson<DocFileMetadata[]>(metadataUrl)
+
+ for (const file of docs) {
+ // Check file name
+ if (file.path.toLowerCase().includes(lowerQuery)) {
+ results.push(`${category}/${file.title}`)
+ }
+
+ // Check headers
+ for (const header of file.headers) {
+ if (header.toLowerCase().includes(lowerQuery)) {
+ results.push(`${category}/${header}`)
+ }
+ }
+ }
+ }
+
+ if (results.length === 0) {
+ return `No results found for: ${query}`
+ }
+
+ return `Found ${results.length} results:\n${results.join("\n")}`
+}
+
+export async function getReferenceFull(): Promise<string> {
+ const url = `${DOCS_BASE_URL}/reference-full.md`
+ return fetchMarkdown(url)
+}
diff --git a/vite.config.mts b/vite.config.mts
index e9d410b84..12de9a1ca 100644
--- a/vite.config.mts
+++ b/vite.config.mts
@@ -54,7 +54,12 @@ export default defineConfig(({ mode }) => {
groups: ["group1", "group2"]
}))
}
- }
+ },
+ "/api": {
+ target: 'http://127.0.0.1:9000',
+ changeOrigin: true,
+ rewrite: (path: string) => `${contextPath}${path}`,
+ },
}
return {
diff --git a/yarn.lock b/yarn.lock
index 714267ff8..2a7d81376 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -211,6 +211,22 @@ __metadata:
languageName: node
linkType: hard
+"@anthropic-ai/sdk@npm:^0.71.2":
+ version: 0.71.2
+ resolution: "@anthropic-ai/sdk@npm:0.71.2"
+ dependencies:
+ json-schema-to-ts: "npm:^3.1.1"
+ peerDependencies:
+ zod: ^3.25.0 || ^4.0.0
+ peerDependenciesMeta:
+ zod:
+ optional: true
+ bin:
+ anthropic-ai-sdk: bin/cli
+ checksum: 10/a8190f9e860079dd97a544a95f36bd4b0b3a9a941610d7e067c431dc47febe03e3e761fc371166b261af9629d832533eeb3d8e72298e9f73dd52994a61881a2c
+ languageName: node
+ linkType: hard
+
"@babel/code-frame@npm:^7.0.0, @babel/code-frame@npm:^7.27.1":
version: 7.27.1
resolution: "@babel/code-frame@npm:7.27.1"
@@ -1412,7 +1428,7 @@ __metadata:
languageName: node
linkType: hard
-"@babel/runtime@npm:^7.19.0, @babel/runtime@npm:^7.20.7, @babel/runtime@npm:^7.5.5, @babel/runtime@npm:^7.8.7, @babel/runtime@npm:^7.9.2":
+"@babel/runtime@npm:^7.18.3, @babel/runtime@npm:^7.19.0, @babel/runtime@npm:^7.20.7, @babel/runtime@npm:^7.5.5, @babel/runtime@npm:^7.8.7, @babel/runtime@npm:^7.9.2":
version: 7.28.4
resolution: "@babel/runtime@npm:7.28.4"
checksum: 10/6c9a70452322ea80b3c9b2a412bcf60771819213a67576c8cec41e88a95bb7bf01fc983754cda35dc19603eef52df22203ccbf7777b9d6316932f9fb77c25163
@@ -2393,6 +2409,16 @@ __metadata:
languageName: node
linkType: hard
+"@phosphor-icons/react@npm:^2.1.10":
+ version: 2.1.10
+ resolution: "@phosphor-icons/react@npm:2.1.10"
+ peerDependencies:
+ react: ">= 16.8"
+ react-dom: ">= 16.8"
+ checksum: 10/a75dabb9a628c7ca415ce79b6a48fd1c4d7dce9bc5f19b71b58d8ab923c35149e0fddbf1c0a0be43655817577821c3d50906aeebfb2596d91af1222040b09b46
+ languageName: node
+ linkType: hard
+
"@pkgjs/parseargs@npm:^0.11.0":
version: 0.11.0
resolution: "@pkgjs/parseargs@npm:0.11.0"
@@ -2435,6 +2461,7 @@ __metadata:
resolution: "@questdb/web-console@workspace:."
dependencies:
"@4tw/cypress-drag-drop": "npm:^2.2.5"
+ "@anthropic-ai/sdk": "npm:^0.71.2"
"@babel/core": "npm:^7.28.5"
"@babel/preset-env": "npm:^7.20.2"
"@babel/preset-react": "npm:^7.17.12"
@@ -2445,6 +2472,7 @@ __metadata:
"@eslint/js": "npm:^9.13.0"
"@hookform/resolvers": "npm:2.8.5"
"@monaco-editor/react": "npm:^4.6.0"
+ "@phosphor-icons/react": "npm:^2.1.10"
"@popperjs/core": "npm:2.4.2"
"@questdb/sql-grammar": "npm:1.4.1"
"@radix-ui/react-alert-dialog": "npm:^1.1.15"
@@ -2517,6 +2545,7 @@ __metadata:
lodash.isequal: "npm:^4.5.0"
lodash.merge: "npm:^4.6.2"
monaco-editor: "npm:^0.44.0"
+ openai: "npm:^5.21.0"
posthog-js: "npm:1.298.1"
prettier: "npm:^3.3.0"
ramda: "npm:0.27.1"
@@ -2534,6 +2563,7 @@ __metadata:
react-virtuoso: "npm:^2.2.6"
redux: "npm:4.0.5"
redux-observable: "npm:1.2.0"
+ remark-gfm: "npm:^3.0.1"
resize-observer-polyfill: "npm:1.5.1"
rxjs: "npm:6.5.5"
sass: "npm:^1.93.2"
@@ -4899,6 +4929,13 @@ __metadata:
languageName: node
linkType: hard
+"ccount@npm:^2.0.0":
+ version: 2.0.1
+ resolution: "ccount@npm:2.0.1"
+ checksum: 10/48193dada54c9e260e0acf57fc16171a225305548f9ad20d5471e0f7a8c026aedd8747091dccb0d900cde7df4e4ddbd235df0d8de4a64c71b12f0d3303eeafd4
+ languageName: node
+ linkType: hard
+
"chai@npm:^4.3.10":
version: 4.5.0
resolution: "chai@npm:4.5.0"
@@ -6121,6 +6158,13 @@ __metadata:
languageName: node
linkType: hard
+"escape-string-regexp@npm:^5.0.0":
+ version: 5.0.0
+ resolution: "escape-string-regexp@npm:5.0.0"
+ checksum: 10/20daabe197f3cb198ec28546deebcf24b3dbb1a5a269184381b3116d12f0532e06007f4bc8da25669d6a7f8efb68db0758df4cd981f57bc5b57f521a3e12c59e
+ languageName: node
+ linkType: hard
+
"eslint-config-prettier@npm:^10.1.8":
version: 10.1.8
resolution: "eslint-config-prettier@npm:10.1.8"
@@ -7915,6 +7959,16 @@ __metadata:
languageName: node
linkType: hard
+"json-schema-to-ts@npm:^3.1.1":
+ version: 3.1.1
+ resolution: "json-schema-to-ts@npm:3.1.1"
+ dependencies:
+ "@babel/runtime": "npm:^7.18.3"
+ ts-algebra: "npm:^2.0.0"
+ checksum: 10/9fd0490279d36ff8b4604cc10632df05e4e5f5ee1d0a77841c927623fc1e636c47eb4ace488018c4e9a2e7e5dde5520d0870e06dfc84b930d9c98c6eacd0f041
+ languageName: node
+ linkType: hard
+
"json-schema-traverse@npm:^0.4.1":
version: 0.4.1
resolution: "json-schema-traverse@npm:0.4.1"
@@ -8244,6 +8298,13 @@ __metadata:
languageName: node
linkType: hard
+"longest-streak@npm:^3.0.0":
+ version: 3.1.0
+ resolution: "longest-streak@npm:3.1.0"
+ checksum: 10/d7f952ed004cbdb5c8bcfc4f7f5c3d65449e6c5a9e9be4505a656e3df5a57ee125f284286b4bf8ecea0c21a7b3bf2b8f9001ad506c319b9815ad6a63a47d0fd0
+ languageName: node
+ linkType: hard
+
"loose-envify@npm:^1.0.0, loose-envify@npm:^1.1.0, loose-envify@npm:^1.4.0":
version: 1.4.0
resolution: "loose-envify@npm:1.4.0"
@@ -8340,6 +8401,13 @@ __metadata:
languageName: node
linkType: hard
+"markdown-table@npm:^3.0.0":
+ version: 3.0.4
+ resolution: "markdown-table@npm:3.0.4"
+ checksum: 10/bc699819e6a15607e5def0f21aa862aa061cf1f49877baa93b0185574f6ab143591afe0e18b94d9b15ea80c6a693894150dbccfacf4f6767160dc32ae393dfe0
+ languageName: node
+ linkType: hard
+
"math-intrinsics@npm:^1.1.0":
version: 1.1.0
resolution: "math-intrinsics@npm:1.1.0"
@@ -8365,6 +8433,18 @@ __metadata:
languageName: node
linkType: hard
+"mdast-util-find-and-replace@npm:^2.0.0":
+ version: 2.2.2
+ resolution: "mdast-util-find-and-replace@npm:2.2.2"
+ dependencies:
+ "@types/mdast": "npm:^3.0.0"
+ escape-string-regexp: "npm:^5.0.0"
+ unist-util-is: "npm:^5.0.0"
+ unist-util-visit-parents: "npm:^5.0.0"
+ checksum: 10/59e11e853b74d8f6083950327df39e27287b383930ff836298a5100aeda5568282bb45046c27886d2156ea101580bb0689b890c29623cefa5adc74e95d9ca9ff
+ languageName: node
+ linkType: hard
+
"mdast-util-from-markdown@npm:^0.8.0":
version: 0.8.5
resolution: "mdast-util-from-markdown@npm:0.8.5"
@@ -8398,6 +8478,86 @@ __metadata:
languageName: node
linkType: hard
+"mdast-util-gfm-autolink-literal@npm:^1.0.0":
+ version: 1.0.3
+ resolution: "mdast-util-gfm-autolink-literal@npm:1.0.3"
+ dependencies:
+ "@types/mdast": "npm:^3.0.0"
+ ccount: "npm:^2.0.0"
+ mdast-util-find-and-replace: "npm:^2.0.0"
+ micromark-util-character: "npm:^1.0.0"
+ checksum: 10/272d075cdc7937bec0179af4052bd9032a6fbb05608b387b1b075b0491c73ce012f3ff1c718cdb5fb0ed1032c1fa7570d955b59c0ab3c3c72609928754774529
+ languageName: node
+ linkType: hard
+
+"mdast-util-gfm-footnote@npm:^1.0.0":
+ version: 1.0.2
+ resolution: "mdast-util-gfm-footnote@npm:1.0.2"
+ dependencies:
+ "@types/mdast": "npm:^3.0.0"
+ mdast-util-to-markdown: "npm:^1.3.0"
+ micromark-util-normalize-identifier: "npm:^1.0.0"
+ checksum: 10/825f207afc98fd1daa0acc8adcb5754d1f0d577ccb1749245289bee7c892557668d8ee3a5ab618f42e710646cf018dcda84f3c0c608ae11718e9014e5bf4f9dc
+ languageName: node
+ linkType: hard
+
+"mdast-util-gfm-strikethrough@npm:^1.0.0":
+ version: 1.0.3
+ resolution: "mdast-util-gfm-strikethrough@npm:1.0.3"
+ dependencies:
+ "@types/mdast": "npm:^3.0.0"
+ mdast-util-to-markdown: "npm:^1.3.0"
+ checksum: 10/a9c2dc3ef46be7952d13b7063a16171bba8aa266bffe6b1e7267df02a60b4fa3734115cca311e9127db8cfcbbcd68fdd92aa26152bcd0c14372c79b254e4df2f
+ languageName: node
+ linkType: hard
+
+"mdast-util-gfm-table@npm:^1.0.0":
+ version: 1.0.7
+ resolution: "mdast-util-gfm-table@npm:1.0.7"
+ dependencies:
+ "@types/mdast": "npm:^3.0.0"
+ markdown-table: "npm:^3.0.0"
+ mdast-util-from-markdown: "npm:^1.0.0"
+ mdast-util-to-markdown: "npm:^1.3.0"
+ checksum: 10/167f7f7a9dc17ce852f4f9bd155d7be179588e2ccf4ce3c4f23b12c1c9db5de904cdacc6f41b2d635cb84eb09a7ff5a33497585f2664a7f1e6bd6f7ab7e1197a
+ languageName: node
+ linkType: hard
+
+"mdast-util-gfm-task-list-item@npm:^1.0.0":
+ version: 1.0.2
+ resolution: "mdast-util-gfm-task-list-item@npm:1.0.2"
+ dependencies:
+ "@types/mdast": "npm:^3.0.0"
+ mdast-util-to-markdown: "npm:^1.3.0"
+ checksum: 10/958417a7d7690728b44d65127ab9189c7feaa17aea924dd56a888c781ab3abaa4eb0c209f05c4dbf203da3d0c4df8fdace4c9471b644268bfc7fc792a018a171
+ languageName: node
+ linkType: hard
+
+"mdast-util-gfm@npm:^2.0.0":
+ version: 2.0.2
+ resolution: "mdast-util-gfm@npm:2.0.2"
+ dependencies:
+ mdast-util-from-markdown: "npm:^1.0.0"
+ mdast-util-gfm-autolink-literal: "npm:^1.0.0"
+ mdast-util-gfm-footnote: "npm:^1.0.0"
+ mdast-util-gfm-strikethrough: "npm:^1.0.0"
+ mdast-util-gfm-table: "npm:^1.0.0"
+ mdast-util-gfm-task-list-item: "npm:^1.0.0"
+ mdast-util-to-markdown: "npm:^1.0.0"
+ checksum: 10/70e6cd32af94181d409f171f984f83fc18b3efe316844c62f31816f5c1612a92517b8ed766340f23e0a6d6cb0f27a8b07d288bab6619cbdbb0c5341006bcdc4d
+ languageName: node
+ linkType: hard
+
+"mdast-util-phrasing@npm:^3.0.0":
+ version: 3.0.1
+ resolution: "mdast-util-phrasing@npm:3.0.1"
+ dependencies:
+ "@types/mdast": "npm:^3.0.0"
+ unist-util-is: "npm:^5.0.0"
+ checksum: 10/c5b616d9b1eb76a6b351d195d94318494722525a12a89d9c8a3b091af7db3dd1fc55d294f9d29266d8159a8267b0df4a7a133bda8a3909d5331c383e1e1ff328
+ languageName: node
+ linkType: hard
+
"mdast-util-to-hast@npm:^12.1.0":
version: 12.3.0
resolution: "mdast-util-to-hast@npm:12.3.0"
@@ -8428,6 +8588,22 @@ __metadata:
languageName: node
linkType: hard
+"mdast-util-to-markdown@npm:^1.0.0, mdast-util-to-markdown@npm:^1.3.0":
+ version: 1.5.0
+ resolution: "mdast-util-to-markdown@npm:1.5.0"
+ dependencies:
+ "@types/mdast": "npm:^3.0.0"
+ "@types/unist": "npm:^2.0.0"
+ longest-streak: "npm:^3.0.0"
+ mdast-util-phrasing: "npm:^3.0.0"
+ mdast-util-to-string: "npm:^3.0.0"
+ micromark-util-decode-string: "npm:^1.0.0"
+ unist-util-visit: "npm:^4.0.0"
+ zwitch: "npm:^2.0.0"
+ checksum: 10/713f674588a01969a2ce524a69985bd57e507377eea2c4ba69800fb305414468b30144ae9b837fbdde8c609877673140e4f56f6cabe9e0e2bc1487291e3c5144
+ languageName: node
+ linkType: hard
+
"mdast-util-to-string@npm:^2.0.0":
version: 2.0.0
resolution: "mdast-util-to-string@npm:2.0.0"
@@ -8435,7 +8611,7 @@ __metadata:
languageName: node
linkType: hard
-"mdast-util-to-string@npm:^3.1.0":
+"mdast-util-to-string@npm:^3.0.0, mdast-util-to-string@npm:^3.1.0":
version: 3.2.0
resolution: "mdast-util-to-string@npm:3.2.0"
dependencies:
@@ -8495,7 +8671,7 @@ __metadata:
languageName: node
linkType: hard
-"micromark-core-commonmark@npm:^1.0.1":
+"micromark-core-commonmark@npm:^1.0.0, micromark-core-commonmark@npm:^1.0.1":
version: 1.1.0
resolution: "micromark-core-commonmark@npm:1.1.0"
dependencies:
@@ -8519,6 +8695,99 @@ __metadata:
languageName: node
linkType: hard
+"micromark-extension-gfm-autolink-literal@npm:^1.0.0":
+ version: 1.0.5
+ resolution: "micromark-extension-gfm-autolink-literal@npm:1.0.5"
+ dependencies:
+ micromark-util-character: "npm:^1.0.0"
+ micromark-util-sanitize-uri: "npm:^1.0.0"
+ micromark-util-symbol: "npm:^1.0.0"
+ micromark-util-types: "npm:^1.0.0"
+ checksum: 10/1e0ccc758baef3cd0478ba84ff86fa1ec2b389042421c7cade9485b775456c1a9c3bd797393002b2c6f6abd9bdf829cb114874557bbcb8e43d16d06a464811c0
+ languageName: node
+ linkType: hard
+
+"micromark-extension-gfm-footnote@npm:^1.0.0":
+ version: 1.1.2
+ resolution: "micromark-extension-gfm-footnote@npm:1.1.2"
+ dependencies:
+ micromark-core-commonmark: "npm:^1.0.0"
+ micromark-factory-space: "npm:^1.0.0"
+ micromark-util-character: "npm:^1.0.0"
+ micromark-util-normalize-identifier: "npm:^1.0.0"
+ micromark-util-sanitize-uri: "npm:^1.0.0"
+ micromark-util-symbol: "npm:^1.0.0"
+ micromark-util-types: "npm:^1.0.0"
+ uvu: "npm:^0.5.0"
+ checksum: 10/8777073fb76d2fd01f6b2405106af6c349c1e25660c4d37cadcc61c187d71c8444870f73cefaaa67f12884d5e45c78ee3c5583561a0b330bd91c6d997113584a
+ languageName: node
+ linkType: hard
+
+"micromark-extension-gfm-strikethrough@npm:^1.0.0":
+ version: 1.0.7
+ resolution: "micromark-extension-gfm-strikethrough@npm:1.0.7"
+ dependencies:
+ micromark-util-chunked: "npm:^1.0.0"
+ micromark-util-classify-character: "npm:^1.0.0"
+ micromark-util-resolve-all: "npm:^1.0.0"
+ micromark-util-symbol: "npm:^1.0.0"
+ micromark-util-types: "npm:^1.0.0"
+ uvu: "npm:^0.5.0"
+ checksum: 10/8411ef1aa5dc83f662e8b45b085f70ddff29deb3c4259269e8a1ff656397abb755d8ea841a14be23e8585a31d3c0a5de1bd2c05f3453b66670e499d4a0004f5e
+ languageName: node
+ linkType: hard
+
+"micromark-extension-gfm-table@npm:^1.0.0":
+ version: 1.0.7
+ resolution: "micromark-extension-gfm-table@npm:1.0.7"
+ dependencies:
+ micromark-factory-space: "npm:^1.0.0"
+ micromark-util-character: "npm:^1.0.0"
+ micromark-util-symbol: "npm:^1.0.0"
+ micromark-util-types: "npm:^1.0.0"
+ uvu: "npm:^0.5.0"
+ checksum: 10/f05d86a099c941a2a309d60bf4839d16a00a93cb880cda4ab8faeb831647763fff6e03197ec15b80e1f195002afcca6afe2b95c3622b049b82d7ff8ef1c1c776
+ languageName: node
+ linkType: hard
+
+"micromark-extension-gfm-tagfilter@npm:^1.0.0":
+ version: 1.0.2
+ resolution: "micromark-extension-gfm-tagfilter@npm:1.0.2"
+ dependencies:
+ micromark-util-types: "npm:^1.0.0"
+ checksum: 10/55c7d9019d6a39efaaed2c2e40b0aaa137d2c4f9c94cac82e93f509a806c3a775e4c815b5d8e986617450b68861a19776e4b886307e83db452b393f15a837b39
+ languageName: node
+ linkType: hard
+
+"micromark-extension-gfm-task-list-item@npm:^1.0.0":
+ version: 1.0.5
+ resolution: "micromark-extension-gfm-task-list-item@npm:1.0.5"
+ dependencies:
+ micromark-factory-space: "npm:^1.0.0"
+ micromark-util-character: "npm:^1.0.0"
+ micromark-util-symbol: "npm:^1.0.0"
+ micromark-util-types: "npm:^1.0.0"
+ uvu: "npm:^0.5.0"
+ checksum: 10/46bb1baa10bfb785a2e3e2f975e5509260b9995d5c3aeddf77051957d218ce1af4ea737bcb6a56a930e62d42b05307b20632a400eff25cdb290789ff3170cad5
+ languageName: node
+ linkType: hard
+
+"micromark-extension-gfm@npm:^2.0.0":
+ version: 2.0.3
+ resolution: "micromark-extension-gfm@npm:2.0.3"
+ dependencies:
+ micromark-extension-gfm-autolink-literal: "npm:^1.0.0"
+ micromark-extension-gfm-footnote: "npm:^1.0.0"
+ micromark-extension-gfm-strikethrough: "npm:^1.0.0"
+ micromark-extension-gfm-table: "npm:^1.0.0"
+ micromark-extension-gfm-tagfilter: "npm:^1.0.0"
+ micromark-extension-gfm-task-list-item: "npm:^1.0.0"
+ micromark-util-combine-extensions: "npm:^1.0.0"
+ micromark-util-types: "npm:^1.0.0"
+ checksum: 10/3ffd06ced4314abd0f0c72ec227f034f38dd47facbb62439ef3216d42f32433f3901d14675cf806e8d73689802a11849958b330bb5b55dd4fd5cdc64ebaf345c
+ languageName: node
+ linkType: hard
+
"micromark-factory-destination@npm:^1.0.0":
version: 1.1.0
resolution: "micromark-factory-destination@npm:1.1.0"
@@ -9211,6 +9480,23 @@ __metadata:
languageName: node
linkType: hard
+"openai@npm:^5.21.0":
+ version: 5.23.2
+ resolution: "openai@npm:5.23.2"
+ peerDependencies:
+ ws: ^8.18.0
+ zod: ^3.23.8
+ peerDependenciesMeta:
+ ws:
+ optional: true
+ zod:
+ optional: true
+ bin:
+ openai: bin/cli
+ checksum: 10/ee22ddc948a9b7b8ea1518bc6aea73776d0c1a5abcac029e8f494106beedbf175e6a9497de74f72754cdad504e03f8d9fd128963207bdd45cbf2b11cd02657d6
+ languageName: node
+ linkType: hard
+
"optionator@npm:^0.9.3":
version: 0.9.4
resolution: "optionator@npm:0.9.4"
@@ -10198,6 +10484,18 @@ __metadata:
languageName: node
linkType: hard
+"remark-gfm@npm:^3.0.1":
+ version: 3.0.1
+ resolution: "remark-gfm@npm:3.0.1"
+ dependencies:
+ "@types/mdast": "npm:^3.0.0"
+ mdast-util-gfm: "npm:^2.0.0"
+ micromark-extension-gfm: "npm:^2.0.0"
+ unified: "npm:^10.0.0"
+ checksum: 10/8ec301f5fb1f52c548b5a6d7ca6a3422d55db73cd703f147c979d16dca003f065181f55404d6f3f49d33f1faca3fe56ae731ed7fe0acc00cd945a8e605f155f2
+ languageName: node
+ linkType: hard
+
"remark-parse@npm:^10.0.0":
version: 10.0.2
resolution: "remark-parse@npm:10.0.2"
@@ -11501,6 +11799,13 @@ __metadata:
languageName: node
linkType: hard
+"ts-algebra@npm:^2.0.0":
+ version: 2.0.0
+ resolution: "ts-algebra@npm:2.0.0"
+ checksum: 10/b970eef64ca9594a77337e03b9c1732c1b7a0d2c4d316638b654e921a47b40c4cc42f41821445e9e54408d5dfdf4ecca27ffa59554373033b9c92dee8b52066d
+ languageName: node
+ linkType: hard
+
"ts-api-utils@npm:^2.1.0":
version: 2.1.0
resolution: "ts-api-utils@npm:2.1.0"
@@ -11881,7 +12186,7 @@ __metadata:
languageName: node
linkType: hard
-"unist-util-visit-parents@npm:^5.1.1":
+"unist-util-visit-parents@npm:^5.0.0, unist-util-visit-parents@npm:^5.1.1":
version: 5.1.3
resolution: "unist-util-visit-parents@npm:5.1.3"
dependencies:
@@ -12626,3 +12931,10 @@ __metadata:
checksum: 10/28a1bebacab3bc60150b6b0a2ba1db2ad033f068e81f05e4892ec0ea13ae63f5d140a1d692062ac0657840c8da076f35b94433b5f1c329d7803b247de80f064a
languageName: node
linkType: hard
+
+"zwitch@npm:^2.0.0":
+ version: 2.0.4
+ resolution: "zwitch@npm:2.0.4"
+ checksum: 10/f22ec5fc2d5f02c423c93d35cdfa83573a3a3bd98c66b927c368ea4d0e7252a500df2a90a6b45522be536a96a73404393c958e945fdba95e6832c200791702b6
+ languageName: node
+ linkType: hard