
Commit 3fad5d3

kubaeror, Zevan770, albanx, and Copilot committed
feat: add event logging and Codex CLI responses endpoints
- Add /api/event_logging/batch endpoint returning 200 (PR ericc-ch#165)
- Add /v1/responses endpoint for OpenAI Codex CLI support (PR ericc-ch#195)
- Create response service with streaming support

Co-authored-by: Zevan770 <Zevan770@users.noreply.github.com>
Co-authored-by: albanx <albanx@users.noreply.github.com>
Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com>
1 parent 72e9cc8 commit 3fad5d3

5 files changed

Lines changed: 184 additions & 0 deletions

File tree

- src/routes/event-logging/route.ts
- src/routes/responses/handler.ts
- src/routes/responses/route.ts
- src/server.ts
- src/services/copilot/create-response.ts

src/routes/event-logging/route.ts

Lines changed: 16 additions & 0 deletions
```ts
/**
 * Anthropic Event Logging Endpoint
 * PR #165: Add Anthropic event logging endpoint (@Zevan770)
 *
 * This endpoint returns 200 for /api/event_logging/batch to satisfy
 * Anthropic clients that expect this endpoint to exist.
 */

import { Hono } from "hono"

export const eventLoggingRoute = new Hono()

// Accept event logging batch requests and return 200
eventLoggingRoute.post("/batch", (c) => {
  return c.json({ status: "ok" }, 200)
})
```
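The handler never reads the request body, so any JSON payload is accepted and answered with 200. A minimal client sketch of hitting the stub (the localhost:4141 base URL is an assumption, and note that the server.ts change below also puts /api/* behind apiKeyAuth, so a real request may need an API key):

```ts
// Illustrative request to the stub endpoint; base URL is an assumption.
const res = await fetch("http://localhost:4141/api/event_logging/batch", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  // The handler ignores the body entirely, so any JSON works here.
  body: JSON.stringify({ events: [] }),
})
console.log(res.status) // 200, with body { "status": "ok" }
```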

src/routes/responses/handler.ts

Lines changed: 45 additions & 0 deletions
```ts
/**
 * Responses API Handler
 * PR #195: Add Codex CLI support (@albanx)
 *
 * Handles the /v1/responses endpoint for OpenAI Codex CLI compatibility.
 */

import type { Context } from "hono"

import consola from "consola"
import { streamSSE, type SSEMessage } from "hono/streaming"

import { checkRateLimit } from "~/lib/rate-limit"
import { state } from "~/lib/state"
import {
  createResponse,
  type ResponseOutput,
  type ResponsesPayload,
} from "~/services/copilot/create-response"

export async function handleResponse(c: Context) {
  await checkRateLimit(state)

  const payload = await c.req.json<ResponsesPayload>()
  consola.debug("Responses API payload:", JSON.stringify(payload).slice(-400))

  const response = await createResponse(payload)

  if (isNonStreaming(response)) {
    consola.debug("Non-streaming response:", JSON.stringify(response))
    return c.json(response)
  }

  consola.debug("Streaming response")
  return streamSSE(c, async (stream) => {
    for await (const chunk of response) {
      consola.debug("Streaming chunk:", JSON.stringify(chunk))
      await stream.writeSSE(chunk as SSEMessage)
    }
  })
}

const isNonStreaming = (
  response: Awaited<ReturnType<typeof createResponse>>,
): response is ResponseOutput => Object.hasOwn(response, "output")
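```

createResponse returns either a parsed ResponseOutput or, when payload.stream is true, an async iterable of SSE events; the Object.hasOwn(response, "output") guard is what tells the two apart at runtime, since only the non-streaming shape carries an output array. A sketch of a client consuming the streaming path (base URL, model id, and auth header are assumptions, not part of this commit):

```ts
// Illustrative Codex-style client for the streaming path.
const res = await fetch("http://localhost:4141/v1/responses", {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    Authorization: "Bearer <api-key>", // assumed: /v1/* sits behind apiKeyAuth
  },
  // Model id is illustrative; this commit does not pin one down.
  body: JSON.stringify({ model: "gpt-4.1", input: "Hello", stream: true }),
})

// The handler replies with text/event-stream; print it as it arrives.
const reader = res.body!.getReader()
const decoder = new TextDecoder()
for (;;) {
  const { done, value } = await reader.read()
  if (done) break
  process.stdout.write(decoder.decode(value, { stream: true }))
}
```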

src/routes/responses/route.ts

Lines changed: 14 additions & 0 deletions
```ts
/**
 * Responses API Route
 * PR #195: Add Codex CLI support (@albanx)
 *
 * This endpoint provides /v1/responses compatibility for OpenAI Codex CLI.
 */

import { Hono } from "hono"

import { handleResponse } from "./handler"

export const responsesRoute = new Hono()

responsesRoute.post("/", handleResponse)
```

src/server.ts

Lines changed: 9 additions & 0 deletions
```diff
@@ -5,8 +5,10 @@ import { logger } from "hono/logger"
 import { apiKeyAuth } from "./lib/api-key-auth"
 import { completionRoutes } from "./routes/chat-completions/route"
 import { embeddingRoutes } from "./routes/embeddings/route"
+import { eventLoggingRoute } from "./routes/event-logging/route"
 import { messageRoutes } from "./routes/messages/route"
 import { modelRoutes } from "./routes/models/route"
+import { responsesRoute } from "./routes/responses/route"
 import { tokenRoute } from "./routes/token/route"
 import { usageRoute } from "./routes/usage/route"

@@ -23,6 +25,7 @@ server.use("/chat/*", apiKeyAuth)
 server.use("/models/*", apiKeyAuth)
 server.use("/embeddings/*", apiKeyAuth)
 server.use("/v1/*", apiKeyAuth)
+server.use("/api/*", apiKeyAuth)

 server.route("/chat/completions", completionRoutes)
 server.route("/models", modelRoutes)
@@ -37,3 +40,9 @@ server.route("/v1/embeddings", embeddingRoutes)

 // Anthropic compatible endpoints
 server.route("/v1/messages", messageRoutes)
+
+// PR #165: Anthropic event logging endpoint (@Zevan770)
+server.route("/api/event_logging", eventLoggingRoute)
+
+// PR #195: Codex CLI responses endpoint (@albanx)
+server.route("/v1/responses", responsesRoute)
```
src/services/copilot/create-response.ts

Lines changed: 100 additions & 0 deletions

```ts
/**
 * Create Responses Service
 * PR #195: Add Codex CLI support (@albanx)
 *
 * This service handles the /v1/responses endpoint for OpenAI Codex CLI compatibility.
 * The Codex CLI requires this endpoint to function properly.
 *
 * Docs:
 * - https://developers.openai.com/codex/config-advanced
 * - https://github.com/openai/codex/discussions/7782
 */

import consola from "consola"
import { events } from "fetch-event-stream"

import { copilotBaseUrl, copilotHeaders } from "~/lib/api-config"
import { HTTPError } from "~/lib/error"
import { state } from "~/lib/state"

export interface ResponsesPayload {
  model: string
  input: string | Array<ResponseInputItem>
  instructions?: string
  max_output_tokens?: number
  temperature?: number
  top_p?: number
  stream?: boolean
  tools?: Array<ResponseTool>
  tool_choice?:
    | "auto"
    | "none"
    | "required"
    | { type: "function"; name: string }
  reasoning?: {
    effort?: "low" | "medium" | "high"
  }
  truncation?: "auto" | "disabled"
}

export interface ResponseInputItem {
  type: "message" | "item_reference"
  role?: "user" | "assistant" | "system"
  content?: string | Array<{ type: "input_text"; text: string }>
  id?: string
}

export interface ResponseTool {
  type: "function"
  name: string
  description?: string
  parameters?: Record<string, unknown>
}

export interface ResponseOutput {
  id: string
  object: "response"
  created_at: number
  model: string
  output: Array<ResponseOutputItem>
  usage?: {
    input_tokens: number
    output_tokens: number
    total_tokens: number
  }
  status: "completed" | "failed" | "cancelled" | "incomplete"
}

export interface ResponseOutputItem {
  type: "message"
  id: string
  role: "assistant"
  content: Array<{ type: "output_text"; text: string }>
}

export const createResponse = async (payload: ResponsesPayload) => {
  if (!state.copilotToken) throw new Error("Copilot token not found")

  const headers: Record<string, string> = {
    ...copilotHeaders(state, false),
    "X-Initiator": "agent",
  }

  // Copilot uses /responses endpoint
  const response = await fetch(`${copilotBaseUrl(state)}/responses`, {
    method: "POST",
    headers,
    body: JSON.stringify(payload),
  })

  if (!response.ok) {
    consola.error("Failed to create response", response)
    throw new HTTPError("Failed to create response", response)
  }

  if (payload.stream) {
    return events(response)
  }

  return (await response.json()) as ResponseOutput
}
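```

The return type of createResponse is a union: with stream: true it hands back the raw SSE iterator from events(), otherwise the parsed ResponseOutput. A sketch of a non-streaming call from elsewhere in the codebase (the model id is illustrative, not something this commit pins down):

```ts
// Sketch of consuming createResponse; model id is an assumption.
import {
  createResponse,
  type ResponsesPayload,
} from "~/services/copilot/create-response"

const payload: ResponsesPayload = {
  model: "gpt-4.1", // assumption: any Copilot-supported model id
  input: [
    {
      type: "message",
      role: "user",
      content: [{ type: "input_text", text: "Summarize this repo" }],
    },
  ],
  stream: false,
}

const result = await createResponse(payload)

// With stream: false the result is a parsed ResponseOutput;
// the "output" check mirrors the isNonStreaming guard in the handler.
if ("output" in result) {
  for (const item of result.output) {
    for (const part of item.content) {
      if (part.type === "output_text") console.log(part.text)
    }
  }
}
```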
